1 /* 2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_shared.h" 21 #include "xfs_format.h" 22 #include "xfs_log_format.h" 23 #include "xfs_trans_resv.h" 24 #include "xfs_sb.h" 25 #include "xfs_ag.h" 26 #include "xfs_mount.h" 27 #include "xfs_btree.h" 28 #include "xfs_alloc_btree.h" 29 #include "xfs_alloc.h" 30 #include "xfs_extent_busy.h" 31 #include "xfs_error.h" 32 #include "xfs_trace.h" 33 #include "xfs_cksum.h" 34 #include "xfs_trans.h" 35 36 37 STATIC struct xfs_btree_cur * 38 xfs_allocbt_dup_cursor( 39 struct xfs_btree_cur *cur) 40 { 41 return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp, 42 cur->bc_private.a.agbp, cur->bc_private.a.agno, 43 cur->bc_btnum); 44 } 45 46 STATIC void 47 xfs_allocbt_set_root( 48 struct xfs_btree_cur *cur, 49 union xfs_btree_ptr *ptr, 50 int inc) 51 { 52 struct xfs_buf *agbp = cur->bc_private.a.agbp; 53 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 54 xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); 55 int btnum = cur->bc_btnum; 56 struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); 57 58 ASSERT(ptr->s != 0); 59 60 agf->agf_roots[btnum] = ptr->s; 61 be32_add_cpu(&agf->agf_levels[btnum], inc); 62 pag->pagf_levels[btnum] += inc; 63 xfs_perag_put(pag); 64 65 xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); 66 } 67 68 STATIC int 69 xfs_allocbt_alloc_block( 70 struct xfs_btree_cur *cur, 71 union xfs_btree_ptr *start, 72 union xfs_btree_ptr *new, 73 int *stat) 74 { 75 int error; 76 xfs_agblock_t bno; 77 78 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); 79 80 /* Allocate the new block from the freelist. If we can't, give up. */ 81 error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, 82 &bno, 1); 83 if (error) { 84 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); 85 return error; 86 } 87 88 if (bno == NULLAGBLOCK) { 89 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 90 *stat = 0; 91 return 0; 92 } 93 94 xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false); 95 96 xfs_trans_agbtree_delta(cur->bc_tp, 1); 97 new->s = cpu_to_be32(bno); 98 99 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 100 *stat = 1; 101 return 0; 102 } 103 104 STATIC int 105 xfs_allocbt_free_block( 106 struct xfs_btree_cur *cur, 107 struct xfs_buf *bp) 108 { 109 struct xfs_buf *agbp = cur->bc_private.a.agbp; 110 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 111 xfs_agblock_t bno; 112 int error; 113 114 bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); 115 error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); 116 if (error) 117 return error; 118 119 xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1, 120 XFS_EXTENT_BUSY_SKIP_DISCARD); 121 xfs_trans_agbtree_delta(cur->bc_tp, -1); 122 123 xfs_trans_binval(cur->bc_tp, bp); 124 return 0; 125 } 126 127 /* 128 * Update the longest extent in the AGF 129 */ 130 STATIC void 131 xfs_allocbt_update_lastrec( 132 struct xfs_btree_cur *cur, 133 struct xfs_btree_block *block, 134 union xfs_btree_rec *rec, 135 int ptr, 136 int reason) 137 { 138 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); 139 xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); 140 struct xfs_perag *pag; 141 __be32 len; 142 int numrecs; 143 144 ASSERT(cur->bc_btnum == XFS_BTNUM_CNT); 145 146 switch (reason) { 147 case LASTREC_UPDATE: 148 /* 149 * If this is the last leaf block and it's the last record, 150 * then update the size of the longest extent in the AG. 151 */ 152 if (ptr != xfs_btree_get_numrecs(block)) 153 return; 154 len = rec->alloc.ar_blockcount; 155 break; 156 case LASTREC_INSREC: 157 if (be32_to_cpu(rec->alloc.ar_blockcount) <= 158 be32_to_cpu(agf->agf_longest)) 159 return; 160 len = rec->alloc.ar_blockcount; 161 break; 162 case LASTREC_DELREC: 163 numrecs = xfs_btree_get_numrecs(block); 164 if (ptr <= numrecs) 165 return; 166 ASSERT(ptr == numrecs + 1); 167 168 if (numrecs) { 169 xfs_alloc_rec_t *rrp; 170 171 rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs); 172 len = rrp->ar_blockcount; 173 } else { 174 len = 0; 175 } 176 177 break; 178 default: 179 ASSERT(0); 180 return; 181 } 182 183 agf->agf_longest = len; 184 pag = xfs_perag_get(cur->bc_mp, seqno); 185 pag->pagf_longest = be32_to_cpu(len); 186 xfs_perag_put(pag); 187 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); 188 } 189 190 STATIC int 191 xfs_allocbt_get_minrecs( 192 struct xfs_btree_cur *cur, 193 int level) 194 { 195 return cur->bc_mp->m_alloc_mnr[level != 0]; 196 } 197 198 STATIC int 199 xfs_allocbt_get_maxrecs( 200 struct xfs_btree_cur *cur, 201 int level) 202 { 203 return cur->bc_mp->m_alloc_mxr[level != 0]; 204 } 205 206 STATIC void 207 xfs_allocbt_init_key_from_rec( 208 union xfs_btree_key *key, 209 union xfs_btree_rec *rec) 210 { 211 ASSERT(rec->alloc.ar_startblock != 0); 212 213 key->alloc.ar_startblock = rec->alloc.ar_startblock; 214 key->alloc.ar_blockcount = rec->alloc.ar_blockcount; 215 } 216 217 STATIC void 218 xfs_allocbt_init_rec_from_key( 219 union xfs_btree_key *key, 220 union xfs_btree_rec *rec) 221 { 222 ASSERT(key->alloc.ar_startblock != 0); 223 224 rec->alloc.ar_startblock = key->alloc.ar_startblock; 225 rec->alloc.ar_blockcount = key->alloc.ar_blockcount; 226 } 227 228 STATIC void 229 xfs_allocbt_init_rec_from_cur( 230 struct xfs_btree_cur *cur, 231 union xfs_btree_rec *rec) 232 { 233 ASSERT(cur->bc_rec.a.ar_startblock != 0); 234 235 rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); 236 rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); 237 } 238 239 STATIC void 240 xfs_allocbt_init_ptr_from_cur( 241 struct xfs_btree_cur *cur, 242 union xfs_btree_ptr *ptr) 243 { 244 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); 245 246 ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno)); 247 ASSERT(agf->agf_roots[cur->bc_btnum] != 0); 248 249 ptr->s = agf->agf_roots[cur->bc_btnum]; 250 } 251 252 STATIC __int64_t 253 xfs_allocbt_key_diff( 254 struct xfs_btree_cur *cur, 255 union xfs_btree_key *key) 256 { 257 xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a; 258 xfs_alloc_key_t *kp = &key->alloc; 259 __int64_t diff; 260 261 if (cur->bc_btnum == XFS_BTNUM_BNO) { 262 return (__int64_t)be32_to_cpu(kp->ar_startblock) - 263 rec->ar_startblock; 264 } 265 266 diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount; 267 if (diff) 268 return diff; 269 270 return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; 271 } 272 273 static bool 274 xfs_allocbt_verify( 275 struct xfs_buf *bp) 276 { 277 struct xfs_mount *mp = bp->b_target->bt_mount; 278 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 279 struct xfs_perag *pag = bp->b_pag; 280 unsigned int level; 281 282 /* 283 * magic number and level verification 284 * 285 * During growfs operations, we can't verify the exact level or owner as 286 * the perag is not fully initialised and hence not attached to the 287 * buffer. In this case, check against the maximum tree depth. 288 * 289 * Similarly, during log recovery we will have a perag structure 290 * attached, but the agf information will not yet have been initialised 291 * from the on disk AGF. Again, we can only check against maximum limits 292 * in this case. 293 */ 294 level = be16_to_cpu(block->bb_level); 295 switch (block->bb_magic) { 296 case cpu_to_be32(XFS_ABTB_CRC_MAGIC): 297 if (!xfs_sb_version_hascrc(&mp->m_sb)) 298 return false; 299 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid)) 300 return false; 301 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) 302 return false; 303 if (pag && 304 be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno) 305 return false; 306 /* fall through */ 307 case cpu_to_be32(XFS_ABTB_MAGIC): 308 if (pag && pag->pagf_init) { 309 if (level >= pag->pagf_levels[XFS_BTNUM_BNOi]) 310 return false; 311 } else if (level >= mp->m_ag_maxlevels) 312 return false; 313 break; 314 case cpu_to_be32(XFS_ABTC_CRC_MAGIC): 315 if (!xfs_sb_version_hascrc(&mp->m_sb)) 316 return false; 317 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid)) 318 return false; 319 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) 320 return false; 321 if (pag && 322 be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno) 323 return false; 324 /* fall through */ 325 case cpu_to_be32(XFS_ABTC_MAGIC): 326 if (pag && pag->pagf_init) { 327 if (level >= pag->pagf_levels[XFS_BTNUM_CNTi]) 328 return false; 329 } else if (level >= mp->m_ag_maxlevels) 330 return false; 331 break; 332 default: 333 return false; 334 } 335 336 /* numrecs verification */ 337 if (be16_to_cpu(block->bb_numrecs) > mp->m_alloc_mxr[level != 0]) 338 return false; 339 340 /* sibling pointer verification */ 341 if (!block->bb_u.s.bb_leftsib || 342 (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks && 343 block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK))) 344 return false; 345 if (!block->bb_u.s.bb_rightsib || 346 (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks && 347 block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK))) 348 return false; 349 350 return true; 351 } 352 353 static void 354 xfs_allocbt_read_verify( 355 struct xfs_buf *bp) 356 { 357 if (!xfs_btree_sblock_verify_crc(bp)) 358 xfs_buf_ioerror(bp, -EFSBADCRC); 359 else if (!xfs_allocbt_verify(bp)) 360 xfs_buf_ioerror(bp, -EFSCORRUPTED); 361 362 if (bp->b_error) { 363 trace_xfs_btree_corrupt(bp, _RET_IP_); 364 xfs_verifier_error(bp); 365 } 366 } 367 368 static void 369 xfs_allocbt_write_verify( 370 struct xfs_buf *bp) 371 { 372 if (!xfs_allocbt_verify(bp)) { 373 trace_xfs_btree_corrupt(bp, _RET_IP_); 374 xfs_buf_ioerror(bp, -EFSCORRUPTED); 375 xfs_verifier_error(bp); 376 return; 377 } 378 xfs_btree_sblock_calc_crc(bp); 379 380 } 381 382 const struct xfs_buf_ops xfs_allocbt_buf_ops = { 383 .verify_read = xfs_allocbt_read_verify, 384 .verify_write = xfs_allocbt_write_verify, 385 }; 386 387 388 #if defined(DEBUG) || defined(XFS_WARN) 389 STATIC int 390 xfs_allocbt_keys_inorder( 391 struct xfs_btree_cur *cur, 392 union xfs_btree_key *k1, 393 union xfs_btree_key *k2) 394 { 395 if (cur->bc_btnum == XFS_BTNUM_BNO) { 396 return be32_to_cpu(k1->alloc.ar_startblock) < 397 be32_to_cpu(k2->alloc.ar_startblock); 398 } else { 399 return be32_to_cpu(k1->alloc.ar_blockcount) < 400 be32_to_cpu(k2->alloc.ar_blockcount) || 401 (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount && 402 be32_to_cpu(k1->alloc.ar_startblock) < 403 be32_to_cpu(k2->alloc.ar_startblock)); 404 } 405 } 406 407 STATIC int 408 xfs_allocbt_recs_inorder( 409 struct xfs_btree_cur *cur, 410 union xfs_btree_rec *r1, 411 union xfs_btree_rec *r2) 412 { 413 if (cur->bc_btnum == XFS_BTNUM_BNO) { 414 return be32_to_cpu(r1->alloc.ar_startblock) + 415 be32_to_cpu(r1->alloc.ar_blockcount) <= 416 be32_to_cpu(r2->alloc.ar_startblock); 417 } else { 418 return be32_to_cpu(r1->alloc.ar_blockcount) < 419 be32_to_cpu(r2->alloc.ar_blockcount) || 420 (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount && 421 be32_to_cpu(r1->alloc.ar_startblock) < 422 be32_to_cpu(r2->alloc.ar_startblock)); 423 } 424 } 425 #endif /* DEBUG */ 426 427 static const struct xfs_btree_ops xfs_allocbt_ops = { 428 .rec_len = sizeof(xfs_alloc_rec_t), 429 .key_len = sizeof(xfs_alloc_key_t), 430 431 .dup_cursor = xfs_allocbt_dup_cursor, 432 .set_root = xfs_allocbt_set_root, 433 .alloc_block = xfs_allocbt_alloc_block, 434 .free_block = xfs_allocbt_free_block, 435 .update_lastrec = xfs_allocbt_update_lastrec, 436 .get_minrecs = xfs_allocbt_get_minrecs, 437 .get_maxrecs = xfs_allocbt_get_maxrecs, 438 .init_key_from_rec = xfs_allocbt_init_key_from_rec, 439 .init_rec_from_key = xfs_allocbt_init_rec_from_key, 440 .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, 441 .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, 442 .key_diff = xfs_allocbt_key_diff, 443 .buf_ops = &xfs_allocbt_buf_ops, 444 #if defined(DEBUG) || defined(XFS_WARN) 445 .keys_inorder = xfs_allocbt_keys_inorder, 446 .recs_inorder = xfs_allocbt_recs_inorder, 447 #endif 448 }; 449 450 /* 451 * Allocate a new allocation btree cursor. 452 */ 453 struct xfs_btree_cur * /* new alloc btree cursor */ 454 xfs_allocbt_init_cursor( 455 struct xfs_mount *mp, /* file system mount point */ 456 struct xfs_trans *tp, /* transaction pointer */ 457 struct xfs_buf *agbp, /* buffer for agf structure */ 458 xfs_agnumber_t agno, /* allocation group number */ 459 xfs_btnum_t btnum) /* btree identifier */ 460 { 461 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 462 struct xfs_btree_cur *cur; 463 464 ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT); 465 466 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); 467 468 cur->bc_tp = tp; 469 cur->bc_mp = mp; 470 cur->bc_btnum = btnum; 471 cur->bc_blocklog = mp->m_sb.sb_blocklog; 472 cur->bc_ops = &xfs_allocbt_ops; 473 474 if (btnum == XFS_BTNUM_CNT) { 475 cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); 476 cur->bc_flags = XFS_BTREE_LASTREC_UPDATE; 477 } else { 478 cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); 479 } 480 481 cur->bc_private.a.agbp = agbp; 482 cur->bc_private.a.agno = agno; 483 484 if (xfs_sb_version_hascrc(&mp->m_sb)) 485 cur->bc_flags |= XFS_BTREE_CRC_BLOCKS; 486 487 return cur; 488 } 489 490 /* 491 * Calculate number of records in an alloc btree block. 492 */ 493 int 494 xfs_allocbt_maxrecs( 495 struct xfs_mount *mp, 496 int blocklen, 497 int leaf) 498 { 499 blocklen -= XFS_ALLOC_BLOCK_LEN(mp); 500 501 if (leaf) 502 return blocklen / sizeof(xfs_alloc_rec_t); 503 return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t)); 504 } 505