1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_sb.h" 14 #include "xfs_mount.h" 15 #include "xfs_inode.h" 16 #include "xfs_btree.h" 17 #include "xfs_ialloc.h" 18 #include "xfs_ialloc_btree.h" 19 #include "xfs_alloc.h" 20 #include "xfs_errortag.h" 21 #include "xfs_error.h" 22 #include "xfs_bmap.h" 23 #include "xfs_trans.h" 24 #include "xfs_buf_item.h" 25 #include "xfs_icreate_item.h" 26 #include "xfs_icache.h" 27 #include "xfs_trace.h" 28 #include "xfs_log.h" 29 #include "xfs_rmap.h" 30 31 /* 32 * Lookup a record by ino in the btree given by cur. 33 */ 34 int /* error */ 35 xfs_inobt_lookup( 36 struct xfs_btree_cur *cur, /* btree cursor */ 37 xfs_agino_t ino, /* starting inode of chunk */ 38 xfs_lookup_t dir, /* <=, >=, == */ 39 int *stat) /* success/failure */ 40 { 41 cur->bc_rec.i.ir_startino = ino; 42 cur->bc_rec.i.ir_holemask = 0; 43 cur->bc_rec.i.ir_count = 0; 44 cur->bc_rec.i.ir_freecount = 0; 45 cur->bc_rec.i.ir_free = 0; 46 return xfs_btree_lookup(cur, dir, stat); 47 } 48 49 /* 50 * Update the record referred to by cur to the value given. 51 * This either works (return 0) or gets an EFSCORRUPTED error. 52 */ 53 STATIC int /* error */ 54 xfs_inobt_update( 55 struct xfs_btree_cur *cur, /* btree cursor */ 56 xfs_inobt_rec_incore_t *irec) /* btree record */ 57 { 58 union xfs_btree_rec rec; 59 60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); 61 if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) { 62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask); 63 rec.inobt.ir_u.sp.ir_count = irec->ir_count; 64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount; 65 } else { 66 /* ir_holemask/ir_count not supported on-disk */ 67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount); 68 } 69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free); 70 return xfs_btree_update(cur, &rec); 71 } 72 73 /* Convert on-disk btree record to incore inobt record. */ 74 void 75 xfs_inobt_btrec_to_irec( 76 struct xfs_mount *mp, 77 union xfs_btree_rec *rec, 78 struct xfs_inobt_rec_incore *irec) 79 { 80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); 81 if (xfs_sb_version_hassparseinodes(&mp->m_sb)) { 82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask); 83 irec->ir_count = rec->inobt.ir_u.sp.ir_count; 84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount; 85 } else { 86 /* 87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded 88 * values for full inode chunks. 89 */ 90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL; 91 irec->ir_count = XFS_INODES_PER_CHUNK; 92 irec->ir_freecount = 93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount); 94 } 95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free); 96 } 97 98 /* 99 * Get the data from the pointed-to record. 
100 */ 101 int 102 xfs_inobt_get_rec( 103 struct xfs_btree_cur *cur, 104 struct xfs_inobt_rec_incore *irec, 105 int *stat) 106 { 107 struct xfs_mount *mp = cur->bc_mp; 108 xfs_agnumber_t agno = cur->bc_private.a.agno; 109 union xfs_btree_rec *rec; 110 int error; 111 uint64_t realfree; 112 113 error = xfs_btree_get_rec(cur, &rec, stat); 114 if (error || *stat == 0) 115 return error; 116 117 xfs_inobt_btrec_to_irec(mp, rec, irec); 118 119 if (!xfs_verify_agino(mp, agno, irec->ir_startino)) 120 goto out_bad_rec; 121 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT || 122 irec->ir_count > XFS_INODES_PER_CHUNK) 123 goto out_bad_rec; 124 if (irec->ir_freecount > XFS_INODES_PER_CHUNK) 125 goto out_bad_rec; 126 127 /* if there are no holes, return the first available offset */ 128 if (!xfs_inobt_issparse(irec->ir_holemask)) 129 realfree = irec->ir_free; 130 else 131 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec); 132 if (hweight64(realfree) != irec->ir_freecount) 133 goto out_bad_rec; 134 135 return 0; 136 137 out_bad_rec: 138 xfs_warn(mp, 139 "%s Inode BTree record corruption in AG %d detected!", 140 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno); 141 xfs_warn(mp, 142 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", 143 irec->ir_startino, irec->ir_count, irec->ir_freecount, 144 irec->ir_free, irec->ir_holemask); 145 return -EFSCORRUPTED; 146 } 147 148 /* 149 * Insert a single inobt record. Cursor must already point to desired location. 150 */ 151 int 152 xfs_inobt_insert_rec( 153 struct xfs_btree_cur *cur, 154 uint16_t holemask, 155 uint8_t count, 156 int32_t freecount, 157 xfs_inofree_t free, 158 int *stat) 159 { 160 cur->bc_rec.i.ir_holemask = holemask; 161 cur->bc_rec.i.ir_count = count; 162 cur->bc_rec.i.ir_freecount = freecount; 163 cur->bc_rec.i.ir_free = free; 164 return xfs_btree_insert(cur, stat); 165 } 166 167 /* 168 * Insert records describing a newly allocated inode chunk into the inobt. 169 */ 170 STATIC int 171 xfs_inobt_insert( 172 struct xfs_mount *mp, 173 struct xfs_trans *tp, 174 struct xfs_buf *agbp, 175 xfs_agino_t newino, 176 xfs_agino_t newlen, 177 xfs_btnum_t btnum) 178 { 179 struct xfs_btree_cur *cur; 180 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); 181 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); 182 xfs_agino_t thisino; 183 int i; 184 int error; 185 186 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum); 187 188 for (thisino = newino; 189 thisino < newino + newlen; 190 thisino += XFS_INODES_PER_CHUNK) { 191 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); 192 if (error) { 193 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 194 return error; 195 } 196 ASSERT(i == 0); 197 198 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL, 199 XFS_INODES_PER_CHUNK, 200 XFS_INODES_PER_CHUNK, 201 XFS_INOBT_ALL_FREE, &i); 202 if (error) { 203 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 204 return error; 205 } 206 ASSERT(i == 1); 207 } 208 209 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 210 211 return 0; 212 } 213 214 /* 215 * Verify that the number of free inodes in the AGI is correct. 
216 */ 217 #ifdef DEBUG 218 STATIC int 219 xfs_check_agi_freecount( 220 struct xfs_btree_cur *cur, 221 struct xfs_agi *agi) 222 { 223 if (cur->bc_nlevels == 1) { 224 xfs_inobt_rec_incore_t rec; 225 int freecount = 0; 226 int error; 227 int i; 228 229 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 230 if (error) 231 return error; 232 233 do { 234 error = xfs_inobt_get_rec(cur, &rec, &i); 235 if (error) 236 return error; 237 238 if (i) { 239 freecount += rec.ir_freecount; 240 error = xfs_btree_increment(cur, 0, &i); 241 if (error) 242 return error; 243 } 244 } while (i == 1); 245 246 if (!XFS_FORCED_SHUTDOWN(cur->bc_mp)) 247 ASSERT(freecount == be32_to_cpu(agi->agi_freecount)); 248 } 249 return 0; 250 } 251 #else 252 #define xfs_check_agi_freecount(cur, agi) 0 253 #endif 254 255 /* 256 * Initialise a new set of inodes. When called without a transaction context 257 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather 258 * than logging them (which in a transaction context puts them into the AIL 259 * for writeback rather than the xfsbufd queue). 260 */ 261 int 262 xfs_ialloc_inode_init( 263 struct xfs_mount *mp, 264 struct xfs_trans *tp, 265 struct list_head *buffer_list, 266 int icount, 267 xfs_agnumber_t agno, 268 xfs_agblock_t agbno, 269 xfs_agblock_t length, 270 unsigned int gen) 271 { 272 struct xfs_buf *fbuf; 273 struct xfs_dinode *free; 274 int nbufs; 275 int version; 276 int i, j; 277 xfs_daddr_t d; 278 xfs_ino_t ino = 0; 279 int error; 280 281 /* 282 * Loop over the new block(s), filling in the inodes. For small block 283 * sizes, manipulate the inodes in buffers which are multiples of the 284 * blocks size. 285 */ 286 nbufs = length / M_IGEO(mp)->blocks_per_cluster; 287 288 /* 289 * Figure out what version number to use in the inodes we create. If 290 * the superblock version has caught up to the one that supports the new 291 * inode format, then use the new inode version. Otherwise use the old 292 * version so that old kernels will continue to be able to use the file 293 * system. 294 * 295 * For v3 inodes, we also need to write the inode number into the inode, 296 * so calculate the first inode number of the chunk here as 297 * XFS_AGB_TO_AGINO() only works within a filesystem block, not 298 * across multiple filesystem blocks (such as a cluster) and so cannot 299 * be used in the cluster buffer loop below. 300 * 301 * Further, because we are writing the inode directly into the buffer 302 * and calculating a CRC on the entire inode, we have ot log the entire 303 * inode so that the entire range the CRC covers is present in the log. 304 * That means for v3 inode we log the entire buffer rather than just the 305 * inode cores. 306 */ 307 if (xfs_sb_version_hascrc(&mp->m_sb)) { 308 version = 3; 309 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno)); 310 311 /* 312 * log the initialisation that is about to take place as an 313 * logical operation. This means the transaction does not 314 * need to log the physical changes to the inode buffers as log 315 * recovery will know what initialisation is actually needed. 316 * Hence we only need to log the buffers as "ordered" buffers so 317 * they track in the AIL as if they were physically logged. 318 */ 319 if (tp) 320 xfs_icreate_log(tp, agno, agbno, icount, 321 mp->m_sb.sb_inodesize, length, gen); 322 } else 323 version = 2; 324 325 for (j = 0; j < nbufs; j++) { 326 /* 327 * Get the block. 
328 */ 329 d = XFS_AGB_TO_DADDR(mp, agno, agbno + 330 (j * M_IGEO(mp)->blocks_per_cluster)); 331 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, 332 mp->m_bsize * M_IGEO(mp)->blocks_per_cluster, 333 XBF_UNMAPPED, &fbuf); 334 if (error) 335 return error; 336 337 /* Initialize the inode buffers and log them appropriately. */ 338 fbuf->b_ops = &xfs_inode_buf_ops; 339 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); 340 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) { 341 int ioffset = i << mp->m_sb.sb_inodelog; 342 uint isize = xfs_dinode_size(version); 343 344 free = xfs_make_iptr(mp, fbuf, i); 345 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); 346 free->di_version = version; 347 free->di_gen = cpu_to_be32(gen); 348 free->di_next_unlinked = cpu_to_be32(NULLAGINO); 349 350 if (version == 3) { 351 free->di_ino = cpu_to_be64(ino); 352 ino++; 353 uuid_copy(&free->di_uuid, 354 &mp->m_sb.sb_meta_uuid); 355 xfs_dinode_calc_crc(mp, free); 356 } else if (tp) { 357 /* just log the inode core */ 358 xfs_trans_log_buf(tp, fbuf, ioffset, 359 ioffset + isize - 1); 360 } 361 } 362 363 if (tp) { 364 /* 365 * Mark the buffer as an inode allocation buffer so it 366 * sticks in AIL at the point of this allocation 367 * transaction. This ensures the they are on disk before 368 * the tail of the log can be moved past this 369 * transaction (i.e. by preventing relogging from moving 370 * it forward in the log). 371 */ 372 xfs_trans_inode_alloc_buf(tp, fbuf); 373 if (version == 3) { 374 /* 375 * Mark the buffer as ordered so that they are 376 * not physically logged in the transaction but 377 * still tracked in the AIL as part of the 378 * transaction and pin the log appropriately. 379 */ 380 xfs_trans_ordered_buf(tp, fbuf); 381 } 382 } else { 383 fbuf->b_flags |= XBF_DONE; 384 xfs_buf_delwri_queue(fbuf, buffer_list); 385 xfs_buf_relse(fbuf); 386 } 387 } 388 return 0; 389 } 390 391 /* 392 * Align startino and allocmask for a recently allocated sparse chunk such that 393 * they are fit for insertion (or merge) into the on-disk inode btrees. 394 * 395 * Background: 396 * 397 * When enabled, sparse inode support increases the inode alignment from cluster 398 * size to inode chunk size. This means that the minimum range between two 399 * non-adjacent inode records in the inobt is large enough for a full inode 400 * record. This allows for cluster sized, cluster aligned block allocation 401 * without need to worry about whether the resulting inode record overlaps with 402 * another record in the tree. Without this basic rule, we would have to deal 403 * with the consequences of overlap by potentially undoing recent allocations in 404 * the inode allocation codepath. 405 * 406 * Because of this alignment rule (which is enforced on mount), there are two 407 * inobt possibilities for newly allocated sparse chunks. One is that the 408 * aligned inode record for the chunk covers a range of inodes not already 409 * covered in the inobt (i.e., it is safe to insert a new sparse record). The 410 * other is that a record already exists at the aligned startino that considers 411 * the newly allocated range as sparse. In the latter case, record content is 412 * merged in hope that sparse inode chunks fill to full chunks over time. 
413 */ 414 STATIC void 415 xfs_align_sparse_ino( 416 struct xfs_mount *mp, 417 xfs_agino_t *startino, 418 uint16_t *allocmask) 419 { 420 xfs_agblock_t agbno; 421 xfs_agblock_t mod; 422 int offset; 423 424 agbno = XFS_AGINO_TO_AGBNO(mp, *startino); 425 mod = agbno % mp->m_sb.sb_inoalignmt; 426 if (!mod) 427 return; 428 429 /* calculate the inode offset and align startino */ 430 offset = XFS_AGB_TO_AGINO(mp, mod); 431 *startino -= offset; 432 433 /* 434 * Since startino has been aligned down, left shift allocmask such that 435 * it continues to represent the same physical inodes relative to the 436 * new startino. 437 */ 438 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT; 439 } 440 441 /* 442 * Determine whether the source inode record can merge into the target. Both 443 * records must be sparse, the inode ranges must match and there must be no 444 * allocation overlap between the records. 445 */ 446 STATIC bool 447 __xfs_inobt_can_merge( 448 struct xfs_inobt_rec_incore *trec, /* tgt record */ 449 struct xfs_inobt_rec_incore *srec) /* src record */ 450 { 451 uint64_t talloc; 452 uint64_t salloc; 453 454 /* records must cover the same inode range */ 455 if (trec->ir_startino != srec->ir_startino) 456 return false; 457 458 /* both records must be sparse */ 459 if (!xfs_inobt_issparse(trec->ir_holemask) || 460 !xfs_inobt_issparse(srec->ir_holemask)) 461 return false; 462 463 /* both records must track some inodes */ 464 if (!trec->ir_count || !srec->ir_count) 465 return false; 466 467 /* can't exceed capacity of a full record */ 468 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK) 469 return false; 470 471 /* verify there is no allocation overlap */ 472 talloc = xfs_inobt_irec_to_allocmask(trec); 473 salloc = xfs_inobt_irec_to_allocmask(srec); 474 if (talloc & salloc) 475 return false; 476 477 return true; 478 } 479 480 /* 481 * Merge the source inode record into the target. The caller must call 482 * __xfs_inobt_can_merge() to ensure the merge is valid. 483 */ 484 STATIC void 485 __xfs_inobt_rec_merge( 486 struct xfs_inobt_rec_incore *trec, /* target */ 487 struct xfs_inobt_rec_incore *srec) /* src */ 488 { 489 ASSERT(trec->ir_startino == srec->ir_startino); 490 491 /* combine the counts */ 492 trec->ir_count += srec->ir_count; 493 trec->ir_freecount += srec->ir_freecount; 494 495 /* 496 * Merge the holemask and free mask. For both fields, 0 bits refer to 497 * allocated inodes. We combine the allocated ranges with bitwise AND. 498 */ 499 trec->ir_holemask &= srec->ir_holemask; 500 trec->ir_free &= srec->ir_free; 501 } 502 503 /* 504 * Insert a new sparse inode chunk into the associated inode btree. The inode 505 * record for the sparse chunk is pre-aligned to a startino that should match 506 * any pre-existing sparse inode record in the tree. This allows sparse chunks 507 * to fill over time. 508 * 509 * This function supports two modes of handling preexisting records depending on 510 * the merge flag. If merge is true, the provided record is merged with the 511 * existing record and updated in place. The merged record is returned in nrec. 512 * If merge is false, an existing record is replaced with the provided record. 513 * If no preexisting record exists, the provided record is always inserted. 514 * 515 * It is considered corruption if a merge is requested and not possible. Given 516 * the sparse inode alignment constraints, this should never happen. 
517 */ 518 STATIC int 519 xfs_inobt_insert_sprec( 520 struct xfs_mount *mp, 521 struct xfs_trans *tp, 522 struct xfs_buf *agbp, 523 int btnum, 524 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */ 525 bool merge) /* merge or replace */ 526 { 527 struct xfs_btree_cur *cur; 528 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); 529 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); 530 int error; 531 int i; 532 struct xfs_inobt_rec_incore rec; 533 534 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum); 535 536 /* the new record is pre-aligned so we know where to look */ 537 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i); 538 if (error) 539 goto error; 540 /* if nothing there, insert a new record and return */ 541 if (i == 0) { 542 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask, 543 nrec->ir_count, nrec->ir_freecount, 544 nrec->ir_free, &i); 545 if (error) 546 goto error; 547 if (XFS_IS_CORRUPT(mp, i != 1)) { 548 error = -EFSCORRUPTED; 549 goto error; 550 } 551 552 goto out; 553 } 554 555 /* 556 * A record exists at this startino. Merge or replace the record 557 * depending on what we've been asked to do. 558 */ 559 if (merge) { 560 error = xfs_inobt_get_rec(cur, &rec, &i); 561 if (error) 562 goto error; 563 if (XFS_IS_CORRUPT(mp, i != 1)) { 564 error = -EFSCORRUPTED; 565 goto error; 566 } 567 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) { 568 error = -EFSCORRUPTED; 569 goto error; 570 } 571 572 /* 573 * This should never fail. If we have coexisting records that 574 * cannot merge, something is seriously wrong. 575 */ 576 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) { 577 error = -EFSCORRUPTED; 578 goto error; 579 } 580 581 trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino, 582 rec.ir_holemask, nrec->ir_startino, 583 nrec->ir_holemask); 584 585 /* merge to nrec to output the updated record */ 586 __xfs_inobt_rec_merge(nrec, &rec); 587 588 trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino, 589 nrec->ir_holemask); 590 591 error = xfs_inobt_rec_check_count(mp, nrec); 592 if (error) 593 goto error; 594 } 595 596 error = xfs_inobt_update(cur, nrec); 597 if (error) 598 goto error; 599 600 out: 601 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 602 return 0; 603 error: 604 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 605 return error; 606 } 607 608 /* 609 * Allocate new inodes in the allocation group specified by agbp. 610 * Return 0 for success, else error code. 611 */ 612 STATIC int 613 xfs_ialloc_ag_alloc( 614 struct xfs_trans *tp, 615 struct xfs_buf *agbp, 616 int *alloc) 617 { 618 struct xfs_agi *agi; 619 struct xfs_alloc_arg args; 620 xfs_agnumber_t agno; 621 int error; 622 xfs_agino_t newino; /* new first inode's number */ 623 xfs_agino_t newlen; /* new number of inodes */ 624 int isaligned = 0; /* inode allocation at stripe */ 625 /* unit boundary */ 626 /* init. 
to full chunk */ 627 uint16_t allocmask = (uint16_t) -1; 628 struct xfs_inobt_rec_incore rec; 629 struct xfs_perag *pag; 630 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp); 631 int do_sparse = 0; 632 633 memset(&args, 0, sizeof(args)); 634 args.tp = tp; 635 args.mp = tp->t_mountp; 636 args.fsbno = NULLFSBLOCK; 637 args.oinfo = XFS_RMAP_OINFO_INODES; 638 639 #ifdef DEBUG 640 /* randomly do sparse inode allocations */ 641 if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) && 642 igeo->ialloc_min_blks < igeo->ialloc_blks) 643 do_sparse = prandom_u32() & 1; 644 #endif 645 646 /* 647 * Locking will ensure that we don't have two callers in here 648 * at one time. 649 */ 650 newlen = igeo->ialloc_inos; 651 if (igeo->maxicount && 652 percpu_counter_read_positive(&args.mp->m_icount) + newlen > 653 igeo->maxicount) 654 return -ENOSPC; 655 args.minlen = args.maxlen = igeo->ialloc_blks; 656 /* 657 * First try to allocate inodes contiguous with the last-allocated 658 * chunk of inodes. If the filesystem is striped, this will fill 659 * an entire stripe unit with inodes. 660 */ 661 agi = XFS_BUF_TO_AGI(agbp); 662 newino = be32_to_cpu(agi->agi_newino); 663 agno = be32_to_cpu(agi->agi_seqno); 664 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + 665 igeo->ialloc_blks; 666 if (do_sparse) 667 goto sparse_alloc; 668 if (likely(newino != NULLAGINO && 669 (args.agbno < be32_to_cpu(agi->agi_length)))) { 670 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); 671 args.type = XFS_ALLOCTYPE_THIS_BNO; 672 args.prod = 1; 673 674 /* 675 * We need to take into account alignment here to ensure that 676 * we don't modify the free list if we fail to have an exact 677 * block. If we don't have an exact match, and every oher 678 * attempt allocation attempt fails, we'll end up cancelling 679 * a dirty transaction and shutting down. 680 * 681 * For an exact allocation, alignment must be 1, 682 * however we need to take cluster alignment into account when 683 * fixing up the freelist. Use the minalignslop field to 684 * indicate that extra blocks might be required for alignment, 685 * but not to use them in the actual exact allocation. 686 */ 687 args.alignment = 1; 688 args.minalignslop = igeo->cluster_align - 1; 689 690 /* Allow space for the inode btree to split. */ 691 args.minleft = igeo->inobt_maxlevels - 1; 692 if ((error = xfs_alloc_vextent(&args))) 693 return error; 694 695 /* 696 * This request might have dirtied the transaction if the AG can 697 * satisfy the request, but the exact block was not available. 698 * If the allocation did fail, subsequent requests will relax 699 * the exact agbno requirement and increase the alignment 700 * instead. It is critical that the total size of the request 701 * (len + alignment + slop) does not increase from this point 702 * on, so reset minalignslop to ensure it is not included in 703 * subsequent requests. 704 */ 705 args.minalignslop = 0; 706 } 707 708 if (unlikely(args.fsbno == NULLFSBLOCK)) { 709 /* 710 * Set the alignment for the allocation. 711 * If stripe alignment is turned on then align at stripe unit 712 * boundary. 713 * If the cluster size is smaller than a filesystem block 714 * then we're doing I/O for inodes in filesystem block size 715 * pieces, so don't need alignment anyway. 
716 */ 717 isaligned = 0; 718 if (igeo->ialloc_align) { 719 ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); 720 args.alignment = args.mp->m_dalign; 721 isaligned = 1; 722 } else 723 args.alignment = igeo->cluster_align; 724 /* 725 * Need to figure out where to allocate the inode blocks. 726 * Ideally they should be spaced out through the a.g. 727 * For now, just allocate blocks up front. 728 */ 729 args.agbno = be32_to_cpu(agi->agi_root); 730 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); 731 /* 732 * Allocate a fixed-size extent of inodes. 733 */ 734 args.type = XFS_ALLOCTYPE_NEAR_BNO; 735 args.prod = 1; 736 /* 737 * Allow space for the inode btree to split. 738 */ 739 args.minleft = igeo->inobt_maxlevels - 1; 740 if ((error = xfs_alloc_vextent(&args))) 741 return error; 742 } 743 744 /* 745 * If stripe alignment is turned on, then try again with cluster 746 * alignment. 747 */ 748 if (isaligned && args.fsbno == NULLFSBLOCK) { 749 args.type = XFS_ALLOCTYPE_NEAR_BNO; 750 args.agbno = be32_to_cpu(agi->agi_root); 751 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); 752 args.alignment = igeo->cluster_align; 753 if ((error = xfs_alloc_vextent(&args))) 754 return error; 755 } 756 757 /* 758 * Finally, try a sparse allocation if the filesystem supports it and 759 * the sparse allocation length is smaller than a full chunk. 760 */ 761 if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) && 762 igeo->ialloc_min_blks < igeo->ialloc_blks && 763 args.fsbno == NULLFSBLOCK) { 764 sparse_alloc: 765 args.type = XFS_ALLOCTYPE_NEAR_BNO; 766 args.agbno = be32_to_cpu(agi->agi_root); 767 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); 768 args.alignment = args.mp->m_sb.sb_spino_align; 769 args.prod = 1; 770 771 args.minlen = igeo->ialloc_min_blks; 772 args.maxlen = args.minlen; 773 774 /* 775 * The inode record will be aligned to full chunk size. We must 776 * prevent sparse allocation from AG boundaries that result in 777 * invalid inode records, such as records that start at agbno 0 778 * or extend beyond the AG. 779 * 780 * Set min agbno to the first aligned, non-zero agbno and max to 781 * the last aligned agbno that is at least one full chunk from 782 * the end of the AG. 783 */ 784 args.min_agbno = args.mp->m_sb.sb_inoalignmt; 785 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, 786 args.mp->m_sb.sb_inoalignmt) - 787 igeo->ialloc_blks; 788 789 error = xfs_alloc_vextent(&args); 790 if (error) 791 return error; 792 793 newlen = XFS_AGB_TO_AGINO(args.mp, args.len); 794 ASSERT(newlen <= XFS_INODES_PER_CHUNK); 795 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1; 796 } 797 798 if (args.fsbno == NULLFSBLOCK) { 799 *alloc = 0; 800 return 0; 801 } 802 ASSERT(args.len == args.minlen); 803 804 /* 805 * Stamp and write the inode buffers. 806 * 807 * Seed the new inode cluster with a random generation number. This 808 * prevents short-term reuse of generation numbers if a chunk is 809 * freed and then immediately reallocated. We use random numbers 810 * rather than a linear progression to prevent the next generation 811 * number from being easily guessable. 812 */ 813 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno, 814 args.agbno, args.len, prandom_u32()); 815 816 if (error) 817 return error; 818 /* 819 * Convert the results. 820 */ 821 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno); 822 823 if (xfs_inobt_issparse(~allocmask)) { 824 /* 825 * We've allocated a sparse chunk. Align the startino and mask. 
826 */ 827 xfs_align_sparse_ino(args.mp, &newino, &allocmask); 828 829 rec.ir_startino = newino; 830 rec.ir_holemask = ~allocmask; 831 rec.ir_count = newlen; 832 rec.ir_freecount = newlen; 833 rec.ir_free = XFS_INOBT_ALL_FREE; 834 835 /* 836 * Insert the sparse record into the inobt and allow for a merge 837 * if necessary. If a merge does occur, rec is updated to the 838 * merged record. 839 */ 840 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO, 841 &rec, true); 842 if (error == -EFSCORRUPTED) { 843 xfs_alert(args.mp, 844 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u", 845 XFS_AGINO_TO_INO(args.mp, agno, 846 rec.ir_startino), 847 rec.ir_holemask, rec.ir_count); 848 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE); 849 } 850 if (error) 851 return error; 852 853 /* 854 * We can't merge the part we've just allocated as for the inobt 855 * due to finobt semantics. The original record may or may not 856 * exist independent of whether physical inodes exist in this 857 * sparse chunk. 858 * 859 * We must update the finobt record based on the inobt record. 860 * rec contains the fully merged and up to date inobt record 861 * from the previous call. Set merge false to replace any 862 * existing record with this one. 863 */ 864 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) { 865 error = xfs_inobt_insert_sprec(args.mp, tp, agbp, 866 XFS_BTNUM_FINO, &rec, 867 false); 868 if (error) 869 return error; 870 } 871 } else { 872 /* full chunk - insert new records to both btrees */ 873 error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen, 874 XFS_BTNUM_INO); 875 if (error) 876 return error; 877 878 if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) { 879 error = xfs_inobt_insert(args.mp, tp, agbp, newino, 880 newlen, XFS_BTNUM_FINO); 881 if (error) 882 return error; 883 } 884 } 885 886 /* 887 * Update AGI counts and newino. 888 */ 889 be32_add_cpu(&agi->agi_count, newlen); 890 be32_add_cpu(&agi->agi_freecount, newlen); 891 pag = xfs_perag_get(args.mp, agno); 892 pag->pagi_freecount += newlen; 893 pag->pagi_count += newlen; 894 xfs_perag_put(pag); 895 agi->agi_newino = cpu_to_be32(newino); 896 897 /* 898 * Log allocation group header fields 899 */ 900 xfs_ialloc_log_agi(tp, agbp, 901 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); 902 /* 903 * Modify/log superblock values for inode count and inode free count. 904 */ 905 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); 906 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); 907 *alloc = 1; 908 return 0; 909 } 910 911 STATIC xfs_agnumber_t 912 xfs_ialloc_next_ag( 913 xfs_mount_t *mp) 914 { 915 xfs_agnumber_t agno; 916 917 spin_lock(&mp->m_agirotor_lock); 918 agno = mp->m_agirotor; 919 if (++mp->m_agirotor >= mp->m_maxagi) 920 mp->m_agirotor = 0; 921 spin_unlock(&mp->m_agirotor_lock); 922 923 return agno; 924 } 925 926 /* 927 * Select an allocation group to look for a free inode in, based on the parent 928 * inode and the mode. Return the allocation group buffer. 
929 */ 930 STATIC xfs_agnumber_t 931 xfs_ialloc_ag_select( 932 xfs_trans_t *tp, /* transaction pointer */ 933 xfs_ino_t parent, /* parent directory inode number */ 934 umode_t mode) /* bits set to indicate file type */ 935 { 936 xfs_agnumber_t agcount; /* number of ag's in the filesystem */ 937 xfs_agnumber_t agno; /* current ag number */ 938 int flags; /* alloc buffer locking flags */ 939 xfs_extlen_t ineed; /* blocks needed for inode allocation */ 940 xfs_extlen_t longest = 0; /* longest extent available */ 941 xfs_mount_t *mp; /* mount point structure */ 942 int needspace; /* file mode implies space allocated */ 943 xfs_perag_t *pag; /* per allocation group data */ 944 xfs_agnumber_t pagno; /* parent (starting) ag number */ 945 int error; 946 947 /* 948 * Files of these types need at least one block if length > 0 949 * (and they won't fit in the inode, but that's hard to figure out). 950 */ 951 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); 952 mp = tp->t_mountp; 953 agcount = mp->m_maxagi; 954 if (S_ISDIR(mode)) 955 pagno = xfs_ialloc_next_ag(mp); 956 else { 957 pagno = XFS_INO_TO_AGNO(mp, parent); 958 if (pagno >= agcount) 959 pagno = 0; 960 } 961 962 ASSERT(pagno < agcount); 963 964 /* 965 * Loop through allocation groups, looking for one with a little 966 * free space in it. Note we don't look for free inodes, exactly. 967 * Instead, we include whether there is a need to allocate inodes 968 * to mean that blocks must be allocated for them, 969 * if none are currently free. 970 */ 971 agno = pagno; 972 flags = XFS_ALLOC_FLAG_TRYLOCK; 973 for (;;) { 974 pag = xfs_perag_get(mp, agno); 975 if (!pag->pagi_inodeok) { 976 xfs_ialloc_next_ag(mp); 977 goto nextag; 978 } 979 980 if (!pag->pagi_init) { 981 error = xfs_ialloc_pagi_init(mp, tp, agno); 982 if (error) 983 goto nextag; 984 } 985 986 if (pag->pagi_freecount) { 987 xfs_perag_put(pag); 988 return agno; 989 } 990 991 if (!pag->pagf_init) { 992 error = xfs_alloc_pagf_init(mp, tp, agno, flags); 993 if (error) 994 goto nextag; 995 } 996 997 /* 998 * Check that there is enough free space for the file plus a 999 * chunk of inodes if we need to allocate some. If this is the 1000 * first pass across the AGs, take into account the potential 1001 * space needed for alignment of inode chunks when checking the 1002 * longest contiguous free space in the AG - this prevents us 1003 * from getting ENOSPC because we have free space larger than 1004 * ialloc_blks but alignment constraints prevent us from using 1005 * it. 1006 * 1007 * If we can't find an AG with space for full alignment slack to 1008 * be taken into account, we must be near ENOSPC in all AGs. 1009 * Hence we don't include alignment for the second pass and so 1010 * if we fail allocation due to alignment issues then it is most 1011 * likely a real ENOSPC condition. 1012 */ 1013 ineed = M_IGEO(mp)->ialloc_min_blks; 1014 if (flags && ineed > 1) 1015 ineed += M_IGEO(mp)->cluster_align; 1016 longest = pag->pagf_longest; 1017 if (!longest) 1018 longest = pag->pagf_flcount > 0; 1019 1020 if (pag->pagf_freeblks >= needspace + ineed && 1021 longest >= ineed) { 1022 xfs_perag_put(pag); 1023 return agno; 1024 } 1025 nextag: 1026 xfs_perag_put(pag); 1027 /* 1028 * No point in iterating over the rest, if we're shutting 1029 * down. 
1030 */ 1031 if (XFS_FORCED_SHUTDOWN(mp)) 1032 return NULLAGNUMBER; 1033 agno++; 1034 if (agno >= agcount) 1035 agno = 0; 1036 if (agno == pagno) { 1037 if (flags == 0) 1038 return NULLAGNUMBER; 1039 flags = 0; 1040 } 1041 } 1042 } 1043 1044 /* 1045 * Try to retrieve the next record to the left/right from the current one. 1046 */ 1047 STATIC int 1048 xfs_ialloc_next_rec( 1049 struct xfs_btree_cur *cur, 1050 xfs_inobt_rec_incore_t *rec, 1051 int *done, 1052 int left) 1053 { 1054 int error; 1055 int i; 1056 1057 if (left) 1058 error = xfs_btree_decrement(cur, 0, &i); 1059 else 1060 error = xfs_btree_increment(cur, 0, &i); 1061 1062 if (error) 1063 return error; 1064 *done = !i; 1065 if (i) { 1066 error = xfs_inobt_get_rec(cur, rec, &i); 1067 if (error) 1068 return error; 1069 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1070 return -EFSCORRUPTED; 1071 } 1072 1073 return 0; 1074 } 1075 1076 STATIC int 1077 xfs_ialloc_get_rec( 1078 struct xfs_btree_cur *cur, 1079 xfs_agino_t agino, 1080 xfs_inobt_rec_incore_t *rec, 1081 int *done) 1082 { 1083 int error; 1084 int i; 1085 1086 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); 1087 if (error) 1088 return error; 1089 *done = !i; 1090 if (i) { 1091 error = xfs_inobt_get_rec(cur, rec, &i); 1092 if (error) 1093 return error; 1094 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1095 return -EFSCORRUPTED; 1096 } 1097 1098 return 0; 1099 } 1100 1101 /* 1102 * Return the offset of the first free inode in the record. If the inode chunk 1103 * is sparsely allocated, we convert the record holemask to inode granularity 1104 * and mask off the unallocated regions from the inode free mask. 1105 */ 1106 STATIC int 1107 xfs_inobt_first_free_inode( 1108 struct xfs_inobt_rec_incore *rec) 1109 { 1110 xfs_inofree_t realfree; 1111 1112 /* if there are no holes, return the first available offset */ 1113 if (!xfs_inobt_issparse(rec->ir_holemask)) 1114 return xfs_lowbit64(rec->ir_free); 1115 1116 realfree = xfs_inobt_irec_to_allocmask(rec); 1117 realfree &= rec->ir_free; 1118 1119 return xfs_lowbit64(realfree); 1120 } 1121 1122 /* 1123 * Allocate an inode using the inobt-only algorithm. 1124 */ 1125 STATIC int 1126 xfs_dialloc_ag_inobt( 1127 struct xfs_trans *tp, 1128 struct xfs_buf *agbp, 1129 xfs_ino_t parent, 1130 xfs_ino_t *inop) 1131 { 1132 struct xfs_mount *mp = tp->t_mountp; 1133 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); 1134 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); 1135 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1136 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1137 struct xfs_perag *pag; 1138 struct xfs_btree_cur *cur, *tcur; 1139 struct xfs_inobt_rec_incore rec, trec; 1140 xfs_ino_t ino; 1141 int error; 1142 int offset; 1143 int i, j; 1144 int searchdistance = 10; 1145 1146 pag = xfs_perag_get(mp, agno); 1147 1148 ASSERT(pag->pagi_init); 1149 ASSERT(pag->pagi_inodeok); 1150 ASSERT(pag->pagi_freecount > 0); 1151 1152 restart_pagno: 1153 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); 1154 /* 1155 * If pagino is 0 (this is the root inode allocation) use newino. 1156 * This must work because we've just allocated some. 1157 */ 1158 if (!pagino) 1159 pagino = be32_to_cpu(agi->agi_newino); 1160 1161 error = xfs_check_agi_freecount(cur, agi); 1162 if (error) 1163 goto error0; 1164 1165 /* 1166 * If in the same AG as the parent, try to get near the parent. 
1167 */ 1168 if (pagno == agno) { 1169 int doneleft; /* done, to the left */ 1170 int doneright; /* done, to the right */ 1171 1172 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 1173 if (error) 1174 goto error0; 1175 if (XFS_IS_CORRUPT(mp, i != 1)) { 1176 error = -EFSCORRUPTED; 1177 goto error0; 1178 } 1179 1180 error = xfs_inobt_get_rec(cur, &rec, &j); 1181 if (error) 1182 goto error0; 1183 if (XFS_IS_CORRUPT(mp, j != 1)) { 1184 error = -EFSCORRUPTED; 1185 goto error0; 1186 } 1187 1188 if (rec.ir_freecount > 0) { 1189 /* 1190 * Found a free inode in the same chunk 1191 * as the parent, done. 1192 */ 1193 goto alloc_inode; 1194 } 1195 1196 1197 /* 1198 * In the same AG as parent, but parent's chunk is full. 1199 */ 1200 1201 /* duplicate the cursor, search left & right simultaneously */ 1202 error = xfs_btree_dup_cursor(cur, &tcur); 1203 if (error) 1204 goto error0; 1205 1206 /* 1207 * Skip to last blocks looked up if same parent inode. 1208 */ 1209 if (pagino != NULLAGINO && 1210 pag->pagl_pagino == pagino && 1211 pag->pagl_leftrec != NULLAGINO && 1212 pag->pagl_rightrec != NULLAGINO) { 1213 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, 1214 &trec, &doneleft); 1215 if (error) 1216 goto error1; 1217 1218 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, 1219 &rec, &doneright); 1220 if (error) 1221 goto error1; 1222 } else { 1223 /* search left with tcur, back up 1 record */ 1224 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); 1225 if (error) 1226 goto error1; 1227 1228 /* search right with cur, go forward 1 record. */ 1229 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); 1230 if (error) 1231 goto error1; 1232 } 1233 1234 /* 1235 * Loop until we find an inode chunk with a free inode. 1236 */ 1237 while (--searchdistance > 0 && (!doneleft || !doneright)) { 1238 int useleft; /* using left inode chunk this time */ 1239 1240 /* figure out the closer block if both are valid. */ 1241 if (!doneleft && !doneright) { 1242 useleft = pagino - 1243 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < 1244 rec.ir_startino - pagino; 1245 } else { 1246 useleft = !doneleft; 1247 } 1248 1249 /* free inodes to the left? */ 1250 if (useleft && trec.ir_freecount) { 1251 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1252 cur = tcur; 1253 1254 pag->pagl_leftrec = trec.ir_startino; 1255 pag->pagl_rightrec = rec.ir_startino; 1256 pag->pagl_pagino = pagino; 1257 rec = trec; 1258 goto alloc_inode; 1259 } 1260 1261 /* free inodes to the right? */ 1262 if (!useleft && rec.ir_freecount) { 1263 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1264 1265 pag->pagl_leftrec = trec.ir_startino; 1266 pag->pagl_rightrec = rec.ir_startino; 1267 pag->pagl_pagino = pagino; 1268 goto alloc_inode; 1269 } 1270 1271 /* get next record to check */ 1272 if (useleft) { 1273 error = xfs_ialloc_next_rec(tcur, &trec, 1274 &doneleft, 1); 1275 } else { 1276 error = xfs_ialloc_next_rec(cur, &rec, 1277 &doneright, 0); 1278 } 1279 if (error) 1280 goto error1; 1281 } 1282 1283 if (searchdistance <= 0) { 1284 /* 1285 * Not in range - save last search 1286 * location and allocate a new inode 1287 */ 1288 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1289 pag->pagl_leftrec = trec.ir_startino; 1290 pag->pagl_rightrec = rec.ir_startino; 1291 pag->pagl_pagino = pagino; 1292 1293 } else { 1294 /* 1295 * We've reached the end of the btree. because 1296 * we are only searching a small chunk of the 1297 * btree each search, there is obviously free 1298 * inodes closer to the parent inode than we 1299 * are now. 
restart the search again. 1300 */ 1301 pag->pagl_pagino = NULLAGINO; 1302 pag->pagl_leftrec = NULLAGINO; 1303 pag->pagl_rightrec = NULLAGINO; 1304 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1305 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1306 goto restart_pagno; 1307 } 1308 } 1309 1310 /* 1311 * In a different AG from the parent. 1312 * See if the most recently allocated block has any free. 1313 */ 1314 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1315 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1316 XFS_LOOKUP_EQ, &i); 1317 if (error) 1318 goto error0; 1319 1320 if (i == 1) { 1321 error = xfs_inobt_get_rec(cur, &rec, &j); 1322 if (error) 1323 goto error0; 1324 1325 if (j == 1 && rec.ir_freecount > 0) { 1326 /* 1327 * The last chunk allocated in the group 1328 * still has a free inode. 1329 */ 1330 goto alloc_inode; 1331 } 1332 } 1333 } 1334 1335 /* 1336 * None left in the last group, search the whole AG 1337 */ 1338 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1339 if (error) 1340 goto error0; 1341 if (XFS_IS_CORRUPT(mp, i != 1)) { 1342 error = -EFSCORRUPTED; 1343 goto error0; 1344 } 1345 1346 for (;;) { 1347 error = xfs_inobt_get_rec(cur, &rec, &i); 1348 if (error) 1349 goto error0; 1350 if (XFS_IS_CORRUPT(mp, i != 1)) { 1351 error = -EFSCORRUPTED; 1352 goto error0; 1353 } 1354 if (rec.ir_freecount > 0) 1355 break; 1356 error = xfs_btree_increment(cur, 0, &i); 1357 if (error) 1358 goto error0; 1359 if (XFS_IS_CORRUPT(mp, i != 1)) { 1360 error = -EFSCORRUPTED; 1361 goto error0; 1362 } 1363 } 1364 1365 alloc_inode: 1366 offset = xfs_inobt_first_free_inode(&rec); 1367 ASSERT(offset >= 0); 1368 ASSERT(offset < XFS_INODES_PER_CHUNK); 1369 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1370 XFS_INODES_PER_CHUNK) == 0); 1371 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); 1372 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1373 rec.ir_freecount--; 1374 error = xfs_inobt_update(cur, &rec); 1375 if (error) 1376 goto error0; 1377 be32_add_cpu(&agi->agi_freecount, -1); 1378 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1379 pag->pagi_freecount--; 1380 1381 error = xfs_check_agi_freecount(cur, agi); 1382 if (error) 1383 goto error0; 1384 1385 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1386 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1387 xfs_perag_put(pag); 1388 *inop = ino; 1389 return 0; 1390 error1: 1391 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 1392 error0: 1393 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1394 xfs_perag_put(pag); 1395 return error; 1396 } 1397 1398 /* 1399 * Use the free inode btree to allocate an inode based on distance from the 1400 * parent. Note that the provided cursor may be deleted and replaced. 1401 */ 1402 STATIC int 1403 xfs_dialloc_ag_finobt_near( 1404 xfs_agino_t pagino, 1405 struct xfs_btree_cur **ocur, 1406 struct xfs_inobt_rec_incore *rec) 1407 { 1408 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ 1409 struct xfs_btree_cur *rcur; /* right search cursor */ 1410 struct xfs_inobt_rec_incore rrec; 1411 int error; 1412 int i, j; 1413 1414 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); 1415 if (error) 1416 return error; 1417 1418 if (i == 1) { 1419 error = xfs_inobt_get_rec(lcur, rec, &i); 1420 if (error) 1421 return error; 1422 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) 1423 return -EFSCORRUPTED; 1424 1425 /* 1426 * See if we've landed in the parent inode record. The finobt 1427 * only tracks chunks with at least one free inode, so record 1428 * existence is enough. 
1429 */ 1430 if (pagino >= rec->ir_startino && 1431 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) 1432 return 0; 1433 } 1434 1435 error = xfs_btree_dup_cursor(lcur, &rcur); 1436 if (error) 1437 return error; 1438 1439 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); 1440 if (error) 1441 goto error_rcur; 1442 if (j == 1) { 1443 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1444 if (error) 1445 goto error_rcur; 1446 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) { 1447 error = -EFSCORRUPTED; 1448 goto error_rcur; 1449 } 1450 } 1451 1452 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) { 1453 error = -EFSCORRUPTED; 1454 goto error_rcur; 1455 } 1456 if (i == 1 && j == 1) { 1457 /* 1458 * Both the left and right records are valid. Choose the closer 1459 * inode chunk to the target. 1460 */ 1461 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > 1462 (rrec.ir_startino - pagino)) { 1463 *rec = rrec; 1464 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1465 *ocur = rcur; 1466 } else { 1467 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1468 } 1469 } else if (j == 1) { 1470 /* only the right record is valid */ 1471 *rec = rrec; 1472 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1473 *ocur = rcur; 1474 } else if (i == 1) { 1475 /* only the left record is valid */ 1476 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1477 } 1478 1479 return 0; 1480 1481 error_rcur: 1482 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); 1483 return error; 1484 } 1485 1486 /* 1487 * Use the free inode btree to find a free inode based on a newino hint. If 1488 * the hint is NULL, find the first free inode in the AG. 1489 */ 1490 STATIC int 1491 xfs_dialloc_ag_finobt_newino( 1492 struct xfs_agi *agi, 1493 struct xfs_btree_cur *cur, 1494 struct xfs_inobt_rec_incore *rec) 1495 { 1496 int error; 1497 int i; 1498 1499 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1500 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1501 XFS_LOOKUP_EQ, &i); 1502 if (error) 1503 return error; 1504 if (i == 1) { 1505 error = xfs_inobt_get_rec(cur, rec, &i); 1506 if (error) 1507 return error; 1508 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1509 return -EFSCORRUPTED; 1510 return 0; 1511 } 1512 } 1513 1514 /* 1515 * Find the first inode available in the AG. 1516 */ 1517 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1518 if (error) 1519 return error; 1520 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1521 return -EFSCORRUPTED; 1522 1523 error = xfs_inobt_get_rec(cur, rec, &i); 1524 if (error) 1525 return error; 1526 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1527 return -EFSCORRUPTED; 1528 1529 return 0; 1530 } 1531 1532 /* 1533 * Update the inobt based on a modification made to the finobt. Also ensure that 1534 * the records from both trees are equivalent post-modification. 
1535 */ 1536 STATIC int 1537 xfs_dialloc_ag_update_inobt( 1538 struct xfs_btree_cur *cur, /* inobt cursor */ 1539 struct xfs_inobt_rec_incore *frec, /* finobt record */ 1540 int offset) /* inode offset */ 1541 { 1542 struct xfs_inobt_rec_incore rec; 1543 int error; 1544 int i; 1545 1546 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1547 if (error) 1548 return error; 1549 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1550 return -EFSCORRUPTED; 1551 1552 error = xfs_inobt_get_rec(cur, &rec, &i); 1553 if (error) 1554 return error; 1555 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1556 return -EFSCORRUPTED; 1557 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1558 XFS_INODES_PER_CHUNK) == 0); 1559 1560 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1561 rec.ir_freecount--; 1562 1563 if (XFS_IS_CORRUPT(cur->bc_mp, 1564 rec.ir_free != frec->ir_free || 1565 rec.ir_freecount != frec->ir_freecount)) 1566 return -EFSCORRUPTED; 1567 1568 return xfs_inobt_update(cur, &rec); 1569 } 1570 1571 /* 1572 * Allocate an inode using the free inode btree, if available. Otherwise, fall 1573 * back to the inobt search algorithm. 1574 * 1575 * The caller selected an AG for us, and made sure that free inodes are 1576 * available. 1577 */ 1578 STATIC int 1579 xfs_dialloc_ag( 1580 struct xfs_trans *tp, 1581 struct xfs_buf *agbp, 1582 xfs_ino_t parent, 1583 xfs_ino_t *inop) 1584 { 1585 struct xfs_mount *mp = tp->t_mountp; 1586 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); 1587 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); 1588 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1589 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1590 struct xfs_perag *pag; 1591 struct xfs_btree_cur *cur; /* finobt cursor */ 1592 struct xfs_btree_cur *icur; /* inobt cursor */ 1593 struct xfs_inobt_rec_incore rec; 1594 xfs_ino_t ino; 1595 int error; 1596 int offset; 1597 int i; 1598 1599 if (!xfs_sb_version_hasfinobt(&mp->m_sb)) 1600 return xfs_dialloc_ag_inobt(tp, agbp, parent, inop); 1601 1602 pag = xfs_perag_get(mp, agno); 1603 1604 /* 1605 * If pagino is 0 (this is the root inode allocation) use newino. 1606 * This must work because we've just allocated some. 1607 */ 1608 if (!pagino) 1609 pagino = be32_to_cpu(agi->agi_newino); 1610 1611 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO); 1612 1613 error = xfs_check_agi_freecount(cur, agi); 1614 if (error) 1615 goto error_cur; 1616 1617 /* 1618 * The search algorithm depends on whether we're in the same AG as the 1619 * parent. If so, find the closest available inode to the parent. If 1620 * not, consider the agi hint or find the first free inode in the AG. 1621 */ 1622 if (agno == pagno) 1623 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); 1624 else 1625 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); 1626 if (error) 1627 goto error_cur; 1628 1629 offset = xfs_inobt_first_free_inode(&rec); 1630 ASSERT(offset >= 0); 1631 ASSERT(offset < XFS_INODES_PER_CHUNK); 1632 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1633 XFS_INODES_PER_CHUNK) == 0); 1634 ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); 1635 1636 /* 1637 * Modify or remove the finobt record. 1638 */ 1639 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1640 rec.ir_freecount--; 1641 if (rec.ir_freecount) 1642 error = xfs_inobt_update(cur, &rec); 1643 else 1644 error = xfs_btree_delete(cur, &i); 1645 if (error) 1646 goto error_cur; 1647 1648 /* 1649 * The finobt has now been updated appropriately. 
We haven't updated the 1650 * agi and superblock yet, so we can create an inobt cursor and validate 1651 * the original freecount. If all is well, make the equivalent update to 1652 * the inobt using the finobt record and offset information. 1653 */ 1654 icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); 1655 1656 error = xfs_check_agi_freecount(icur, agi); 1657 if (error) 1658 goto error_icur; 1659 1660 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset); 1661 if (error) 1662 goto error_icur; 1663 1664 /* 1665 * Both trees have now been updated. We must update the perag and 1666 * superblock before we can check the freecount for each btree. 1667 */ 1668 be32_add_cpu(&agi->agi_freecount, -1); 1669 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1670 pag->pagi_freecount--; 1671 1672 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1673 1674 error = xfs_check_agi_freecount(icur, agi); 1675 if (error) 1676 goto error_icur; 1677 error = xfs_check_agi_freecount(cur, agi); 1678 if (error) 1679 goto error_icur; 1680 1681 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR); 1682 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1683 xfs_perag_put(pag); 1684 *inop = ino; 1685 return 0; 1686 1687 error_icur: 1688 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR); 1689 error_cur: 1690 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1691 xfs_perag_put(pag); 1692 return error; 1693 } 1694 1695 /* 1696 * Allocate an inode on disk. 1697 * 1698 * Mode is used to tell whether the new inode will need space, and whether it 1699 * is a directory. 1700 * 1701 * This function is designed to be called twice if it has to do an allocation 1702 * to make more free inodes. On the first call, *IO_agbp should be set to NULL. 1703 * If an inode is available without having to performn an allocation, an inode 1704 * number is returned. In this case, *IO_agbp is set to NULL. If an allocation 1705 * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp. 1706 * The caller should then commit the current transaction, allocate a 1707 * new transaction, and call xfs_dialloc() again, passing in the previous value 1708 * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI 1709 * buffer is locked across the two calls, the second call is guaranteed to have 1710 * a free inode available. 1711 * 1712 * Once we successfully pick an inode its number is returned and the on-disk 1713 * data structures are updated. The inode itself is not read in, since doing so 1714 * would break ordering constraints with xfs_reclaim. 1715 */ 1716 int 1717 xfs_dialloc( 1718 struct xfs_trans *tp, 1719 xfs_ino_t parent, 1720 umode_t mode, 1721 struct xfs_buf **IO_agbp, 1722 xfs_ino_t *inop) 1723 { 1724 struct xfs_mount *mp = tp->t_mountp; 1725 struct xfs_buf *agbp; 1726 xfs_agnumber_t agno; 1727 int error; 1728 int ialloced; 1729 int noroom = 0; 1730 xfs_agnumber_t start_agno; 1731 struct xfs_perag *pag; 1732 struct xfs_ino_geometry *igeo = M_IGEO(mp); 1733 int okalloc = 1; 1734 1735 if (*IO_agbp) { 1736 /* 1737 * If the caller passes in a pointer to the AGI buffer, 1738 * continue where we left off before. In this case, we 1739 * know that the allocation group has free inodes. 1740 */ 1741 agbp = *IO_agbp; 1742 goto out_alloc; 1743 } 1744 1745 /* 1746 * We do not have an agbp, so select an initial allocation 1747 * group for inode allocation. 
1748 */ 1749 start_agno = xfs_ialloc_ag_select(tp, parent, mode); 1750 if (start_agno == NULLAGNUMBER) { 1751 *inop = NULLFSINO; 1752 return 0; 1753 } 1754 1755 /* 1756 * If we have already hit the ceiling of inode blocks then clear 1757 * okalloc so we scan all available agi structures for a free 1758 * inode. 1759 * 1760 * Read rough value of mp->m_icount by percpu_counter_read_positive, 1761 * which will sacrifice the preciseness but improve the performance. 1762 */ 1763 if (igeo->maxicount && 1764 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos 1765 > igeo->maxicount) { 1766 noroom = 1; 1767 okalloc = 0; 1768 } 1769 1770 /* 1771 * Loop until we find an allocation group that either has free inodes 1772 * or in which we can allocate some inodes. Iterate through the 1773 * allocation groups upward, wrapping at the end. 1774 */ 1775 agno = start_agno; 1776 for (;;) { 1777 pag = xfs_perag_get(mp, agno); 1778 if (!pag->pagi_inodeok) { 1779 xfs_ialloc_next_ag(mp); 1780 goto nextag; 1781 } 1782 1783 if (!pag->pagi_init) { 1784 error = xfs_ialloc_pagi_init(mp, tp, agno); 1785 if (error) 1786 goto out_error; 1787 } 1788 1789 /* 1790 * Do a first racy fast path check if this AG is usable. 1791 */ 1792 if (!pag->pagi_freecount && !okalloc) 1793 goto nextag; 1794 1795 /* 1796 * Then read in the AGI buffer and recheck with the AGI buffer 1797 * lock held. 1798 */ 1799 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1800 if (error) 1801 goto out_error; 1802 1803 if (pag->pagi_freecount) { 1804 xfs_perag_put(pag); 1805 goto out_alloc; 1806 } 1807 1808 if (!okalloc) 1809 goto nextag_relse_buffer; 1810 1811 1812 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced); 1813 if (error) { 1814 xfs_trans_brelse(tp, agbp); 1815 1816 if (error != -ENOSPC) 1817 goto out_error; 1818 1819 xfs_perag_put(pag); 1820 *inop = NULLFSINO; 1821 return 0; 1822 } 1823 1824 if (ialloced) { 1825 /* 1826 * We successfully allocated some inodes, return 1827 * the current context to the caller so that it 1828 * can commit the current transaction and call 1829 * us again where we left off. 1830 */ 1831 ASSERT(pag->pagi_freecount > 0); 1832 xfs_perag_put(pag); 1833 1834 *IO_agbp = agbp; 1835 *inop = NULLFSINO; 1836 return 0; 1837 } 1838 1839 nextag_relse_buffer: 1840 xfs_trans_brelse(tp, agbp); 1841 nextag: 1842 xfs_perag_put(pag); 1843 if (++agno == mp->m_sb.sb_agcount) 1844 agno = 0; 1845 if (agno == start_agno) { 1846 *inop = NULLFSINO; 1847 return noroom ? -ENOSPC : 0; 1848 } 1849 } 1850 1851 out_alloc: 1852 *IO_agbp = NULL; 1853 return xfs_dialloc_ag(tp, agbp, parent, inop); 1854 out_error: 1855 xfs_perag_put(pag); 1856 return error; 1857 } 1858 1859 /* 1860 * Free the blocks of an inode chunk. We must consider that the inode chunk 1861 * might be sparse and only free the regions that are allocated as part of the 1862 * chunk. 
1863 */ 1864 STATIC void 1865 xfs_difree_inode_chunk( 1866 struct xfs_trans *tp, 1867 xfs_agnumber_t agno, 1868 struct xfs_inobt_rec_incore *rec) 1869 { 1870 struct xfs_mount *mp = tp->t_mountp; 1871 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, 1872 rec->ir_startino); 1873 int startidx, endidx; 1874 int nextbit; 1875 xfs_agblock_t agbno; 1876 int contigblk; 1877 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS); 1878 1879 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1880 /* not sparse, calculate extent info directly */ 1881 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), 1882 M_IGEO(mp)->ialloc_blks, 1883 &XFS_RMAP_OINFO_INODES); 1884 return; 1885 } 1886 1887 /* holemask is only 16-bits (fits in an unsigned long) */ 1888 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0])); 1889 holemask[0] = rec->ir_holemask; 1890 1891 /* 1892 * Find contiguous ranges of zeroes (i.e., allocated regions) in the 1893 * holemask and convert the start/end index of each range to an extent. 1894 * We start with the start and end index both pointing at the first 0 in 1895 * the mask. 1896 */ 1897 startidx = endidx = find_first_zero_bit(holemask, 1898 XFS_INOBT_HOLEMASK_BITS); 1899 nextbit = startidx + 1; 1900 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1901 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1902 nextbit); 1903 /* 1904 * If the next zero bit is contiguous, update the end index of 1905 * the current range and continue. 1906 */ 1907 if (nextbit != XFS_INOBT_HOLEMASK_BITS && 1908 nextbit == endidx + 1) { 1909 endidx = nextbit; 1910 goto next; 1911 } 1912 1913 /* 1914 * nextbit is not contiguous with the current end index. Convert 1915 * the current start/end to an extent and add it to the free 1916 * list. 1917 */ 1918 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) / 1919 mp->m_sb.sb_inopblock; 1920 contigblk = ((endidx - startidx + 1) * 1921 XFS_INODES_PER_HOLEMASK_BIT) / 1922 mp->m_sb.sb_inopblock; 1923 1924 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1925 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1926 xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno), 1927 contigblk, &XFS_RMAP_OINFO_INODES); 1928 1929 /* reset range to current bit and carry on... */ 1930 startidx = endidx = nextbit; 1931 1932 next: 1933 nextbit++; 1934 } 1935 } 1936 1937 STATIC int 1938 xfs_difree_inobt( 1939 struct xfs_mount *mp, 1940 struct xfs_trans *tp, 1941 struct xfs_buf *agbp, 1942 xfs_agino_t agino, 1943 struct xfs_icluster *xic, 1944 struct xfs_inobt_rec_incore *orec) 1945 { 1946 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); 1947 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); 1948 struct xfs_perag *pag; 1949 struct xfs_btree_cur *cur; 1950 struct xfs_inobt_rec_incore rec; 1951 int ilen; 1952 int error; 1953 int i; 1954 int off; 1955 1956 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 1957 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); 1958 1959 /* 1960 * Initialize the cursor. 1961 */ 1962 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO); 1963 1964 error = xfs_check_agi_freecount(cur, agi); 1965 if (error) 1966 goto error0; 1967 1968 /* 1969 * Look for the entry describing this inode. 
STATIC int
xfs_difree_inobt(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agino_t			agino,
	struct xfs_icluster		*xic,
	struct xfs_inobt_rec_incore	*orec)
{
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	struct xfs_perag		*pag;
	struct xfs_btree_cur		*cur;
	struct xfs_inobt_rec_incore	rec;
	int				ilen;
	int				error;
	int				i;
	int				off;

	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
	ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));

	/*
	 * Initialize the cursor.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * Look for the entry describing this inode.
	 */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (error) {
		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
			__func__, error);
		goto error0;
	}
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error) {
		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
			__func__, error);
		goto error0;
	}
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	/*
	 * Get the offset in the inode chunk.
	 */
	off = agino - rec.ir_startino;
	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
	/*
	 * Mark the inode free & increment the count.
	 */
	rec.ir_free |= XFS_INOBT_MASK(off);
	rec.ir_freecount++;

	/*
	 * When an inode chunk is free, it becomes eligible for removal. Don't
	 * remove the chunk if the block size is large enough for multiple
	 * inode chunks (that might not be free).
	 */
	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
	    rec.ir_free == XFS_INOBT_ALL_FREE &&
	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
		xic->deleted = true;
		xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
		xic->alloc = xfs_inobt_irec_to_allocmask(&rec);

		/*
		 * Remove the inode cluster from the AGI B+Tree, adjust the
		 * AGI and Superblock inode counts, and mark the disk space
		 * to be freed when the transaction is committed.
		 */
		ilen = rec.ir_freecount;
		be32_add_cpu(&agi->agi_count, -ilen);
		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
		pag = xfs_perag_get(mp, agno);
		pag->pagi_freecount -= ilen - 1;
		pag->pagi_count -= ilen;
		xfs_perag_put(pag);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

		error = xfs_btree_delete(cur, &i);
		if (error) {
			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
				__func__, error);
			goto error0;
		}

		xfs_difree_inode_chunk(tp, agno, &rec);
	} else {
		xic->deleted = false;

		error = xfs_inobt_update(cur, &rec);
		if (error) {
			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
				__func__, error);
			goto error0;
		}

		/*
		 * Change the inode free counts and log the ag/sb changes.
		 */
		be32_add_cpu(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
		pag = xfs_perag_get(mp, agno);
		pag->pagi_freecount++;
		xfs_perag_put(pag);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	*orec = rec;
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
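/*
 * Worked example (added for illustration, not in the original source):
 * freeing agino 131 in a chunk whose record has ir_startino == 128 gives
 * off = 3, so the update above is
 *
 *	rec.ir_free |= XFS_INOBT_MASK(3);	(sets bit 3 of the 64-bit mask)
 *	rec.ir_freecount++;
 *
 * and the chunk only becomes eligible for deletion once
 * rec.ir_free == XFS_INOBT_ALL_FREE, i.e. all 64 bits are set.
 */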
/*
 * Free an inode in the free inode btree.
 */
STATIC int
xfs_difree_finobt(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agino_t			agino,
	struct xfs_inobt_rec_incore	*ibtrec) /* inobt record */
{
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(agbp);
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	struct xfs_btree_cur		*cur;
	struct xfs_inobt_rec_incore	rec;
	int				offset = agino - ibtrec->ir_startino;
	int				error;
	int				i;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);

	error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		goto error;
	if (i == 0) {
		/*
		 * If the record does not exist in the finobt, we must have
		 * just freed an inode in a previously fully allocated chunk.
		 * If not, something is out of sync.
		 */
		if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
					     ibtrec->ir_count,
					     ibtrec->ir_freecount,
					     ibtrec->ir_free, &i);
		if (error)
			goto error;
		ASSERT(i == 1);

		goto out;
	}

	/*
	 * Read and update the existing record. We could just copy the ibtrec
	 * across here, but that would defeat the purpose of having redundant
	 * metadata. By making the modifications independently, we can catch
	 * corruptions that we wouldn't see if we just copied from one record
	 * to another.
	 */
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		goto error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error;
	}

	rec.ir_free |= XFS_INOBT_MASK(offset);
	rec.ir_freecount++;

	if (XFS_IS_CORRUPT(mp,
			   rec.ir_free != ibtrec->ir_free ||
			   rec.ir_freecount != ibtrec->ir_freecount)) {
		error = -EFSCORRUPTED;
		goto error;
	}

	/*
	 * The content of inobt records should always match between the inobt
	 * and finobt. The lifecycle of records in the finobt is different from
	 * the inobt in that the finobt only tracks records with at least one
	 * free inode. Hence, if all of the inodes are free and we aren't
	 * keeping inode chunks permanently on disk, remove the record.
	 * Otherwise, update the record with the new information.
	 *
	 * Note that we currently can't free chunks when the block size is
	 * large enough for multiple chunks. Leave the finobt record in place
	 * so it remains in sync with the inobt.
	 */
	if (rec.ir_free == XFS_INOBT_ALL_FREE &&
	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto error;
		ASSERT(i == 1);
	} else {
		error = xfs_inobt_update(cur, &rec);
		if (error)
			goto error;
	}

out:
	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
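/*
 * Illustrative note (added by the editor, not in the original source): a
 * chunk has a finobt record only while it has at least one free inode.
 * Freeing the first inode of a fully allocated chunk (ir_freecount going
 * 0 -> 1) inserts the record, as above; once every inode is free the record
 * is deleted along with the chunk, unless XFS_MOUNT_IKEEP or a multi-chunk
 * block size keeps the chunk, and thus the record, around.
 */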
/*
 * Free disk inode. Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
int
xfs_difree(
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_ino_t		inode,	/* inode to be freed */
	struct xfs_icluster	*xic)	/* cluster info if deleted */
{
	/* REFERENCED */
	xfs_agblock_t		agbno;	/* block number containing inode */
	struct xfs_buf		*agbp;	/* buffer for allocation group header */
	xfs_agino_t		agino;	/* allocation group inode number */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			error;	/* error return value */
	struct xfs_mount	*mp;	/* mount structure for filesystem */
	struct xfs_inobt_rec_incore rec;/* btree record */

	mp = tp->t_mountp;

	/*
	 * Break up inode number into its components.
	 */
	agno = XFS_INO_TO_AGNO(mp, inode);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
			__func__, agno, mp->m_sb.sb_agcount);
		ASSERT(0);
		return -EINVAL;
	}
	agino = XFS_INO_TO_AGINO(mp, inode);
	if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
			__func__, (unsigned long long)inode,
			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
		ASSERT(0);
		return -EINVAL;
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
			__func__, agbno, mp->m_sb.sb_agblocks);
		ASSERT(0);
		return -EINVAL;
	}
	/*
	 * Get the allocation group header.
	 */
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
			__func__, error);
		return error;
	}

	/*
	 * Fix up the inode allocation btree.
	 */
	error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
	if (error)
		return error;

	/*
	 * Fix up the free inode btree.
	 */
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
		if (error)
			return error;
	}

	return 0;
}
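/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): the inode number decomposition and round-trip check used by
 * xfs_difree() above, expressed as a standalone predicate.  The example_*
 * name is hypothetical.
 */
static inline bool
example_ino_is_well_formed(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	/* AG and AG block must be in range, and the mapping must round-trip */
	return agno < mp->m_sb.sb_agcount &&
	       XFS_AGINO_TO_AGBNO(mp, agino) < mp->m_sb.sb_agblocks &&
	       ino == XFS_AGINO_TO_INO(mp, agno, agino);
}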
STATIC int
xfs_imap_lookup(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	xfs_agblock_t		agbno,
	xfs_agblock_t		*chunk_agbno,
	xfs_agblock_t		*offset_agbno,
	int			flags)
{
	struct xfs_inobt_rec_incore rec;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;

	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_alert(mp,
			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
			__func__, error, agno);
		return error;
	}

	/*
	 * Lookup the inode record for the given agino. If the record cannot be
	 * found, then it's an invalid inode number and we should abort. Once
	 * we have a record, we need to ensure it contains the inode number
	 * we are looking up.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (!error) {
		if (i)
			error = xfs_inobt_get_rec(cur, &rec, &i);
		if (!error && i == 0)
			error = -EINVAL;
	}

	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, error);
	if (error)
		return error;

	/* check that the returned record contains the required inode */
	if (rec.ir_startino > agino ||
	    rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
		return -EINVAL;

	/* for untrusted inodes check it is allocated first */
	if ((flags & XFS_IGET_UNTRUSTED) &&
	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
		return -EINVAL;

	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
	*offset_agbno = agbno - *chunk_agbno;
	return 0;
}
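/*
 * Worked example (added for illustration, not in the original source):
 * looking up untrusted agino 70 against a chunk record with
 * ir_startino == 64 passes the containment check above (64 <= 70 and
 * 70 < 64 + ialloc_inos for the common 64-inode chunk), and is then
 * rejected with -EINVAL if and only if bit 70 - 64 = 6 of rec.ir_free is
 * set, i.e. the inode is currently free on disk.
 */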
/*
 * Return the location of the inode in imap, for mapping it into a buffer.
 */
int
xfs_imap(
	xfs_mount_t	*mp,	/* file system mount structure */
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_ino_t	ino,	/* inode to locate */
	struct xfs_imap	*imap,	/* location map structure */
	uint		flags)	/* flags for inode btree lookup */
{
	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
	xfs_agino_t	agino;	/* inode number within alloc group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	int		error;	/* error code */
	int		offset;	/* index of inode in its buffer */
	xfs_agblock_t	offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);

	/*
	 * Split up the inode number into its parts.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
		/*
		 * Don't output diagnostic information for untrusted inodes
		 * as they can be invalid without implying corruption.
		 */
		if (flags & XFS_IGET_UNTRUSTED)
			return -EINVAL;
		if (agno >= mp->m_sb.sb_agcount) {
			xfs_alert(mp,
				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
				__func__, agno, mp->m_sb.sb_agcount);
		}
		if (agbno >= mp->m_sb.sb_agblocks) {
			xfs_alert(mp,
			"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
				__func__, (unsigned long long)agbno,
				(unsigned long)mp->m_sb.sb_agblocks);
		}
		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
			xfs_alert(mp,
			"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
				__func__, ino,
				XFS_AGINO_TO_INO(mp, agno, agino));
		}
		xfs_stack_trace();
#endif /* DEBUG */
		return -EINVAL;
	}

	/*
	 * For bulkstat and handle lookups, we have an untrusted inode number
	 * that we have to verify is valid. We cannot do this just by reading
	 * the inode buffer as it may have been unlinked and removed leaving
	 * inodes in stale state on disk. Hence we have to do a btree lookup
	 * in all cases where an untrusted inode number is passed.
	 */
	if (flags & XFS_IGET_UNTRUSTED) {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
		goto out_map;
	}

	/*
	 * If the inode cluster size is the same as the blocksize or
	 * smaller we get to the buffer with simple arithmetic.
	 */
	if (M_IGEO(mp)->blocks_per_cluster == 1) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);

		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
		imap->im_len = XFS_FSB_TO_BB(mp, 1);
		imap->im_boffset = (unsigned short)(offset <<
							mp->m_sb.sb_inodelog);
		return 0;
	}

	/*
	 * If the inode chunks are aligned then use simple maths to
	 * find the location. Otherwise we have to do a btree
	 * lookup to find the location.
	 */
	if (M_IGEO(mp)->inoalign_mask) {
		offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
	}

out_map:
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
		 M_IGEO(mp)->blocks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);

	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
	imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);

	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return an error rather than calling
	 * read_buf and panicking when we get an error from the
	 * driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_alert(mp,
	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
			__func__, (unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return -EINVAL;
	}
	return 0;
}
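/*
 * Worked example (added for illustration, not in the original source):
 * with blocks_per_cluster == 2 and sb_inopblock == 16, an inode at
 * offset_agbno == 3 within its chunk lies in the chunk's second cluster:
 *
 *	cluster_agbno = chunk_agbno + (3 / 2) * 2 = chunk_agbno + 2
 *	offset        = (3 - 2) * 16 + XFS_INO_TO_OFFSET(mp, ino)
 *
 * (integer division), so im_boffset addresses the inode within a
 * two-block cluster buffer.
 */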
/*
 * Log specified fields for the ag hdr (inode section). The growth of the agi
 * structure over time requires that we interpret the buffer as two logical
 * regions delineated by the end of the unlinked list. This is due to the size
 * of the hash table and its location in the middle of the agi.
 *
 * For example, a request to log a field before agi_unlinked and a field after
 * agi_unlinked could cause us to log the entire hash table and use an
 * excessive amount of log space. To avoid this behavior, log the region up
 * through agi_unlinked in one call and the region after agi_unlinked through
 * the end of the structure in another.
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
					/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		offsetof(xfs_agi_t, agi_free_root),
		offsetof(xfs_agi_t, agi_free_level),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	xfs_agi_t		*agi;	/* allocation group header */

	agi = XFS_BUF_TO_AGI(bp);
	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif

	/*
	 * Compute byte offsets for the first and last fields in the first
	 * region and log the agi buffer. This only logs up through
	 * agi_unlinked.
	 */
	if (fields & XFS_AGI_ALL_BITS_R1) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}

	/*
	 * Mask off the bits in the first region and calculate the first and
	 * last field offsets for any bits in the second region.
	 */
	fields &= ~XFS_AGI_ALL_BITS_R1;
	if (fields) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}
}
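/*
 * Illustrative example (added by the editor, not in the original source):
 * a caller that dirties agi_count (region 1) and agi_free_root (region 2)
 * in the same transaction issues
 *
 *	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREE_ROOT);
 *
 * which results in two separate xfs_trans_log_buf() ranges, skipping the
 * unlinked hash table that sits between the two fields on disk.
 */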
static xfs_failaddr_t
xfs_agi_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agi	*agi = XFS_BUF_TO_AGI(bp);
	int		i;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp,
				be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
			return __this_address;
	}

	/*
	 * Validate the magic number of the agi block.
	 */
	if (!xfs_verify_magic(bp, agi->agi_magicnum))
		return __this_address;
	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
		return __this_address;

	if (be32_to_cpu(agi->agi_level) < 1 ||
	    be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
		return __this_address;

	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    (be32_to_cpu(agi->agi_free_level) < 1 ||
	     be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	/*
	 * During growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs uses uncached
	 * buffers that don't have the perag attached, so we can detect this
	 * case and skip the check.
	 */
	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
			continue;
		if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
			return __this_address;
	}

	return NULL;
}

static void
xfs_agi_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agi_verify(bp);
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agi_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	fa = xfs_agi_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	/* Stamp the LSN before computing the CRC so the checksum covers it. */
	if (bip)
		XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
}

const struct xfs_buf_ops xfs_agi_buf_ops = {
	.name = "xfs_agi",
	.magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
	.verify_read = xfs_agi_read_verify,
	.verify_write = xfs_agi_write_verify,
	.verify_struct = xfs_agi_verify,
};
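/*
 * Illustrative note (added by the editor, not in the original source): the
 * ops vector above is the only hook needed to get verification on I/O; any
 * reader that passes &xfs_agi_buf_ops to xfs_trans_read_buf(), as
 * xfs_read_agi() does below, gets the CRC check plus xfs_agi_verify() on
 * read and again on write-out.
 */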
/*
 * Read in the allocation group header (inode allocation section)
 */
int
xfs_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	int			error;

	trace_xfs_read_agi(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
	if (error)
		return error;
	if (tp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);

	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
	return 0;
}

int
xfs_ialloc_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	struct xfs_agi		*agi;	/* allocation group header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	trace_xfs_ialloc_read_agi(mp, agno);

	error = xfs_read_agi(mp, tp, agno, bpp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);
		pag->pagi_init = 1;
	}

	/*
	 * It's possible for these to be out of sync if
	 * we are in the middle of a forced shutdown.
	 */
	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
		XFS_FORCED_SHUTDOWN(mp));
	xfs_perag_put(pag);
	return 0;
}

/*
 * Read in the agi to initialise the per-ag data in the mount structure
 */
int
xfs_ialloc_pagi_init(
	xfs_mount_t	*mp,	/* file system mount structure */
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_agnumber_t	agno)	/* allocation group number */
{
	xfs_buf_t	*bp = NULL;
	int		error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
	if (error)
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}

/* Is there an inode record covering a given range of inode numbers? */
int
xfs_ialloc_has_inode_record(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		low,
	xfs_agino_t		high,
	bool			*exists)
{
	struct xfs_inobt_rec_incore	irec;
	xfs_agino_t		agino;
	uint16_t		holemask;
	int			has_record;
	int			i;
	int			error;

	*exists = false;
	error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
	while (error == 0 && has_record) {
		error = xfs_inobt_get_rec(cur, &irec, &has_record);
		if (error || irec.ir_startino > high)
			break;

		agino = irec.ir_startino;
		holemask = irec.ir_holemask;
		for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
				i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
			if (holemask & 1)
				continue;
			if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
					agino <= high) {
				*exists = true;
				return 0;
			}
		}

		error = xfs_btree_increment(cur, 0, &has_record);
	}
	return error;
}

/* Is there an inode record covering a given extent? */
int
xfs_ialloc_has_inodes_at_extent(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	xfs_agino_t		low;
	xfs_agino_t		high;

	low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
	high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;

	return xfs_ialloc_has_inode_record(cur, low, high, exists);
}
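/*
 * Worked example (added for illustration, not in the original source):
 * with 16 inodes per block, checking blocks [bno, bno + 2) converts to the
 * inode range low = 16 * bno, high = 16 * (bno + 2) - 1; a sparse record
 * then counts as overlapping only if one of its allocated (holemask-zero)
 * 4-inode regions intersects [low, high].
 */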
struct xfs_ialloc_count_inodes {
	xfs_agino_t			count;
	xfs_agino_t			freecount;
};

/* Record inode counts across all inobt records. */
STATIC int
xfs_ialloc_count_inodes_rec(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	struct xfs_inobt_rec_incore	irec;
	struct xfs_ialloc_count_inodes	*ci = priv;

	xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
	ci->count += irec.ir_count;
	ci->freecount += irec.ir_freecount;

	return 0;
}

/* Count allocated and free inodes under an inobt. */
int
xfs_ialloc_count_inodes(
	struct xfs_btree_cur		*cur,
	xfs_agino_t			*count,
	xfs_agino_t			*freecount)
{
	struct xfs_ialloc_count_inodes	ci = {0};
	int				error;

	ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
	error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
	if (error)
		return error;

	*count = ci.count;
	*freecount = ci.freecount;
	return 0;
}

/*
 * Initialize inode-related geometry information.
 *
 * Compute the inode btree min and max levels and set maxicount.
 *
 * Set the inode cluster size. This may still be overridden by the file
 * system block size if it is larger than the chosen cluster size.
 *
 * For v5 filesystems, scale the cluster size with the inode size to keep a
 * constant ratio of inodes per cluster buffer, but only if mkfs has set the
 * inode alignment value appropriately for larger cluster sizes.
 *
 * Then compute the inode cluster alignment information.
 */
void
xfs_ialloc_setup_geometry(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		icount;
	uint			inodes;

	/* Compute inode btree geometry. */
	igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
	igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;

	igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
			sbp->sb_inopblock);
	igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;

	if (sbp->sb_spino_align)
		igeo->ialloc_min_blks = sbp->sb_spino_align;
	else
		igeo->ialloc_min_blks = igeo->ialloc_blks;

	/* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
	igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
			inodes);

	/*
	 * Set the maximum inode count for this filesystem, being careful not
	 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
	 * users should never get here due to failing sb verification, but
	 * certain users (xfs_db) need to be usable even with corrupt metadata.
	 */
	if (sbp->sb_imax_pct && igeo->ialloc_blks) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, igeo->ialloc_blks);
		igeo->maxicount = XFS_FSB_TO_INO(mp,
				icount * igeo->ialloc_blks);
	} else {
		igeo->maxicount = 0;
	}

	/*
	 * Compute the desired size of an inode cluster buffer, which starts
	 * at 8K and (on v5 filesystems) scales up with larger inode sizes.
	 *
	 * Preserve the desired inode cluster size because the sparse inodes
	 * feature uses that desired size (not the actual size) to compute the
	 * sparse inode alignment. The mount code validates this value, so we
	 * cannot change the behavior.
	 */
	igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		int	new_size = igeo->inode_cluster_size_raw;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			igeo->inode_cluster_size_raw = new_size;
	}

	/* Calculate inode cluster ratios. */
	if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
		igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
				igeo->inode_cluster_size_raw);
	else
		igeo->blocks_per_cluster = 1;
	igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
	igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);

	/* Calculate inode cluster alignment. */
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
		igeo->cluster_align = mp->m_sb.sb_inoalignmt;
	else
		igeo->cluster_align = 1;
	igeo->inoalign_mask = igeo->cluster_align - 1;
	igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);

	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && igeo->inoalign_mask &&
	    !(mp->m_dalign & igeo->inoalign_mask))
		igeo->ialloc_align = mp->m_dalign;
	else
		igeo->ialloc_align = 0;
}
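/*
 * Worked example (added for illustration, not in the original source):
 * on a v5 filesystem with 512-byte inodes, taking
 * XFS_INODE_BIG_CLUSTER_SIZE = 8192 and XFS_DINODE_MIN_SIZE = 256,
 * new_size = 8192 * (512 / 256) = 16384 bytes. With 4 KiB blocks that is
 * 4 blocks per cluster, accepted only if sb_inoalignmt covers it
 * (>= 4 FSBs here); otherwise the cluster size stays at the 8K default.
 */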
/* Compute the location of the root directory inode that is laid out by mkfs. */
xfs_ino_t
xfs_ialloc_calc_rootino(
	struct xfs_mount	*mp,
	int			sunit)
{
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agblock_t		first_bno;

	/*
	 * Pre-calculate the geometry of AG 0. We know what it looks like
	 * because libxfs knows how to create allocation groups now.
	 *
	 * first_bno is the first block in which mkfs could possibly have
	 * allocated the root directory inode, once we factor in the metadata
	 * that mkfs formats before it. Namely, the four AG headers...
	 */
	first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);

	/* ...the two free space btree roots... */
	first_bno += 2;

	/* ...the inode btree root... */
	first_bno += 1;

	/* ...the initial AGFL... */
	first_bno += xfs_alloc_min_freelist(mp, NULL);

	/* ...the free inode btree root... */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		first_bno++;

	/* ...the reverse mapping btree root... */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		first_bno++;

	/* ...the reference count btree... */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		first_bno++;

	/*
	 * ...and the log, if it is allocated in the first allocation group.
	 *
	 * This can happen with filesystems that only have a single
	 * allocation group, or very odd geometries created by old mkfs
	 * versions on very small filesystems.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
		first_bno += mp->m_sb.sb_logblocks;

	/*
	 * Now round first_bno up to whatever allocation alignment is given
	 * by the filesystem or was passed in.
	 */
	if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
		first_bno = roundup(first_bno, sunit);
	else if (xfs_sb_version_hasalign(&mp->m_sb) &&
			mp->m_sb.sb_inoalignmt > 1)
		first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);

	return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}
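/*
 * Worked example (added for illustration, not in the original source):
 * with 512-byte sectors and 4 KiB blocks, the four AG headers fit in
 * howmany(4 * 512, 4096) = 1 block, so before any optional features
 * first_bno = 1 + 2 (free space btree roots) + 1 (inobt root) plus the
 * initial AGFL length; finobt, rmapbt and reflink each add one more block
 * when enabled, and the result is then rounded up to the alignment above.
 */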