1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_ialloc.h" 17 #include "xfs_ialloc_btree.h" 18 #include "xfs_alloc.h" 19 #include "xfs_errortag.h" 20 #include "xfs_error.h" 21 #include "xfs_bmap.h" 22 #include "xfs_trans.h" 23 #include "xfs_buf_item.h" 24 #include "xfs_icreate_item.h" 25 #include "xfs_icache.h" 26 #include "xfs_trace.h" 27 #include "xfs_log.h" 28 #include "xfs_rmap.h" 29 #include "xfs_ag.h" 30 31 /* 32 * Lookup a record by ino in the btree given by cur. 33 */ 34 int /* error */ 35 xfs_inobt_lookup( 36 struct xfs_btree_cur *cur, /* btree cursor */ 37 xfs_agino_t ino, /* starting inode of chunk */ 38 xfs_lookup_t dir, /* <=, >=, == */ 39 int *stat) /* success/failure */ 40 { 41 cur->bc_rec.i.ir_startino = ino; 42 cur->bc_rec.i.ir_holemask = 0; 43 cur->bc_rec.i.ir_count = 0; 44 cur->bc_rec.i.ir_freecount = 0; 45 cur->bc_rec.i.ir_free = 0; 46 return xfs_btree_lookup(cur, dir, stat); 47 } 48 49 /* 50 * Update the record referred to by cur to the value given. 51 * This either works (return 0) or gets an EFSCORRUPTED error. 52 */ 53 STATIC int /* error */ 54 xfs_inobt_update( 55 struct xfs_btree_cur *cur, /* btree cursor */ 56 xfs_inobt_rec_incore_t *irec) /* btree record */ 57 { 58 union xfs_btree_rec rec; 59 60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); 61 if (xfs_has_sparseinodes(cur->bc_mp)) { 62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask); 63 rec.inobt.ir_u.sp.ir_count = irec->ir_count; 64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount; 65 } else { 66 /* ir_holemask/ir_count not supported on-disk */ 67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount); 68 } 69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free); 70 return xfs_btree_update(cur, &rec); 71 } 72 73 /* Convert on-disk btree record to incore inobt record. */ 74 void 75 xfs_inobt_btrec_to_irec( 76 struct xfs_mount *mp, 77 const union xfs_btree_rec *rec, 78 struct xfs_inobt_rec_incore *irec) 79 { 80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); 81 if (xfs_has_sparseinodes(mp)) { 82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask); 83 irec->ir_count = rec->inobt.ir_u.sp.ir_count; 84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount; 85 } else { 86 /* 87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded 88 * values for full inode chunks. 89 */ 90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL; 91 irec->ir_count = XFS_INODES_PER_CHUNK; 92 irec->ir_freecount = 93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount); 94 } 95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free); 96 } 97 98 /* Simple checks for inode records. */ 99 xfs_failaddr_t 100 xfs_inobt_check_irec( 101 struct xfs_btree_cur *cur, 102 const struct xfs_inobt_rec_incore *irec) 103 { 104 uint64_t realfree; 105 106 if (!xfs_verify_agino(cur->bc_ag.pag, irec->ir_startino)) 107 return __this_address; 108 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT || 109 irec->ir_count > XFS_INODES_PER_CHUNK) 110 return __this_address; 111 if (irec->ir_freecount > XFS_INODES_PER_CHUNK) 112 return __this_address; 113 114 /* if there are no holes, return the first available offset */ 115 if (!xfs_inobt_issparse(irec->ir_holemask)) 116 realfree = irec->ir_free; 117 else 118 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec); 119 if (hweight64(realfree) != irec->ir_freecount) 120 return __this_address; 121 122 return NULL; 123 } 124 125 /* 126 * Get the data from the pointed-to record. 127 */ 128 int 129 xfs_inobt_get_rec( 130 struct xfs_btree_cur *cur, 131 struct xfs_inobt_rec_incore *irec, 132 int *stat) 133 { 134 struct xfs_mount *mp = cur->bc_mp; 135 union xfs_btree_rec *rec; 136 xfs_failaddr_t fa; 137 int error; 138 139 error = xfs_btree_get_rec(cur, &rec, stat); 140 if (error || *stat == 0) 141 return error; 142 143 xfs_inobt_btrec_to_irec(mp, rec, irec); 144 fa = xfs_inobt_check_irec(cur, irec); 145 if (fa) 146 goto out_bad_rec; 147 148 return 0; 149 150 out_bad_rec: 151 xfs_warn(mp, 152 "%s Inode BTree record corruption in AG %d detected at %pS!", 153 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", 154 cur->bc_ag.pag->pag_agno, fa); 155 xfs_warn(mp, 156 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", 157 irec->ir_startino, irec->ir_count, irec->ir_freecount, 158 irec->ir_free, irec->ir_holemask); 159 return -EFSCORRUPTED; 160 } 161 162 /* 163 * Insert a single inobt record. Cursor must already point to desired location. 164 */ 165 int 166 xfs_inobt_insert_rec( 167 struct xfs_btree_cur *cur, 168 uint16_t holemask, 169 uint8_t count, 170 int32_t freecount, 171 xfs_inofree_t free, 172 int *stat) 173 { 174 cur->bc_rec.i.ir_holemask = holemask; 175 cur->bc_rec.i.ir_count = count; 176 cur->bc_rec.i.ir_freecount = freecount; 177 cur->bc_rec.i.ir_free = free; 178 return xfs_btree_insert(cur, stat); 179 } 180 181 /* 182 * Insert records describing a newly allocated inode chunk into the inobt. 183 */ 184 STATIC int 185 xfs_inobt_insert( 186 struct xfs_perag *pag, 187 struct xfs_trans *tp, 188 struct xfs_buf *agbp, 189 xfs_agino_t newino, 190 xfs_agino_t newlen, 191 xfs_btnum_t btnum) 192 { 193 struct xfs_btree_cur *cur; 194 xfs_agino_t thisino; 195 int i; 196 int error; 197 198 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 199 200 for (thisino = newino; 201 thisino < newino + newlen; 202 thisino += XFS_INODES_PER_CHUNK) { 203 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); 204 if (error) { 205 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 206 return error; 207 } 208 ASSERT(i == 0); 209 210 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL, 211 XFS_INODES_PER_CHUNK, 212 XFS_INODES_PER_CHUNK, 213 XFS_INOBT_ALL_FREE, &i); 214 if (error) { 215 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 216 return error; 217 } 218 ASSERT(i == 1); 219 } 220 221 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 222 223 return 0; 224 } 225 226 /* 227 * Verify that the number of free inodes in the AGI is correct. 228 */ 229 #ifdef DEBUG 230 static int 231 xfs_check_agi_freecount( 232 struct xfs_btree_cur *cur) 233 { 234 if (cur->bc_nlevels == 1) { 235 xfs_inobt_rec_incore_t rec; 236 int freecount = 0; 237 int error; 238 int i; 239 240 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 241 if (error) 242 return error; 243 244 do { 245 error = xfs_inobt_get_rec(cur, &rec, &i); 246 if (error) 247 return error; 248 249 if (i) { 250 freecount += rec.ir_freecount; 251 error = xfs_btree_increment(cur, 0, &i); 252 if (error) 253 return error; 254 } 255 } while (i == 1); 256 257 if (!xfs_is_shutdown(cur->bc_mp)) 258 ASSERT(freecount == cur->bc_ag.pag->pagi_freecount); 259 } 260 return 0; 261 } 262 #else 263 #define xfs_check_agi_freecount(cur) 0 264 #endif 265 266 /* 267 * Initialise a new set of inodes. When called without a transaction context 268 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather 269 * than logging them (which in a transaction context puts them into the AIL 270 * for writeback rather than the xfsbufd queue). 271 */ 272 int 273 xfs_ialloc_inode_init( 274 struct xfs_mount *mp, 275 struct xfs_trans *tp, 276 struct list_head *buffer_list, 277 int icount, 278 xfs_agnumber_t agno, 279 xfs_agblock_t agbno, 280 xfs_agblock_t length, 281 unsigned int gen) 282 { 283 struct xfs_buf *fbuf; 284 struct xfs_dinode *free; 285 int nbufs; 286 int version; 287 int i, j; 288 xfs_daddr_t d; 289 xfs_ino_t ino = 0; 290 int error; 291 292 /* 293 * Loop over the new block(s), filling in the inodes. For small block 294 * sizes, manipulate the inodes in buffers which are multiples of the 295 * blocks size. 296 */ 297 nbufs = length / M_IGEO(mp)->blocks_per_cluster; 298 299 /* 300 * Figure out what version number to use in the inodes we create. If 301 * the superblock version has caught up to the one that supports the new 302 * inode format, then use the new inode version. Otherwise use the old 303 * version so that old kernels will continue to be able to use the file 304 * system. 305 * 306 * For v3 inodes, we also need to write the inode number into the inode, 307 * so calculate the first inode number of the chunk here as 308 * XFS_AGB_TO_AGINO() only works within a filesystem block, not 309 * across multiple filesystem blocks (such as a cluster) and so cannot 310 * be used in the cluster buffer loop below. 311 * 312 * Further, because we are writing the inode directly into the buffer 313 * and calculating a CRC on the entire inode, we have ot log the entire 314 * inode so that the entire range the CRC covers is present in the log. 315 * That means for v3 inode we log the entire buffer rather than just the 316 * inode cores. 317 */ 318 if (xfs_has_v3inodes(mp)) { 319 version = 3; 320 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno)); 321 322 /* 323 * log the initialisation that is about to take place as an 324 * logical operation. This means the transaction does not 325 * need to log the physical changes to the inode buffers as log 326 * recovery will know what initialisation is actually needed. 327 * Hence we only need to log the buffers as "ordered" buffers so 328 * they track in the AIL as if they were physically logged. 329 */ 330 if (tp) 331 xfs_icreate_log(tp, agno, agbno, icount, 332 mp->m_sb.sb_inodesize, length, gen); 333 } else 334 version = 2; 335 336 for (j = 0; j < nbufs; j++) { 337 /* 338 * Get the block. 339 */ 340 d = XFS_AGB_TO_DADDR(mp, agno, agbno + 341 (j * M_IGEO(mp)->blocks_per_cluster)); 342 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, 343 mp->m_bsize * M_IGEO(mp)->blocks_per_cluster, 344 XBF_UNMAPPED, &fbuf); 345 if (error) 346 return error; 347 348 /* Initialize the inode buffers and log them appropriately. */ 349 fbuf->b_ops = &xfs_inode_buf_ops; 350 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); 351 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) { 352 int ioffset = i << mp->m_sb.sb_inodelog; 353 354 free = xfs_make_iptr(mp, fbuf, i); 355 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); 356 free->di_version = version; 357 free->di_gen = cpu_to_be32(gen); 358 free->di_next_unlinked = cpu_to_be32(NULLAGINO); 359 360 if (version == 3) { 361 free->di_ino = cpu_to_be64(ino); 362 ino++; 363 uuid_copy(&free->di_uuid, 364 &mp->m_sb.sb_meta_uuid); 365 xfs_dinode_calc_crc(mp, free); 366 } else if (tp) { 367 /* just log the inode core */ 368 xfs_trans_log_buf(tp, fbuf, ioffset, 369 ioffset + XFS_DINODE_SIZE(mp) - 1); 370 } 371 } 372 373 if (tp) { 374 /* 375 * Mark the buffer as an inode allocation buffer so it 376 * sticks in AIL at the point of this allocation 377 * transaction. This ensures the they are on disk before 378 * the tail of the log can be moved past this 379 * transaction (i.e. by preventing relogging from moving 380 * it forward in the log). 381 */ 382 xfs_trans_inode_alloc_buf(tp, fbuf); 383 if (version == 3) { 384 /* 385 * Mark the buffer as ordered so that they are 386 * not physically logged in the transaction but 387 * still tracked in the AIL as part of the 388 * transaction and pin the log appropriately. 389 */ 390 xfs_trans_ordered_buf(tp, fbuf); 391 } 392 } else { 393 fbuf->b_flags |= XBF_DONE; 394 xfs_buf_delwri_queue(fbuf, buffer_list); 395 xfs_buf_relse(fbuf); 396 } 397 } 398 return 0; 399 } 400 401 /* 402 * Align startino and allocmask for a recently allocated sparse chunk such that 403 * they are fit for insertion (or merge) into the on-disk inode btrees. 404 * 405 * Background: 406 * 407 * When enabled, sparse inode support increases the inode alignment from cluster 408 * size to inode chunk size. This means that the minimum range between two 409 * non-adjacent inode records in the inobt is large enough for a full inode 410 * record. This allows for cluster sized, cluster aligned block allocation 411 * without need to worry about whether the resulting inode record overlaps with 412 * another record in the tree. Without this basic rule, we would have to deal 413 * with the consequences of overlap by potentially undoing recent allocations in 414 * the inode allocation codepath. 415 * 416 * Because of this alignment rule (which is enforced on mount), there are two 417 * inobt possibilities for newly allocated sparse chunks. One is that the 418 * aligned inode record for the chunk covers a range of inodes not already 419 * covered in the inobt (i.e., it is safe to insert a new sparse record). The 420 * other is that a record already exists at the aligned startino that considers 421 * the newly allocated range as sparse. In the latter case, record content is 422 * merged in hope that sparse inode chunks fill to full chunks over time. 423 */ 424 STATIC void 425 xfs_align_sparse_ino( 426 struct xfs_mount *mp, 427 xfs_agino_t *startino, 428 uint16_t *allocmask) 429 { 430 xfs_agblock_t agbno; 431 xfs_agblock_t mod; 432 int offset; 433 434 agbno = XFS_AGINO_TO_AGBNO(mp, *startino); 435 mod = agbno % mp->m_sb.sb_inoalignmt; 436 if (!mod) 437 return; 438 439 /* calculate the inode offset and align startino */ 440 offset = XFS_AGB_TO_AGINO(mp, mod); 441 *startino -= offset; 442 443 /* 444 * Since startino has been aligned down, left shift allocmask such that 445 * it continues to represent the same physical inodes relative to the 446 * new startino. 447 */ 448 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT; 449 } 450 451 /* 452 * Determine whether the source inode record can merge into the target. Both 453 * records must be sparse, the inode ranges must match and there must be no 454 * allocation overlap between the records. 455 */ 456 STATIC bool 457 __xfs_inobt_can_merge( 458 struct xfs_inobt_rec_incore *trec, /* tgt record */ 459 struct xfs_inobt_rec_incore *srec) /* src record */ 460 { 461 uint64_t talloc; 462 uint64_t salloc; 463 464 /* records must cover the same inode range */ 465 if (trec->ir_startino != srec->ir_startino) 466 return false; 467 468 /* both records must be sparse */ 469 if (!xfs_inobt_issparse(trec->ir_holemask) || 470 !xfs_inobt_issparse(srec->ir_holemask)) 471 return false; 472 473 /* both records must track some inodes */ 474 if (!trec->ir_count || !srec->ir_count) 475 return false; 476 477 /* can't exceed capacity of a full record */ 478 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK) 479 return false; 480 481 /* verify there is no allocation overlap */ 482 talloc = xfs_inobt_irec_to_allocmask(trec); 483 salloc = xfs_inobt_irec_to_allocmask(srec); 484 if (talloc & salloc) 485 return false; 486 487 return true; 488 } 489 490 /* 491 * Merge the source inode record into the target. The caller must call 492 * __xfs_inobt_can_merge() to ensure the merge is valid. 493 */ 494 STATIC void 495 __xfs_inobt_rec_merge( 496 struct xfs_inobt_rec_incore *trec, /* target */ 497 struct xfs_inobt_rec_incore *srec) /* src */ 498 { 499 ASSERT(trec->ir_startino == srec->ir_startino); 500 501 /* combine the counts */ 502 trec->ir_count += srec->ir_count; 503 trec->ir_freecount += srec->ir_freecount; 504 505 /* 506 * Merge the holemask and free mask. For both fields, 0 bits refer to 507 * allocated inodes. We combine the allocated ranges with bitwise AND. 508 */ 509 trec->ir_holemask &= srec->ir_holemask; 510 trec->ir_free &= srec->ir_free; 511 } 512 513 /* 514 * Insert a new sparse inode chunk into the associated inode btree. The inode 515 * record for the sparse chunk is pre-aligned to a startino that should match 516 * any pre-existing sparse inode record in the tree. This allows sparse chunks 517 * to fill over time. 518 * 519 * This function supports two modes of handling preexisting records depending on 520 * the merge flag. If merge is true, the provided record is merged with the 521 * existing record and updated in place. The merged record is returned in nrec. 522 * If merge is false, an existing record is replaced with the provided record. 523 * If no preexisting record exists, the provided record is always inserted. 524 * 525 * It is considered corruption if a merge is requested and not possible. Given 526 * the sparse inode alignment constraints, this should never happen. 527 */ 528 STATIC int 529 xfs_inobt_insert_sprec( 530 struct xfs_perag *pag, 531 struct xfs_trans *tp, 532 struct xfs_buf *agbp, 533 int btnum, 534 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */ 535 bool merge) /* merge or replace */ 536 { 537 struct xfs_mount *mp = pag->pag_mount; 538 struct xfs_btree_cur *cur; 539 int error; 540 int i; 541 struct xfs_inobt_rec_incore rec; 542 543 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 544 545 /* the new record is pre-aligned so we know where to look */ 546 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i); 547 if (error) 548 goto error; 549 /* if nothing there, insert a new record and return */ 550 if (i == 0) { 551 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask, 552 nrec->ir_count, nrec->ir_freecount, 553 nrec->ir_free, &i); 554 if (error) 555 goto error; 556 if (XFS_IS_CORRUPT(mp, i != 1)) { 557 error = -EFSCORRUPTED; 558 goto error; 559 } 560 561 goto out; 562 } 563 564 /* 565 * A record exists at this startino. Merge or replace the record 566 * depending on what we've been asked to do. 567 */ 568 if (merge) { 569 error = xfs_inobt_get_rec(cur, &rec, &i); 570 if (error) 571 goto error; 572 if (XFS_IS_CORRUPT(mp, i != 1)) { 573 error = -EFSCORRUPTED; 574 goto error; 575 } 576 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) { 577 error = -EFSCORRUPTED; 578 goto error; 579 } 580 581 /* 582 * This should never fail. If we have coexisting records that 583 * cannot merge, something is seriously wrong. 584 */ 585 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) { 586 error = -EFSCORRUPTED; 587 goto error; 588 } 589 590 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino, 591 rec.ir_holemask, nrec->ir_startino, 592 nrec->ir_holemask); 593 594 /* merge to nrec to output the updated record */ 595 __xfs_inobt_rec_merge(nrec, &rec); 596 597 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino, 598 nrec->ir_holemask); 599 600 error = xfs_inobt_rec_check_count(mp, nrec); 601 if (error) 602 goto error; 603 } 604 605 error = xfs_inobt_update(cur, nrec); 606 if (error) 607 goto error; 608 609 out: 610 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 611 return 0; 612 error: 613 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 614 return error; 615 } 616 617 /* 618 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if 619 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so 620 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum 621 * inode count threshold, or the usual negative error code for other errors. 622 */ 623 STATIC int 624 xfs_ialloc_ag_alloc( 625 struct xfs_perag *pag, 626 struct xfs_trans *tp, 627 struct xfs_buf *agbp) 628 { 629 struct xfs_agi *agi; 630 struct xfs_alloc_arg args; 631 int error; 632 xfs_agino_t newino; /* new first inode's number */ 633 xfs_agino_t newlen; /* new number of inodes */ 634 int isaligned = 0; /* inode allocation at stripe */ 635 /* unit boundary */ 636 /* init. to full chunk */ 637 struct xfs_inobt_rec_incore rec; 638 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp); 639 uint16_t allocmask = (uint16_t) -1; 640 int do_sparse = 0; 641 642 memset(&args, 0, sizeof(args)); 643 args.tp = tp; 644 args.mp = tp->t_mountp; 645 args.fsbno = NULLFSBLOCK; 646 args.oinfo = XFS_RMAP_OINFO_INODES; 647 args.pag = pag; 648 649 #ifdef DEBUG 650 /* randomly do sparse inode allocations */ 651 if (xfs_has_sparseinodes(tp->t_mountp) && 652 igeo->ialloc_min_blks < igeo->ialloc_blks) 653 do_sparse = get_random_u32_below(2); 654 #endif 655 656 /* 657 * Locking will ensure that we don't have two callers in here 658 * at one time. 659 */ 660 newlen = igeo->ialloc_inos; 661 if (igeo->maxicount && 662 percpu_counter_read_positive(&args.mp->m_icount) + newlen > 663 igeo->maxicount) 664 return -ENOSPC; 665 args.minlen = args.maxlen = igeo->ialloc_blks; 666 /* 667 * First try to allocate inodes contiguous with the last-allocated 668 * chunk of inodes. If the filesystem is striped, this will fill 669 * an entire stripe unit with inodes. 670 */ 671 agi = agbp->b_addr; 672 newino = be32_to_cpu(agi->agi_newino); 673 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + 674 igeo->ialloc_blks; 675 if (do_sparse) 676 goto sparse_alloc; 677 if (likely(newino != NULLAGINO && 678 (args.agbno < be32_to_cpu(agi->agi_length)))) { 679 args.prod = 1; 680 681 /* 682 * We need to take into account alignment here to ensure that 683 * we don't modify the free list if we fail to have an exact 684 * block. If we don't have an exact match, and every oher 685 * attempt allocation attempt fails, we'll end up cancelling 686 * a dirty transaction and shutting down. 687 * 688 * For an exact allocation, alignment must be 1, 689 * however we need to take cluster alignment into account when 690 * fixing up the freelist. Use the minalignslop field to 691 * indicate that extra blocks might be required for alignment, 692 * but not to use them in the actual exact allocation. 693 */ 694 args.alignment = 1; 695 args.minalignslop = igeo->cluster_align - 1; 696 697 /* Allow space for the inode btree to split. */ 698 args.minleft = igeo->inobt_maxlevels; 699 error = xfs_alloc_vextent_exact_bno(&args, 700 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 701 args.agbno)); 702 if (error) 703 return error; 704 705 /* 706 * This request might have dirtied the transaction if the AG can 707 * satisfy the request, but the exact block was not available. 708 * If the allocation did fail, subsequent requests will relax 709 * the exact agbno requirement and increase the alignment 710 * instead. It is critical that the total size of the request 711 * (len + alignment + slop) does not increase from this point 712 * on, so reset minalignslop to ensure it is not included in 713 * subsequent requests. 714 */ 715 args.minalignslop = 0; 716 } 717 718 if (unlikely(args.fsbno == NULLFSBLOCK)) { 719 /* 720 * Set the alignment for the allocation. 721 * If stripe alignment is turned on then align at stripe unit 722 * boundary. 723 * If the cluster size is smaller than a filesystem block 724 * then we're doing I/O for inodes in filesystem block size 725 * pieces, so don't need alignment anyway. 726 */ 727 isaligned = 0; 728 if (igeo->ialloc_align) { 729 ASSERT(!xfs_has_noalign(args.mp)); 730 args.alignment = args.mp->m_dalign; 731 isaligned = 1; 732 } else 733 args.alignment = igeo->cluster_align; 734 /* 735 * Allocate a fixed-size extent of inodes. 736 */ 737 args.prod = 1; 738 /* 739 * Allow space for the inode btree to split. 740 */ 741 args.minleft = igeo->inobt_maxlevels; 742 error = xfs_alloc_vextent_near_bno(&args, 743 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 744 be32_to_cpu(agi->agi_root))); 745 if (error) 746 return error; 747 } 748 749 /* 750 * If stripe alignment is turned on, then try again with cluster 751 * alignment. 752 */ 753 if (isaligned && args.fsbno == NULLFSBLOCK) { 754 args.alignment = igeo->cluster_align; 755 error = xfs_alloc_vextent_near_bno(&args, 756 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 757 be32_to_cpu(agi->agi_root))); 758 if (error) 759 return error; 760 } 761 762 /* 763 * Finally, try a sparse allocation if the filesystem supports it and 764 * the sparse allocation length is smaller than a full chunk. 765 */ 766 if (xfs_has_sparseinodes(args.mp) && 767 igeo->ialloc_min_blks < igeo->ialloc_blks && 768 args.fsbno == NULLFSBLOCK) { 769 sparse_alloc: 770 args.alignment = args.mp->m_sb.sb_spino_align; 771 args.prod = 1; 772 773 args.minlen = igeo->ialloc_min_blks; 774 args.maxlen = args.minlen; 775 776 /* 777 * The inode record will be aligned to full chunk size. We must 778 * prevent sparse allocation from AG boundaries that result in 779 * invalid inode records, such as records that start at agbno 0 780 * or extend beyond the AG. 781 * 782 * Set min agbno to the first aligned, non-zero agbno and max to 783 * the last aligned agbno that is at least one full chunk from 784 * the end of the AG. 785 */ 786 args.min_agbno = args.mp->m_sb.sb_inoalignmt; 787 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, 788 args.mp->m_sb.sb_inoalignmt) - 789 igeo->ialloc_blks; 790 791 error = xfs_alloc_vextent_near_bno(&args, 792 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 793 be32_to_cpu(agi->agi_root))); 794 if (error) 795 return error; 796 797 newlen = XFS_AGB_TO_AGINO(args.mp, args.len); 798 ASSERT(newlen <= XFS_INODES_PER_CHUNK); 799 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1; 800 } 801 802 if (args.fsbno == NULLFSBLOCK) 803 return -EAGAIN; 804 805 ASSERT(args.len == args.minlen); 806 807 /* 808 * Stamp and write the inode buffers. 809 * 810 * Seed the new inode cluster with a random generation number. This 811 * prevents short-term reuse of generation numbers if a chunk is 812 * freed and then immediately reallocated. We use random numbers 813 * rather than a linear progression to prevent the next generation 814 * number from being easily guessable. 815 */ 816 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, 817 args.agbno, args.len, get_random_u32()); 818 819 if (error) 820 return error; 821 /* 822 * Convert the results. 823 */ 824 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno); 825 826 if (xfs_inobt_issparse(~allocmask)) { 827 /* 828 * We've allocated a sparse chunk. Align the startino and mask. 829 */ 830 xfs_align_sparse_ino(args.mp, &newino, &allocmask); 831 832 rec.ir_startino = newino; 833 rec.ir_holemask = ~allocmask; 834 rec.ir_count = newlen; 835 rec.ir_freecount = newlen; 836 rec.ir_free = XFS_INOBT_ALL_FREE; 837 838 /* 839 * Insert the sparse record into the inobt and allow for a merge 840 * if necessary. If a merge does occur, rec is updated to the 841 * merged record. 842 */ 843 error = xfs_inobt_insert_sprec(pag, tp, agbp, 844 XFS_BTNUM_INO, &rec, true); 845 if (error == -EFSCORRUPTED) { 846 xfs_alert(args.mp, 847 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u", 848 XFS_AGINO_TO_INO(args.mp, pag->pag_agno, 849 rec.ir_startino), 850 rec.ir_holemask, rec.ir_count); 851 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE); 852 } 853 if (error) 854 return error; 855 856 /* 857 * We can't merge the part we've just allocated as for the inobt 858 * due to finobt semantics. The original record may or may not 859 * exist independent of whether physical inodes exist in this 860 * sparse chunk. 861 * 862 * We must update the finobt record based on the inobt record. 863 * rec contains the fully merged and up to date inobt record 864 * from the previous call. Set merge false to replace any 865 * existing record with this one. 866 */ 867 if (xfs_has_finobt(args.mp)) { 868 error = xfs_inobt_insert_sprec(pag, tp, agbp, 869 XFS_BTNUM_FINO, &rec, false); 870 if (error) 871 return error; 872 } 873 } else { 874 /* full chunk - insert new records to both btrees */ 875 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, 876 XFS_BTNUM_INO); 877 if (error) 878 return error; 879 880 if (xfs_has_finobt(args.mp)) { 881 error = xfs_inobt_insert(pag, tp, agbp, newino, 882 newlen, XFS_BTNUM_FINO); 883 if (error) 884 return error; 885 } 886 } 887 888 /* 889 * Update AGI counts and newino. 890 */ 891 be32_add_cpu(&agi->agi_count, newlen); 892 be32_add_cpu(&agi->agi_freecount, newlen); 893 pag->pagi_freecount += newlen; 894 pag->pagi_count += newlen; 895 agi->agi_newino = cpu_to_be32(newino); 896 897 /* 898 * Log allocation group header fields 899 */ 900 xfs_ialloc_log_agi(tp, agbp, 901 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); 902 /* 903 * Modify/log superblock values for inode count and inode free count. 904 */ 905 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); 906 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); 907 return 0; 908 } 909 910 /* 911 * Try to retrieve the next record to the left/right from the current one. 912 */ 913 STATIC int 914 xfs_ialloc_next_rec( 915 struct xfs_btree_cur *cur, 916 xfs_inobt_rec_incore_t *rec, 917 int *done, 918 int left) 919 { 920 int error; 921 int i; 922 923 if (left) 924 error = xfs_btree_decrement(cur, 0, &i); 925 else 926 error = xfs_btree_increment(cur, 0, &i); 927 928 if (error) 929 return error; 930 *done = !i; 931 if (i) { 932 error = xfs_inobt_get_rec(cur, rec, &i); 933 if (error) 934 return error; 935 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 936 return -EFSCORRUPTED; 937 } 938 939 return 0; 940 } 941 942 STATIC int 943 xfs_ialloc_get_rec( 944 struct xfs_btree_cur *cur, 945 xfs_agino_t agino, 946 xfs_inobt_rec_incore_t *rec, 947 int *done) 948 { 949 int error; 950 int i; 951 952 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); 953 if (error) 954 return error; 955 *done = !i; 956 if (i) { 957 error = xfs_inobt_get_rec(cur, rec, &i); 958 if (error) 959 return error; 960 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 961 return -EFSCORRUPTED; 962 } 963 964 return 0; 965 } 966 967 /* 968 * Return the offset of the first free inode in the record. If the inode chunk 969 * is sparsely allocated, we convert the record holemask to inode granularity 970 * and mask off the unallocated regions from the inode free mask. 971 */ 972 STATIC int 973 xfs_inobt_first_free_inode( 974 struct xfs_inobt_rec_incore *rec) 975 { 976 xfs_inofree_t realfree; 977 978 /* if there are no holes, return the first available offset */ 979 if (!xfs_inobt_issparse(rec->ir_holemask)) 980 return xfs_lowbit64(rec->ir_free); 981 982 realfree = xfs_inobt_irec_to_allocmask(rec); 983 realfree &= rec->ir_free; 984 985 return xfs_lowbit64(realfree); 986 } 987 988 /* 989 * Allocate an inode using the inobt-only algorithm. 990 */ 991 STATIC int 992 xfs_dialloc_ag_inobt( 993 struct xfs_perag *pag, 994 struct xfs_trans *tp, 995 struct xfs_buf *agbp, 996 xfs_ino_t parent, 997 xfs_ino_t *inop) 998 { 999 struct xfs_mount *mp = tp->t_mountp; 1000 struct xfs_agi *agi = agbp->b_addr; 1001 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1002 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1003 struct xfs_btree_cur *cur, *tcur; 1004 struct xfs_inobt_rec_incore rec, trec; 1005 xfs_ino_t ino; 1006 int error; 1007 int offset; 1008 int i, j; 1009 int searchdistance = 10; 1010 1011 ASSERT(xfs_perag_initialised_agi(pag)); 1012 ASSERT(xfs_perag_allows_inodes(pag)); 1013 ASSERT(pag->pagi_freecount > 0); 1014 1015 restart_pagno: 1016 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1017 /* 1018 * If pagino is 0 (this is the root inode allocation) use newino. 1019 * This must work because we've just allocated some. 1020 */ 1021 if (!pagino) 1022 pagino = be32_to_cpu(agi->agi_newino); 1023 1024 error = xfs_check_agi_freecount(cur); 1025 if (error) 1026 goto error0; 1027 1028 /* 1029 * If in the same AG as the parent, try to get near the parent. 1030 */ 1031 if (pagno == pag->pag_agno) { 1032 int doneleft; /* done, to the left */ 1033 int doneright; /* done, to the right */ 1034 1035 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 1036 if (error) 1037 goto error0; 1038 if (XFS_IS_CORRUPT(mp, i != 1)) { 1039 error = -EFSCORRUPTED; 1040 goto error0; 1041 } 1042 1043 error = xfs_inobt_get_rec(cur, &rec, &j); 1044 if (error) 1045 goto error0; 1046 if (XFS_IS_CORRUPT(mp, j != 1)) { 1047 error = -EFSCORRUPTED; 1048 goto error0; 1049 } 1050 1051 if (rec.ir_freecount > 0) { 1052 /* 1053 * Found a free inode in the same chunk 1054 * as the parent, done. 1055 */ 1056 goto alloc_inode; 1057 } 1058 1059 1060 /* 1061 * In the same AG as parent, but parent's chunk is full. 1062 */ 1063 1064 /* duplicate the cursor, search left & right simultaneously */ 1065 error = xfs_btree_dup_cursor(cur, &tcur); 1066 if (error) 1067 goto error0; 1068 1069 /* 1070 * Skip to last blocks looked up if same parent inode. 1071 */ 1072 if (pagino != NULLAGINO && 1073 pag->pagl_pagino == pagino && 1074 pag->pagl_leftrec != NULLAGINO && 1075 pag->pagl_rightrec != NULLAGINO) { 1076 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, 1077 &trec, &doneleft); 1078 if (error) 1079 goto error1; 1080 1081 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, 1082 &rec, &doneright); 1083 if (error) 1084 goto error1; 1085 } else { 1086 /* search left with tcur, back up 1 record */ 1087 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); 1088 if (error) 1089 goto error1; 1090 1091 /* search right with cur, go forward 1 record. */ 1092 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); 1093 if (error) 1094 goto error1; 1095 } 1096 1097 /* 1098 * Loop until we find an inode chunk with a free inode. 1099 */ 1100 while (--searchdistance > 0 && (!doneleft || !doneright)) { 1101 int useleft; /* using left inode chunk this time */ 1102 1103 /* figure out the closer block if both are valid. */ 1104 if (!doneleft && !doneright) { 1105 useleft = pagino - 1106 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < 1107 rec.ir_startino - pagino; 1108 } else { 1109 useleft = !doneleft; 1110 } 1111 1112 /* free inodes to the left? */ 1113 if (useleft && trec.ir_freecount) { 1114 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1115 cur = tcur; 1116 1117 pag->pagl_leftrec = trec.ir_startino; 1118 pag->pagl_rightrec = rec.ir_startino; 1119 pag->pagl_pagino = pagino; 1120 rec = trec; 1121 goto alloc_inode; 1122 } 1123 1124 /* free inodes to the right? */ 1125 if (!useleft && rec.ir_freecount) { 1126 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1127 1128 pag->pagl_leftrec = trec.ir_startino; 1129 pag->pagl_rightrec = rec.ir_startino; 1130 pag->pagl_pagino = pagino; 1131 goto alloc_inode; 1132 } 1133 1134 /* get next record to check */ 1135 if (useleft) { 1136 error = xfs_ialloc_next_rec(tcur, &trec, 1137 &doneleft, 1); 1138 } else { 1139 error = xfs_ialloc_next_rec(cur, &rec, 1140 &doneright, 0); 1141 } 1142 if (error) 1143 goto error1; 1144 } 1145 1146 if (searchdistance <= 0) { 1147 /* 1148 * Not in range - save last search 1149 * location and allocate a new inode 1150 */ 1151 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1152 pag->pagl_leftrec = trec.ir_startino; 1153 pag->pagl_rightrec = rec.ir_startino; 1154 pag->pagl_pagino = pagino; 1155 1156 } else { 1157 /* 1158 * We've reached the end of the btree. because 1159 * we are only searching a small chunk of the 1160 * btree each search, there is obviously free 1161 * inodes closer to the parent inode than we 1162 * are now. restart the search again. 1163 */ 1164 pag->pagl_pagino = NULLAGINO; 1165 pag->pagl_leftrec = NULLAGINO; 1166 pag->pagl_rightrec = NULLAGINO; 1167 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1168 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1169 goto restart_pagno; 1170 } 1171 } 1172 1173 /* 1174 * In a different AG from the parent. 1175 * See if the most recently allocated block has any free. 1176 */ 1177 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1178 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1179 XFS_LOOKUP_EQ, &i); 1180 if (error) 1181 goto error0; 1182 1183 if (i == 1) { 1184 error = xfs_inobt_get_rec(cur, &rec, &j); 1185 if (error) 1186 goto error0; 1187 1188 if (j == 1 && rec.ir_freecount > 0) { 1189 /* 1190 * The last chunk allocated in the group 1191 * still has a free inode. 1192 */ 1193 goto alloc_inode; 1194 } 1195 } 1196 } 1197 1198 /* 1199 * None left in the last group, search the whole AG 1200 */ 1201 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1202 if (error) 1203 goto error0; 1204 if (XFS_IS_CORRUPT(mp, i != 1)) { 1205 error = -EFSCORRUPTED; 1206 goto error0; 1207 } 1208 1209 for (;;) { 1210 error = xfs_inobt_get_rec(cur, &rec, &i); 1211 if (error) 1212 goto error0; 1213 if (XFS_IS_CORRUPT(mp, i != 1)) { 1214 error = -EFSCORRUPTED; 1215 goto error0; 1216 } 1217 if (rec.ir_freecount > 0) 1218 break; 1219 error = xfs_btree_increment(cur, 0, &i); 1220 if (error) 1221 goto error0; 1222 if (XFS_IS_CORRUPT(mp, i != 1)) { 1223 error = -EFSCORRUPTED; 1224 goto error0; 1225 } 1226 } 1227 1228 alloc_inode: 1229 offset = xfs_inobt_first_free_inode(&rec); 1230 ASSERT(offset >= 0); 1231 ASSERT(offset < XFS_INODES_PER_CHUNK); 1232 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1233 XFS_INODES_PER_CHUNK) == 0); 1234 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1235 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1236 rec.ir_freecount--; 1237 error = xfs_inobt_update(cur, &rec); 1238 if (error) 1239 goto error0; 1240 be32_add_cpu(&agi->agi_freecount, -1); 1241 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1242 pag->pagi_freecount--; 1243 1244 error = xfs_check_agi_freecount(cur); 1245 if (error) 1246 goto error0; 1247 1248 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1249 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1250 *inop = ino; 1251 return 0; 1252 error1: 1253 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 1254 error0: 1255 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1256 return error; 1257 } 1258 1259 /* 1260 * Use the free inode btree to allocate an inode based on distance from the 1261 * parent. Note that the provided cursor may be deleted and replaced. 1262 */ 1263 STATIC int 1264 xfs_dialloc_ag_finobt_near( 1265 xfs_agino_t pagino, 1266 struct xfs_btree_cur **ocur, 1267 struct xfs_inobt_rec_incore *rec) 1268 { 1269 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ 1270 struct xfs_btree_cur *rcur; /* right search cursor */ 1271 struct xfs_inobt_rec_incore rrec; 1272 int error; 1273 int i, j; 1274 1275 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); 1276 if (error) 1277 return error; 1278 1279 if (i == 1) { 1280 error = xfs_inobt_get_rec(lcur, rec, &i); 1281 if (error) 1282 return error; 1283 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) 1284 return -EFSCORRUPTED; 1285 1286 /* 1287 * See if we've landed in the parent inode record. The finobt 1288 * only tracks chunks with at least one free inode, so record 1289 * existence is enough. 1290 */ 1291 if (pagino >= rec->ir_startino && 1292 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) 1293 return 0; 1294 } 1295 1296 error = xfs_btree_dup_cursor(lcur, &rcur); 1297 if (error) 1298 return error; 1299 1300 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); 1301 if (error) 1302 goto error_rcur; 1303 if (j == 1) { 1304 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1305 if (error) 1306 goto error_rcur; 1307 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) { 1308 error = -EFSCORRUPTED; 1309 goto error_rcur; 1310 } 1311 } 1312 1313 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) { 1314 error = -EFSCORRUPTED; 1315 goto error_rcur; 1316 } 1317 if (i == 1 && j == 1) { 1318 /* 1319 * Both the left and right records are valid. Choose the closer 1320 * inode chunk to the target. 1321 */ 1322 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > 1323 (rrec.ir_startino - pagino)) { 1324 *rec = rrec; 1325 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1326 *ocur = rcur; 1327 } else { 1328 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1329 } 1330 } else if (j == 1) { 1331 /* only the right record is valid */ 1332 *rec = rrec; 1333 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1334 *ocur = rcur; 1335 } else if (i == 1) { 1336 /* only the left record is valid */ 1337 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1338 } 1339 1340 return 0; 1341 1342 error_rcur: 1343 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); 1344 return error; 1345 } 1346 1347 /* 1348 * Use the free inode btree to find a free inode based on a newino hint. If 1349 * the hint is NULL, find the first free inode in the AG. 1350 */ 1351 STATIC int 1352 xfs_dialloc_ag_finobt_newino( 1353 struct xfs_agi *agi, 1354 struct xfs_btree_cur *cur, 1355 struct xfs_inobt_rec_incore *rec) 1356 { 1357 int error; 1358 int i; 1359 1360 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1361 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1362 XFS_LOOKUP_EQ, &i); 1363 if (error) 1364 return error; 1365 if (i == 1) { 1366 error = xfs_inobt_get_rec(cur, rec, &i); 1367 if (error) 1368 return error; 1369 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1370 return -EFSCORRUPTED; 1371 return 0; 1372 } 1373 } 1374 1375 /* 1376 * Find the first inode available in the AG. 1377 */ 1378 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1379 if (error) 1380 return error; 1381 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1382 return -EFSCORRUPTED; 1383 1384 error = xfs_inobt_get_rec(cur, rec, &i); 1385 if (error) 1386 return error; 1387 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1388 return -EFSCORRUPTED; 1389 1390 return 0; 1391 } 1392 1393 /* 1394 * Update the inobt based on a modification made to the finobt. Also ensure that 1395 * the records from both trees are equivalent post-modification. 1396 */ 1397 STATIC int 1398 xfs_dialloc_ag_update_inobt( 1399 struct xfs_btree_cur *cur, /* inobt cursor */ 1400 struct xfs_inobt_rec_incore *frec, /* finobt record */ 1401 int offset) /* inode offset */ 1402 { 1403 struct xfs_inobt_rec_incore rec; 1404 int error; 1405 int i; 1406 1407 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1408 if (error) 1409 return error; 1410 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1411 return -EFSCORRUPTED; 1412 1413 error = xfs_inobt_get_rec(cur, &rec, &i); 1414 if (error) 1415 return error; 1416 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1417 return -EFSCORRUPTED; 1418 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1419 XFS_INODES_PER_CHUNK) == 0); 1420 1421 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1422 rec.ir_freecount--; 1423 1424 if (XFS_IS_CORRUPT(cur->bc_mp, 1425 rec.ir_free != frec->ir_free || 1426 rec.ir_freecount != frec->ir_freecount)) 1427 return -EFSCORRUPTED; 1428 1429 return xfs_inobt_update(cur, &rec); 1430 } 1431 1432 /* 1433 * Allocate an inode using the free inode btree, if available. Otherwise, fall 1434 * back to the inobt search algorithm. 1435 * 1436 * The caller selected an AG for us, and made sure that free inodes are 1437 * available. 1438 */ 1439 static int 1440 xfs_dialloc_ag( 1441 struct xfs_perag *pag, 1442 struct xfs_trans *tp, 1443 struct xfs_buf *agbp, 1444 xfs_ino_t parent, 1445 xfs_ino_t *inop) 1446 { 1447 struct xfs_mount *mp = tp->t_mountp; 1448 struct xfs_agi *agi = agbp->b_addr; 1449 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1450 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1451 struct xfs_btree_cur *cur; /* finobt cursor */ 1452 struct xfs_btree_cur *icur; /* inobt cursor */ 1453 struct xfs_inobt_rec_incore rec; 1454 xfs_ino_t ino; 1455 int error; 1456 int offset; 1457 int i; 1458 1459 if (!xfs_has_finobt(mp)) 1460 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop); 1461 1462 /* 1463 * If pagino is 0 (this is the root inode allocation) use newino. 1464 * This must work because we've just allocated some. 1465 */ 1466 if (!pagino) 1467 pagino = be32_to_cpu(agi->agi_newino); 1468 1469 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 1470 1471 error = xfs_check_agi_freecount(cur); 1472 if (error) 1473 goto error_cur; 1474 1475 /* 1476 * The search algorithm depends on whether we're in the same AG as the 1477 * parent. If so, find the closest available inode to the parent. If 1478 * not, consider the agi hint or find the first free inode in the AG. 1479 */ 1480 if (pag->pag_agno == pagno) 1481 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); 1482 else 1483 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); 1484 if (error) 1485 goto error_cur; 1486 1487 offset = xfs_inobt_first_free_inode(&rec); 1488 ASSERT(offset >= 0); 1489 ASSERT(offset < XFS_INODES_PER_CHUNK); 1490 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1491 XFS_INODES_PER_CHUNK) == 0); 1492 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1493 1494 /* 1495 * Modify or remove the finobt record. 1496 */ 1497 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1498 rec.ir_freecount--; 1499 if (rec.ir_freecount) 1500 error = xfs_inobt_update(cur, &rec); 1501 else 1502 error = xfs_btree_delete(cur, &i); 1503 if (error) 1504 goto error_cur; 1505 1506 /* 1507 * The finobt has now been updated appropriately. We haven't updated the 1508 * agi and superblock yet, so we can create an inobt cursor and validate 1509 * the original freecount. If all is well, make the equivalent update to 1510 * the inobt using the finobt record and offset information. 1511 */ 1512 icur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1513 1514 error = xfs_check_agi_freecount(icur); 1515 if (error) 1516 goto error_icur; 1517 1518 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset); 1519 if (error) 1520 goto error_icur; 1521 1522 /* 1523 * Both trees have now been updated. We must update the perag and 1524 * superblock before we can check the freecount for each btree. 1525 */ 1526 be32_add_cpu(&agi->agi_freecount, -1); 1527 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1528 pag->pagi_freecount--; 1529 1530 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1531 1532 error = xfs_check_agi_freecount(icur); 1533 if (error) 1534 goto error_icur; 1535 error = xfs_check_agi_freecount(cur); 1536 if (error) 1537 goto error_icur; 1538 1539 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR); 1540 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1541 *inop = ino; 1542 return 0; 1543 1544 error_icur: 1545 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR); 1546 error_cur: 1547 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1548 return error; 1549 } 1550 1551 static int 1552 xfs_dialloc_roll( 1553 struct xfs_trans **tpp, 1554 struct xfs_buf *agibp) 1555 { 1556 struct xfs_trans *tp = *tpp; 1557 struct xfs_dquot_acct *dqinfo; 1558 int error; 1559 1560 /* 1561 * Hold to on to the agibp across the commit so no other allocation can 1562 * come in and take the free inodes we just allocated for our caller. 1563 */ 1564 xfs_trans_bhold(tp, agibp); 1565 1566 /* 1567 * We want the quota changes to be associated with the next transaction, 1568 * NOT this one. So, detach the dqinfo from this and attach it to the 1569 * next transaction. 1570 */ 1571 dqinfo = tp->t_dqinfo; 1572 tp->t_dqinfo = NULL; 1573 1574 error = xfs_trans_roll(&tp); 1575 1576 /* Re-attach the quota info that we detached from prev trx. */ 1577 tp->t_dqinfo = dqinfo; 1578 1579 /* 1580 * Join the buffer even on commit error so that the buffer is released 1581 * when the caller cancels the transaction and doesn't have to handle 1582 * this error case specially. 1583 */ 1584 xfs_trans_bjoin(tp, agibp); 1585 *tpp = tp; 1586 return error; 1587 } 1588 1589 static bool 1590 xfs_dialloc_good_ag( 1591 struct xfs_perag *pag, 1592 struct xfs_trans *tp, 1593 umode_t mode, 1594 int flags, 1595 bool ok_alloc) 1596 { 1597 struct xfs_mount *mp = tp->t_mountp; 1598 xfs_extlen_t ineed; 1599 xfs_extlen_t longest = 0; 1600 int needspace; 1601 int error; 1602 1603 if (!pag) 1604 return false; 1605 if (!xfs_perag_allows_inodes(pag)) 1606 return false; 1607 1608 if (!xfs_perag_initialised_agi(pag)) { 1609 error = xfs_ialloc_read_agi(pag, tp, NULL); 1610 if (error) 1611 return false; 1612 } 1613 1614 if (pag->pagi_freecount) 1615 return true; 1616 if (!ok_alloc) 1617 return false; 1618 1619 if (!xfs_perag_initialised_agf(pag)) { 1620 error = xfs_alloc_read_agf(pag, tp, flags, NULL); 1621 if (error) 1622 return false; 1623 } 1624 1625 /* 1626 * Check that there is enough free space for the file plus a chunk of 1627 * inodes if we need to allocate some. If this is the first pass across 1628 * the AGs, take into account the potential space needed for alignment 1629 * of inode chunks when checking the longest contiguous free space in 1630 * the AG - this prevents us from getting ENOSPC because we have free 1631 * space larger than ialloc_blks but alignment constraints prevent us 1632 * from using it. 1633 * 1634 * If we can't find an AG with space for full alignment slack to be 1635 * taken into account, we must be near ENOSPC in all AGs. Hence we 1636 * don't include alignment for the second pass and so if we fail 1637 * allocation due to alignment issues then it is most likely a real 1638 * ENOSPC condition. 1639 * 1640 * XXX(dgc): this calculation is now bogus thanks to the per-ag 1641 * reservations that xfs_alloc_fix_freelist() now does via 1642 * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will 1643 * be more than large enough for the check below to succeed, but 1644 * xfs_alloc_space_available() will fail because of the non-zero 1645 * metadata reservation and hence we won't actually be able to allocate 1646 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC 1647 * because of this. 1648 */ 1649 ineed = M_IGEO(mp)->ialloc_min_blks; 1650 if (flags && ineed > 1) 1651 ineed += M_IGEO(mp)->cluster_align; 1652 longest = pag->pagf_longest; 1653 if (!longest) 1654 longest = pag->pagf_flcount > 0; 1655 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); 1656 1657 if (pag->pagf_freeblks < needspace + ineed || longest < ineed) 1658 return false; 1659 return true; 1660 } 1661 1662 static int 1663 xfs_dialloc_try_ag( 1664 struct xfs_perag *pag, 1665 struct xfs_trans **tpp, 1666 xfs_ino_t parent, 1667 xfs_ino_t *new_ino, 1668 bool ok_alloc) 1669 { 1670 struct xfs_buf *agbp; 1671 xfs_ino_t ino; 1672 int error; 1673 1674 /* 1675 * Then read in the AGI buffer and recheck with the AGI buffer 1676 * lock held. 1677 */ 1678 error = xfs_ialloc_read_agi(pag, *tpp, &agbp); 1679 if (error) 1680 return error; 1681 1682 if (!pag->pagi_freecount) { 1683 if (!ok_alloc) { 1684 error = -EAGAIN; 1685 goto out_release; 1686 } 1687 1688 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp); 1689 if (error < 0) 1690 goto out_release; 1691 1692 /* 1693 * We successfully allocated space for an inode cluster in this 1694 * AG. Roll the transaction so that we can allocate one of the 1695 * new inodes. 1696 */ 1697 ASSERT(pag->pagi_freecount > 0); 1698 error = xfs_dialloc_roll(tpp, agbp); 1699 if (error) 1700 goto out_release; 1701 } 1702 1703 /* Allocate an inode in the found AG */ 1704 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino); 1705 if (!error) 1706 *new_ino = ino; 1707 return error; 1708 1709 out_release: 1710 xfs_trans_brelse(*tpp, agbp); 1711 return error; 1712 } 1713 1714 /* 1715 * Allocate an on-disk inode. 1716 * 1717 * Mode is used to tell whether the new inode is a directory and hence where to 1718 * locate it. The on-disk inode that is allocated will be returned in @new_ino 1719 * on success, otherwise an error will be set to indicate the failure (e.g. 1720 * -ENOSPC). 1721 */ 1722 int 1723 xfs_dialloc( 1724 struct xfs_trans **tpp, 1725 xfs_ino_t parent, 1726 umode_t mode, 1727 xfs_ino_t *new_ino) 1728 { 1729 struct xfs_mount *mp = (*tpp)->t_mountp; 1730 xfs_agnumber_t agno; 1731 int error = 0; 1732 xfs_agnumber_t start_agno; 1733 struct xfs_perag *pag; 1734 struct xfs_ino_geometry *igeo = M_IGEO(mp); 1735 bool ok_alloc = true; 1736 bool low_space = false; 1737 int flags; 1738 xfs_ino_t ino = NULLFSINO; 1739 1740 /* 1741 * Directories, symlinks, and regular files frequently allocate at least 1742 * one block, so factor that potential expansion when we examine whether 1743 * an AG has enough space for file creation. 1744 */ 1745 if (S_ISDIR(mode)) 1746 start_agno = (atomic_inc_return(&mp->m_agirotor) - 1) % 1747 mp->m_maxagi; 1748 else { 1749 start_agno = XFS_INO_TO_AGNO(mp, parent); 1750 if (start_agno >= mp->m_maxagi) 1751 start_agno = 0; 1752 } 1753 1754 /* 1755 * If we have already hit the ceiling of inode blocks then clear 1756 * ok_alloc so we scan all available agi structures for a free 1757 * inode. 1758 * 1759 * Read rough value of mp->m_icount by percpu_counter_read_positive, 1760 * which will sacrifice the preciseness but improve the performance. 1761 */ 1762 if (igeo->maxicount && 1763 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos 1764 > igeo->maxicount) { 1765 ok_alloc = false; 1766 } 1767 1768 /* 1769 * If we are near to ENOSPC, we want to prefer allocation from AGs that 1770 * have free inodes in them rather than use up free space allocating new 1771 * inode chunks. Hence we turn off allocation for the first non-blocking 1772 * pass through the AGs if we are near ENOSPC to consume free inodes 1773 * that we can immediately allocate, but then we allow allocation on the 1774 * second pass if we fail to find an AG with free inodes in it. 1775 */ 1776 if (percpu_counter_read_positive(&mp->m_fdblocks) < 1777 mp->m_low_space[XFS_LOWSP_1_PCNT]) { 1778 ok_alloc = false; 1779 low_space = true; 1780 } 1781 1782 /* 1783 * Loop until we find an allocation group that either has free inodes 1784 * or in which we can allocate some inodes. Iterate through the 1785 * allocation groups upward, wrapping at the end. 1786 */ 1787 flags = XFS_ALLOC_FLAG_TRYLOCK; 1788 retry: 1789 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) { 1790 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) { 1791 error = xfs_dialloc_try_ag(pag, tpp, parent, 1792 &ino, ok_alloc); 1793 if (error != -EAGAIN) 1794 break; 1795 error = 0; 1796 } 1797 1798 if (xfs_is_shutdown(mp)) { 1799 error = -EFSCORRUPTED; 1800 break; 1801 } 1802 } 1803 if (pag) 1804 xfs_perag_rele(pag); 1805 if (error) 1806 return error; 1807 if (ino == NULLFSINO) { 1808 if (flags) { 1809 flags = 0; 1810 if (low_space) 1811 ok_alloc = true; 1812 goto retry; 1813 } 1814 return -ENOSPC; 1815 } 1816 *new_ino = ino; 1817 return 0; 1818 } 1819 1820 /* 1821 * Free the blocks of an inode chunk. We must consider that the inode chunk 1822 * might be sparse and only free the regions that are allocated as part of the 1823 * chunk. 1824 */ 1825 STATIC void 1826 xfs_difree_inode_chunk( 1827 struct xfs_trans *tp, 1828 xfs_agnumber_t agno, 1829 struct xfs_inobt_rec_incore *rec) 1830 { 1831 struct xfs_mount *mp = tp->t_mountp; 1832 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, 1833 rec->ir_startino); 1834 int startidx, endidx; 1835 int nextbit; 1836 xfs_agblock_t agbno; 1837 int contigblk; 1838 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS); 1839 1840 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1841 /* not sparse, calculate extent info directly */ 1842 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), 1843 M_IGEO(mp)->ialloc_blks, 1844 &XFS_RMAP_OINFO_INODES); 1845 return; 1846 } 1847 1848 /* holemask is only 16-bits (fits in an unsigned long) */ 1849 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0])); 1850 holemask[0] = rec->ir_holemask; 1851 1852 /* 1853 * Find contiguous ranges of zeroes (i.e., allocated regions) in the 1854 * holemask and convert the start/end index of each range to an extent. 1855 * We start with the start and end index both pointing at the first 0 in 1856 * the mask. 1857 */ 1858 startidx = endidx = find_first_zero_bit(holemask, 1859 XFS_INOBT_HOLEMASK_BITS); 1860 nextbit = startidx + 1; 1861 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1862 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1863 nextbit); 1864 /* 1865 * If the next zero bit is contiguous, update the end index of 1866 * the current range and continue. 1867 */ 1868 if (nextbit != XFS_INOBT_HOLEMASK_BITS && 1869 nextbit == endidx + 1) { 1870 endidx = nextbit; 1871 goto next; 1872 } 1873 1874 /* 1875 * nextbit is not contiguous with the current end index. Convert 1876 * the current start/end to an extent and add it to the free 1877 * list. 1878 */ 1879 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) / 1880 mp->m_sb.sb_inopblock; 1881 contigblk = ((endidx - startidx + 1) * 1882 XFS_INODES_PER_HOLEMASK_BIT) / 1883 mp->m_sb.sb_inopblock; 1884 1885 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1886 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1887 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno), 1888 contigblk, &XFS_RMAP_OINFO_INODES); 1889 1890 /* reset range to current bit and carry on... */ 1891 startidx = endidx = nextbit; 1892 1893 next: 1894 nextbit++; 1895 } 1896 } 1897 1898 STATIC int 1899 xfs_difree_inobt( 1900 struct xfs_perag *pag, 1901 struct xfs_trans *tp, 1902 struct xfs_buf *agbp, 1903 xfs_agino_t agino, 1904 struct xfs_icluster *xic, 1905 struct xfs_inobt_rec_incore *orec) 1906 { 1907 struct xfs_mount *mp = pag->pag_mount; 1908 struct xfs_agi *agi = agbp->b_addr; 1909 struct xfs_btree_cur *cur; 1910 struct xfs_inobt_rec_incore rec; 1911 int ilen; 1912 int error; 1913 int i; 1914 int off; 1915 1916 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 1917 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); 1918 1919 /* 1920 * Initialize the cursor. 1921 */ 1922 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1923 1924 error = xfs_check_agi_freecount(cur); 1925 if (error) 1926 goto error0; 1927 1928 /* 1929 * Look for the entry describing this inode. 1930 */ 1931 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1932 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", 1933 __func__, error); 1934 goto error0; 1935 } 1936 if (XFS_IS_CORRUPT(mp, i != 1)) { 1937 error = -EFSCORRUPTED; 1938 goto error0; 1939 } 1940 error = xfs_inobt_get_rec(cur, &rec, &i); 1941 if (error) { 1942 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1943 __func__, error); 1944 goto error0; 1945 } 1946 if (XFS_IS_CORRUPT(mp, i != 1)) { 1947 error = -EFSCORRUPTED; 1948 goto error0; 1949 } 1950 /* 1951 * Get the offset in the inode chunk. 1952 */ 1953 off = agino - rec.ir_startino; 1954 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); 1955 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off))); 1956 /* 1957 * Mark the inode free & increment the count. 1958 */ 1959 rec.ir_free |= XFS_INOBT_MASK(off); 1960 rec.ir_freecount++; 1961 1962 /* 1963 * When an inode chunk is free, it becomes eligible for removal. Don't 1964 * remove the chunk if the block size is large enough for multiple inode 1965 * chunks (that might not be free). 1966 */ 1967 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 1968 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1969 struct xfs_perag *pag = agbp->b_pag; 1970 1971 xic->deleted = true; 1972 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, 1973 rec.ir_startino); 1974 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1975 1976 /* 1977 * Remove the inode cluster from the AGI B+Tree, adjust the 1978 * AGI and Superblock inode counts, and mark the disk space 1979 * to be freed when the transaction is committed. 1980 */ 1981 ilen = rec.ir_freecount; 1982 be32_add_cpu(&agi->agi_count, -ilen); 1983 be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); 1984 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); 1985 pag->pagi_freecount -= ilen - 1; 1986 pag->pagi_count -= ilen; 1987 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); 1988 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 1989 1990 if ((error = xfs_btree_delete(cur, &i))) { 1991 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", 1992 __func__, error); 1993 goto error0; 1994 } 1995 1996 xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 1997 } else { 1998 xic->deleted = false; 1999 2000 error = xfs_inobt_update(cur, &rec); 2001 if (error) { 2002 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", 2003 __func__, error); 2004 goto error0; 2005 } 2006 2007 /* 2008 * Change the inode free counts and log the ag/sb changes. 2009 */ 2010 be32_add_cpu(&agi->agi_freecount, 1); 2011 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 2012 pag->pagi_freecount++; 2013 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); 2014 } 2015 2016 error = xfs_check_agi_freecount(cur); 2017 if (error) 2018 goto error0; 2019 2020 *orec = rec; 2021 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2022 return 0; 2023 2024 error0: 2025 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2026 return error; 2027 } 2028 2029 /* 2030 * Free an inode in the free inode btree. 2031 */ 2032 STATIC int 2033 xfs_difree_finobt( 2034 struct xfs_perag *pag, 2035 struct xfs_trans *tp, 2036 struct xfs_buf *agbp, 2037 xfs_agino_t agino, 2038 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ 2039 { 2040 struct xfs_mount *mp = pag->pag_mount; 2041 struct xfs_btree_cur *cur; 2042 struct xfs_inobt_rec_incore rec; 2043 int offset = agino - ibtrec->ir_startino; 2044 int error; 2045 int i; 2046 2047 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 2048 2049 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); 2050 if (error) 2051 goto error; 2052 if (i == 0) { 2053 /* 2054 * If the record does not exist in the finobt, we must have just 2055 * freed an inode in a previously fully allocated chunk. If not, 2056 * something is out of sync. 2057 */ 2058 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) { 2059 error = -EFSCORRUPTED; 2060 goto error; 2061 } 2062 2063 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask, 2064 ibtrec->ir_count, 2065 ibtrec->ir_freecount, 2066 ibtrec->ir_free, &i); 2067 if (error) 2068 goto error; 2069 ASSERT(i == 1); 2070 2071 goto out; 2072 } 2073 2074 /* 2075 * Read and update the existing record. We could just copy the ibtrec 2076 * across here, but that would defeat the purpose of having redundant 2077 * metadata. By making the modifications independently, we can catch 2078 * corruptions that we wouldn't see if we just copied from one record 2079 * to another. 2080 */ 2081 error = xfs_inobt_get_rec(cur, &rec, &i); 2082 if (error) 2083 goto error; 2084 if (XFS_IS_CORRUPT(mp, i != 1)) { 2085 error = -EFSCORRUPTED; 2086 goto error; 2087 } 2088 2089 rec.ir_free |= XFS_INOBT_MASK(offset); 2090 rec.ir_freecount++; 2091 2092 if (XFS_IS_CORRUPT(mp, 2093 rec.ir_free != ibtrec->ir_free || 2094 rec.ir_freecount != ibtrec->ir_freecount)) { 2095 error = -EFSCORRUPTED; 2096 goto error; 2097 } 2098 2099 /* 2100 * The content of inobt records should always match between the inobt 2101 * and finobt. The lifecycle of records in the finobt is different from 2102 * the inobt in that the finobt only tracks records with at least one 2103 * free inode. Hence, if all of the inodes are free and we aren't 2104 * keeping inode chunks permanently on disk, remove the record. 2105 * Otherwise, update the record with the new information. 2106 * 2107 * Note that we currently can't free chunks when the block size is large 2108 * enough for multiple chunks. Leave the finobt record to remain in sync 2109 * with the inobt. 2110 */ 2111 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 2112 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 2113 error = xfs_btree_delete(cur, &i); 2114 if (error) 2115 goto error; 2116 ASSERT(i == 1); 2117 } else { 2118 error = xfs_inobt_update(cur, &rec); 2119 if (error) 2120 goto error; 2121 } 2122 2123 out: 2124 error = xfs_check_agi_freecount(cur); 2125 if (error) 2126 goto error; 2127 2128 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2129 return 0; 2130 2131 error: 2132 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2133 return error; 2134 } 2135 2136 /* 2137 * Free disk inode. Carefully avoids touching the incore inode, all 2138 * manipulations incore are the caller's responsibility. 2139 * The on-disk inode is not changed by this operation, only the 2140 * btree (free inode mask) is changed. 2141 */ 2142 int 2143 xfs_difree( 2144 struct xfs_trans *tp, 2145 struct xfs_perag *pag, 2146 xfs_ino_t inode, 2147 struct xfs_icluster *xic) 2148 { 2149 /* REFERENCED */ 2150 xfs_agblock_t agbno; /* block number containing inode */ 2151 struct xfs_buf *agbp; /* buffer for allocation group header */ 2152 xfs_agino_t agino; /* allocation group inode number */ 2153 int error; /* error return value */ 2154 struct xfs_mount *mp = tp->t_mountp; 2155 struct xfs_inobt_rec_incore rec;/* btree record */ 2156 2157 /* 2158 * Break up inode number into its components. 2159 */ 2160 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) { 2161 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).", 2162 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno); 2163 ASSERT(0); 2164 return -EINVAL; 2165 } 2166 agino = XFS_INO_TO_AGINO(mp, inode); 2167 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2168 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", 2169 __func__, (unsigned long long)inode, 2170 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2171 ASSERT(0); 2172 return -EINVAL; 2173 } 2174 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2175 if (agbno >= mp->m_sb.sb_agblocks) { 2176 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 2177 __func__, agbno, mp->m_sb.sb_agblocks); 2178 ASSERT(0); 2179 return -EINVAL; 2180 } 2181 /* 2182 * Get the allocation group header. 2183 */ 2184 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2185 if (error) { 2186 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", 2187 __func__, error); 2188 return error; 2189 } 2190 2191 /* 2192 * Fix up the inode allocation btree. 2193 */ 2194 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec); 2195 if (error) 2196 goto error0; 2197 2198 /* 2199 * Fix up the free inode btree. 2200 */ 2201 if (xfs_has_finobt(mp)) { 2202 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec); 2203 if (error) 2204 goto error0; 2205 } 2206 2207 return 0; 2208 2209 error0: 2210 return error; 2211 } 2212 2213 STATIC int 2214 xfs_imap_lookup( 2215 struct xfs_perag *pag, 2216 struct xfs_trans *tp, 2217 xfs_agino_t agino, 2218 xfs_agblock_t agbno, 2219 xfs_agblock_t *chunk_agbno, 2220 xfs_agblock_t *offset_agbno, 2221 int flags) 2222 { 2223 struct xfs_mount *mp = pag->pag_mount; 2224 struct xfs_inobt_rec_incore rec; 2225 struct xfs_btree_cur *cur; 2226 struct xfs_buf *agbp; 2227 int error; 2228 int i; 2229 2230 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2231 if (error) { 2232 xfs_alert(mp, 2233 "%s: xfs_ialloc_read_agi() returned error %d, agno %d", 2234 __func__, error, pag->pag_agno); 2235 return error; 2236 } 2237 2238 /* 2239 * Lookup the inode record for the given agino. If the record cannot be 2240 * found, then it's an invalid inode number and we should abort. Once 2241 * we have a record, we need to ensure it contains the inode number 2242 * we are looking up. 2243 */ 2244 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 2245 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); 2246 if (!error) { 2247 if (i) 2248 error = xfs_inobt_get_rec(cur, &rec, &i); 2249 if (!error && i == 0) 2250 error = -EINVAL; 2251 } 2252 2253 xfs_trans_brelse(tp, agbp); 2254 xfs_btree_del_cursor(cur, error); 2255 if (error) 2256 return error; 2257 2258 /* check that the returned record contains the required inode */ 2259 if (rec.ir_startino > agino || 2260 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino) 2261 return -EINVAL; 2262 2263 /* for untrusted inodes check it is allocated first */ 2264 if ((flags & XFS_IGET_UNTRUSTED) && 2265 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 2266 return -EINVAL; 2267 2268 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 2269 *offset_agbno = agbno - *chunk_agbno; 2270 return 0; 2271 } 2272 2273 /* 2274 * Return the location of the inode in imap, for mapping it into a buffer. 2275 */ 2276 int 2277 xfs_imap( 2278 struct xfs_perag *pag, 2279 struct xfs_trans *tp, 2280 xfs_ino_t ino, /* inode to locate */ 2281 struct xfs_imap *imap, /* location map structure */ 2282 uint flags) /* flags for inode btree lookup */ 2283 { 2284 struct xfs_mount *mp = pag->pag_mount; 2285 xfs_agblock_t agbno; /* block number of inode in the alloc group */ 2286 xfs_agino_t agino; /* inode number within alloc group */ 2287 xfs_agblock_t chunk_agbno; /* first block in inode chunk */ 2288 xfs_agblock_t cluster_agbno; /* first block in inode cluster */ 2289 int error; /* error code */ 2290 int offset; /* index of inode in its buffer */ 2291 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */ 2292 2293 ASSERT(ino != NULLFSINO); 2294 2295 /* 2296 * Split up the inode number into its parts. 2297 */ 2298 agino = XFS_INO_TO_AGINO(mp, ino); 2299 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2300 if (agbno >= mp->m_sb.sb_agblocks || 2301 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2302 error = -EINVAL; 2303 #ifdef DEBUG 2304 /* 2305 * Don't output diagnostic information for untrusted inodes 2306 * as they can be invalid without implying corruption. 2307 */ 2308 if (flags & XFS_IGET_UNTRUSTED) 2309 return error; 2310 if (agbno >= mp->m_sb.sb_agblocks) { 2311 xfs_alert(mp, 2312 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", 2313 __func__, (unsigned long long)agbno, 2314 (unsigned long)mp->m_sb.sb_agblocks); 2315 } 2316 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2317 xfs_alert(mp, 2318 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", 2319 __func__, ino, 2320 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2321 } 2322 xfs_stack_trace(); 2323 #endif /* DEBUG */ 2324 return error; 2325 } 2326 2327 /* 2328 * For bulkstat and handle lookups, we have an untrusted inode number 2329 * that we have to verify is valid. We cannot do this just by reading 2330 * the inode buffer as it may have been unlinked and removed leaving 2331 * inodes in stale state on disk. Hence we have to do a btree lookup 2332 * in all cases where an untrusted inode number is passed. 2333 */ 2334 if (flags & XFS_IGET_UNTRUSTED) { 2335 error = xfs_imap_lookup(pag, tp, agino, agbno, 2336 &chunk_agbno, &offset_agbno, flags); 2337 if (error) 2338 return error; 2339 goto out_map; 2340 } 2341 2342 /* 2343 * If the inode cluster size is the same as the blocksize or 2344 * smaller we get to the buffer by simple arithmetics. 2345 */ 2346 if (M_IGEO(mp)->blocks_per_cluster == 1) { 2347 offset = XFS_INO_TO_OFFSET(mp, ino); 2348 ASSERT(offset < mp->m_sb.sb_inopblock); 2349 2350 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno); 2351 imap->im_len = XFS_FSB_TO_BB(mp, 1); 2352 imap->im_boffset = (unsigned short)(offset << 2353 mp->m_sb.sb_inodelog); 2354 return 0; 2355 } 2356 2357 /* 2358 * If the inode chunks are aligned then use simple maths to 2359 * find the location. Otherwise we have to do a btree 2360 * lookup to find the location. 2361 */ 2362 if (M_IGEO(mp)->inoalign_mask) { 2363 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask; 2364 chunk_agbno = agbno - offset_agbno; 2365 } else { 2366 error = xfs_imap_lookup(pag, tp, agino, agbno, 2367 &chunk_agbno, &offset_agbno, flags); 2368 if (error) 2369 return error; 2370 } 2371 2372 out_map: 2373 ASSERT(agbno >= chunk_agbno); 2374 cluster_agbno = chunk_agbno + 2375 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) * 2376 M_IGEO(mp)->blocks_per_cluster); 2377 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + 2378 XFS_INO_TO_OFFSET(mp, ino); 2379 2380 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno); 2381 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); 2382 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog); 2383 2384 /* 2385 * If the inode number maps to a block outside the bounds 2386 * of the file system then return NULL rather than calling 2387 * read_buf and panicing when we get an error from the 2388 * driver. 2389 */ 2390 if ((imap->im_blkno + imap->im_len) > 2391 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 2392 xfs_alert(mp, 2393 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", 2394 __func__, (unsigned long long) imap->im_blkno, 2395 (unsigned long long) imap->im_len, 2396 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 2397 return -EINVAL; 2398 } 2399 return 0; 2400 } 2401 2402 /* 2403 * Log specified fields for the ag hdr (inode section). The growth of the agi 2404 * structure over time requires that we interpret the buffer as two logical 2405 * regions delineated by the end of the unlinked list. This is due to the size 2406 * of the hash table and its location in the middle of the agi. 2407 * 2408 * For example, a request to log a field before agi_unlinked and a field after 2409 * agi_unlinked could cause us to log the entire hash table and use an excessive 2410 * amount of log space. To avoid this behavior, log the region up through 2411 * agi_unlinked in one call and the region after agi_unlinked through the end of 2412 * the structure in another. 2413 */ 2414 void 2415 xfs_ialloc_log_agi( 2416 struct xfs_trans *tp, 2417 struct xfs_buf *bp, 2418 uint32_t fields) 2419 { 2420 int first; /* first byte number */ 2421 int last; /* last byte number */ 2422 static const short offsets[] = { /* field starting offsets */ 2423 /* keep in sync with bit definitions */ 2424 offsetof(xfs_agi_t, agi_magicnum), 2425 offsetof(xfs_agi_t, agi_versionnum), 2426 offsetof(xfs_agi_t, agi_seqno), 2427 offsetof(xfs_agi_t, agi_length), 2428 offsetof(xfs_agi_t, agi_count), 2429 offsetof(xfs_agi_t, agi_root), 2430 offsetof(xfs_agi_t, agi_level), 2431 offsetof(xfs_agi_t, agi_freecount), 2432 offsetof(xfs_agi_t, agi_newino), 2433 offsetof(xfs_agi_t, agi_dirino), 2434 offsetof(xfs_agi_t, agi_unlinked), 2435 offsetof(xfs_agi_t, agi_free_root), 2436 offsetof(xfs_agi_t, agi_free_level), 2437 offsetof(xfs_agi_t, agi_iblocks), 2438 sizeof(xfs_agi_t) 2439 }; 2440 #ifdef DEBUG 2441 struct xfs_agi *agi = bp->b_addr; 2442 2443 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 2444 #endif 2445 2446 /* 2447 * Compute byte offsets for the first and last fields in the first 2448 * region and log the agi buffer. This only logs up through 2449 * agi_unlinked. 2450 */ 2451 if (fields & XFS_AGI_ALL_BITS_R1) { 2452 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1, 2453 &first, &last); 2454 xfs_trans_log_buf(tp, bp, first, last); 2455 } 2456 2457 /* 2458 * Mask off the bits in the first region and calculate the first and 2459 * last field offsets for any bits in the second region. 2460 */ 2461 fields &= ~XFS_AGI_ALL_BITS_R1; 2462 if (fields) { 2463 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2, 2464 &first, &last); 2465 xfs_trans_log_buf(tp, bp, first, last); 2466 } 2467 } 2468 2469 static xfs_failaddr_t 2470 xfs_agi_verify( 2471 struct xfs_buf *bp) 2472 { 2473 struct xfs_mount *mp = bp->b_mount; 2474 struct xfs_agi *agi = bp->b_addr; 2475 int i; 2476 2477 if (xfs_has_crc(mp)) { 2478 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid)) 2479 return __this_address; 2480 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn))) 2481 return __this_address; 2482 } 2483 2484 /* 2485 * Validate the magic number of the agi block. 2486 */ 2487 if (!xfs_verify_magic(bp, agi->agi_magicnum)) 2488 return __this_address; 2489 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) 2490 return __this_address; 2491 2492 if (be32_to_cpu(agi->agi_level) < 1 || 2493 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels) 2494 return __this_address; 2495 2496 if (xfs_has_finobt(mp) && 2497 (be32_to_cpu(agi->agi_free_level) < 1 || 2498 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels)) 2499 return __this_address; 2500 2501 /* 2502 * during growfs operations, the perag is not fully initialised, 2503 * so we can't use it for any useful checking. growfs ensures we can't 2504 * use it by using uncached buffers that don't have the perag attached 2505 * so we can detect and avoid this problem. 2506 */ 2507 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) 2508 return __this_address; 2509 2510 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { 2511 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO)) 2512 continue; 2513 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i]))) 2514 return __this_address; 2515 } 2516 2517 return NULL; 2518 } 2519 2520 static void 2521 xfs_agi_read_verify( 2522 struct xfs_buf *bp) 2523 { 2524 struct xfs_mount *mp = bp->b_mount; 2525 xfs_failaddr_t fa; 2526 2527 if (xfs_has_crc(mp) && 2528 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) 2529 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 2530 else { 2531 fa = xfs_agi_verify(bp); 2532 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI)) 2533 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2534 } 2535 } 2536 2537 static void 2538 xfs_agi_write_verify( 2539 struct xfs_buf *bp) 2540 { 2541 struct xfs_mount *mp = bp->b_mount; 2542 struct xfs_buf_log_item *bip = bp->b_log_item; 2543 struct xfs_agi *agi = bp->b_addr; 2544 xfs_failaddr_t fa; 2545 2546 fa = xfs_agi_verify(bp); 2547 if (fa) { 2548 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2549 return; 2550 } 2551 2552 if (!xfs_has_crc(mp)) 2553 return; 2554 2555 if (bip) 2556 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); 2557 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); 2558 } 2559 2560 const struct xfs_buf_ops xfs_agi_buf_ops = { 2561 .name = "xfs_agi", 2562 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) }, 2563 .verify_read = xfs_agi_read_verify, 2564 .verify_write = xfs_agi_write_verify, 2565 .verify_struct = xfs_agi_verify, 2566 }; 2567 2568 /* 2569 * Read in the allocation group header (inode allocation section) 2570 */ 2571 int 2572 xfs_read_agi( 2573 struct xfs_perag *pag, 2574 struct xfs_trans *tp, 2575 struct xfs_buf **agibpp) 2576 { 2577 struct xfs_mount *mp = pag->pag_mount; 2578 int error; 2579 2580 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno); 2581 2582 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 2583 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)), 2584 XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops); 2585 if (error) 2586 return error; 2587 if (tp) 2588 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF); 2589 2590 xfs_buf_set_ref(*agibpp, XFS_AGI_REF); 2591 return 0; 2592 } 2593 2594 /* 2595 * Read in the agi and initialise the per-ag data. If the caller supplies a 2596 * @agibpp, return the locked AGI buffer to them, otherwise release it. 2597 */ 2598 int 2599 xfs_ialloc_read_agi( 2600 struct xfs_perag *pag, 2601 struct xfs_trans *tp, 2602 struct xfs_buf **agibpp) 2603 { 2604 struct xfs_buf *agibp; 2605 struct xfs_agi *agi; 2606 int error; 2607 2608 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno); 2609 2610 error = xfs_read_agi(pag, tp, &agibp); 2611 if (error) 2612 return error; 2613 2614 agi = agibp->b_addr; 2615 if (!xfs_perag_initialised_agi(pag)) { 2616 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 2617 pag->pagi_count = be32_to_cpu(agi->agi_count); 2618 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate); 2619 } 2620 2621 /* 2622 * It's possible for these to be out of sync if 2623 * we are in the middle of a forced shutdown. 2624 */ 2625 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || 2626 xfs_is_shutdown(pag->pag_mount)); 2627 if (agibpp) 2628 *agibpp = agibp; 2629 else 2630 xfs_trans_brelse(tp, agibp); 2631 return 0; 2632 } 2633 2634 /* Is there an inode record covering a given range of inode numbers? */ 2635 int 2636 xfs_ialloc_has_inode_record( 2637 struct xfs_btree_cur *cur, 2638 xfs_agino_t low, 2639 xfs_agino_t high, 2640 bool *exists) 2641 { 2642 struct xfs_inobt_rec_incore irec; 2643 xfs_agino_t agino; 2644 uint16_t holemask; 2645 int has_record; 2646 int i; 2647 int error; 2648 2649 *exists = false; 2650 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record); 2651 while (error == 0 && has_record) { 2652 error = xfs_inobt_get_rec(cur, &irec, &has_record); 2653 if (error || irec.ir_startino > high) 2654 break; 2655 2656 agino = irec.ir_startino; 2657 holemask = irec.ir_holemask; 2658 for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1, 2659 i++, agino += XFS_INODES_PER_HOLEMASK_BIT) { 2660 if (holemask & 1) 2661 continue; 2662 if (agino + XFS_INODES_PER_HOLEMASK_BIT > low && 2663 agino <= high) { 2664 *exists = true; 2665 return 0; 2666 } 2667 } 2668 2669 error = xfs_btree_increment(cur, 0, &has_record); 2670 } 2671 return error; 2672 } 2673 2674 /* Is there an inode record covering a given extent? */ 2675 int 2676 xfs_ialloc_has_inodes_at_extent( 2677 struct xfs_btree_cur *cur, 2678 xfs_agblock_t bno, 2679 xfs_extlen_t len, 2680 bool *exists) 2681 { 2682 xfs_agino_t low; 2683 xfs_agino_t high; 2684 2685 low = XFS_AGB_TO_AGINO(cur->bc_mp, bno); 2686 high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1; 2687 2688 return xfs_ialloc_has_inode_record(cur, low, high, exists); 2689 } 2690 2691 struct xfs_ialloc_count_inodes { 2692 xfs_agino_t count; 2693 xfs_agino_t freecount; 2694 }; 2695 2696 /* Record inode counts across all inobt records. */ 2697 STATIC int 2698 xfs_ialloc_count_inodes_rec( 2699 struct xfs_btree_cur *cur, 2700 const union xfs_btree_rec *rec, 2701 void *priv) 2702 { 2703 struct xfs_inobt_rec_incore irec; 2704 struct xfs_ialloc_count_inodes *ci = priv; 2705 2706 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec); 2707 if (xfs_inobt_check_irec(cur, &irec) != NULL) 2708 return -EFSCORRUPTED; 2709 2710 ci->count += irec.ir_count; 2711 ci->freecount += irec.ir_freecount; 2712 2713 return 0; 2714 } 2715 2716 /* Count allocated and free inodes under an inobt. */ 2717 int 2718 xfs_ialloc_count_inodes( 2719 struct xfs_btree_cur *cur, 2720 xfs_agino_t *count, 2721 xfs_agino_t *freecount) 2722 { 2723 struct xfs_ialloc_count_inodes ci = {0}; 2724 int error; 2725 2726 ASSERT(cur->bc_btnum == XFS_BTNUM_INO); 2727 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci); 2728 if (error) 2729 return error; 2730 2731 *count = ci.count; 2732 *freecount = ci.freecount; 2733 return 0; 2734 } 2735 2736 /* 2737 * Initialize inode-related geometry information. 2738 * 2739 * Compute the inode btree min and max levels and set maxicount. 2740 * 2741 * Set the inode cluster size. This may still be overridden by the file 2742 * system block size if it is larger than the chosen cluster size. 2743 * 2744 * For v5 filesystems, scale the cluster size with the inode size to keep a 2745 * constant ratio of inode per cluster buffer, but only if mkfs has set the 2746 * inode alignment value appropriately for larger cluster sizes. 2747 * 2748 * Then compute the inode cluster alignment information. 2749 */ 2750 void 2751 xfs_ialloc_setup_geometry( 2752 struct xfs_mount *mp) 2753 { 2754 struct xfs_sb *sbp = &mp->m_sb; 2755 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2756 uint64_t icount; 2757 uint inodes; 2758 2759 igeo->new_diflags2 = 0; 2760 if (xfs_has_bigtime(mp)) 2761 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME; 2762 if (xfs_has_large_extent_counts(mp)) 2763 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64; 2764 2765 /* Compute inode btree geometry. */ 2766 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 2767 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); 2768 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); 2769 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2; 2770 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2; 2771 2772 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK, 2773 sbp->sb_inopblock); 2774 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog; 2775 2776 if (sbp->sb_spino_align) 2777 igeo->ialloc_min_blks = sbp->sb_spino_align; 2778 else 2779 igeo->ialloc_min_blks = igeo->ialloc_blks; 2780 2781 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */ 2782 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG; 2783 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr, 2784 inodes); 2785 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk()); 2786 2787 /* 2788 * Set the maximum inode count for this filesystem, being careful not 2789 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular 2790 * users should never get here due to failing sb verification, but 2791 * certain users (xfs_db) need to be usable even with corrupt metadata. 2792 */ 2793 if (sbp->sb_imax_pct && igeo->ialloc_blks) { 2794 /* 2795 * Make sure the maximum inode count is a multiple 2796 * of the units we allocate inodes in. 2797 */ 2798 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 2799 do_div(icount, 100); 2800 do_div(icount, igeo->ialloc_blks); 2801 igeo->maxicount = XFS_FSB_TO_INO(mp, 2802 icount * igeo->ialloc_blks); 2803 } else { 2804 igeo->maxicount = 0; 2805 } 2806 2807 /* 2808 * Compute the desired size of an inode cluster buffer size, which 2809 * starts at 8K and (on v5 filesystems) scales up with larger inode 2810 * sizes. 2811 * 2812 * Preserve the desired inode cluster size because the sparse inodes 2813 * feature uses that desired size (not the actual size) to compute the 2814 * sparse inode alignment. The mount code validates this value, so we 2815 * cannot change the behavior. 2816 */ 2817 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; 2818 if (xfs_has_v3inodes(mp)) { 2819 int new_size = igeo->inode_cluster_size_raw; 2820 2821 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 2822 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 2823 igeo->inode_cluster_size_raw = new_size; 2824 } 2825 2826 /* Calculate inode cluster ratios. */ 2827 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize) 2828 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp, 2829 igeo->inode_cluster_size_raw); 2830 else 2831 igeo->blocks_per_cluster = 1; 2832 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster); 2833 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster); 2834 2835 /* Calculate inode cluster alignment. */ 2836 if (xfs_has_align(mp) && 2837 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster) 2838 igeo->cluster_align = mp->m_sb.sb_inoalignmt; 2839 else 2840 igeo->cluster_align = 1; 2841 igeo->inoalign_mask = igeo->cluster_align - 1; 2842 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align); 2843 2844 /* 2845 * If we are using stripe alignment, check whether 2846 * the stripe unit is a multiple of the inode alignment 2847 */ 2848 if (mp->m_dalign && igeo->inoalign_mask && 2849 !(mp->m_dalign & igeo->inoalign_mask)) 2850 igeo->ialloc_align = mp->m_dalign; 2851 else 2852 igeo->ialloc_align = 0; 2853 } 2854 2855 /* Compute the location of the root directory inode that is laid out by mkfs. */ 2856 xfs_ino_t 2857 xfs_ialloc_calc_rootino( 2858 struct xfs_mount *mp, 2859 int sunit) 2860 { 2861 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2862 xfs_agblock_t first_bno; 2863 2864 /* 2865 * Pre-calculate the geometry of AG 0. We know what it looks like 2866 * because libxfs knows how to create allocation groups now. 2867 * 2868 * first_bno is the first block in which mkfs could possibly have 2869 * allocated the root directory inode, once we factor in the metadata 2870 * that mkfs formats before it. Namely, the four AG headers... 2871 */ 2872 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize); 2873 2874 /* ...the two free space btree roots... */ 2875 first_bno += 2; 2876 2877 /* ...the inode btree root... */ 2878 first_bno += 1; 2879 2880 /* ...the initial AGFL... */ 2881 first_bno += xfs_alloc_min_freelist(mp, NULL); 2882 2883 /* ...the free inode btree root... */ 2884 if (xfs_has_finobt(mp)) 2885 first_bno++; 2886 2887 /* ...the reverse mapping btree root... */ 2888 if (xfs_has_rmapbt(mp)) 2889 first_bno++; 2890 2891 /* ...the reference count btree... */ 2892 if (xfs_has_reflink(mp)) 2893 first_bno++; 2894 2895 /* 2896 * ...and the log, if it is allocated in the first allocation group. 2897 * 2898 * This can happen with filesystems that only have a single 2899 * allocation group, or very odd geometries created by old mkfs 2900 * versions on very small filesystems. 2901 */ 2902 if (xfs_ag_contains_log(mp, 0)) 2903 first_bno += mp->m_sb.sb_logblocks; 2904 2905 /* 2906 * Now round first_bno up to whatever allocation alignment is given 2907 * by the filesystem or was passed in. 2908 */ 2909 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0) 2910 first_bno = roundup(first_bno, sunit); 2911 else if (xfs_has_align(mp) && 2912 mp->m_sb.sb_inoalignmt > 1) 2913 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); 2914 2915 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); 2916 } 2917 2918 /* 2919 * Ensure there are not sparse inode clusters that cross the new EOAG. 2920 * 2921 * This is a no-op for non-spinode filesystems since clusters are always fully 2922 * allocated and checking the bnobt suffices. However, a spinode filesystem 2923 * could have a record where the upper inodes are free blocks. If those blocks 2924 * were removed from the filesystem, the inode record would extend beyond EOAG, 2925 * which will be flagged as corruption. 2926 */ 2927 int 2928 xfs_ialloc_check_shrink( 2929 struct xfs_perag *pag, 2930 struct xfs_trans *tp, 2931 struct xfs_buf *agibp, 2932 xfs_agblock_t new_length) 2933 { 2934 struct xfs_inobt_rec_incore rec; 2935 struct xfs_btree_cur *cur; 2936 xfs_agino_t agino; 2937 int has; 2938 int error; 2939 2940 if (!xfs_has_sparseinodes(pag->pag_mount)) 2941 return 0; 2942 2943 cur = xfs_inobt_init_cursor(pag, tp, agibp, XFS_BTNUM_INO); 2944 2945 /* Look up the inobt record that would correspond to the new EOFS. */ 2946 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length); 2947 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); 2948 if (error || !has) 2949 goto out; 2950 2951 error = xfs_inobt_get_rec(cur, &rec, &has); 2952 if (error) 2953 goto out; 2954 2955 if (!has) { 2956 error = -EFSCORRUPTED; 2957 goto out; 2958 } 2959 2960 /* If the record covers inodes that would be beyond EOFS, bail out. */ 2961 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { 2962 error = -ENOSPC; 2963 goto out; 2964 } 2965 out: 2966 xfs_btree_del_cursor(cur, error); 2967 return error; 2968 } 2969