1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_ialloc.h" 17 #include "xfs_ialloc_btree.h" 18 #include "xfs_alloc.h" 19 #include "xfs_errortag.h" 20 #include "xfs_error.h" 21 #include "xfs_bmap.h" 22 #include "xfs_trans.h" 23 #include "xfs_buf_item.h" 24 #include "xfs_icreate_item.h" 25 #include "xfs_icache.h" 26 #include "xfs_trace.h" 27 #include "xfs_log.h" 28 #include "xfs_rmap.h" 29 #include "xfs_ag.h" 30 31 /* 32 * Lookup a record by ino in the btree given by cur. 33 */ 34 int /* error */ 35 xfs_inobt_lookup( 36 struct xfs_btree_cur *cur, /* btree cursor */ 37 xfs_agino_t ino, /* starting inode of chunk */ 38 xfs_lookup_t dir, /* <=, >=, == */ 39 int *stat) /* success/failure */ 40 { 41 cur->bc_rec.i.ir_startino = ino; 42 cur->bc_rec.i.ir_holemask = 0; 43 cur->bc_rec.i.ir_count = 0; 44 cur->bc_rec.i.ir_freecount = 0; 45 cur->bc_rec.i.ir_free = 0; 46 return xfs_btree_lookup(cur, dir, stat); 47 } 48 49 /* 50 * Update the record referred to by cur to the value given. 51 * This either works (return 0) or gets an EFSCORRUPTED error. 52 */ 53 STATIC int /* error */ 54 xfs_inobt_update( 55 struct xfs_btree_cur *cur, /* btree cursor */ 56 xfs_inobt_rec_incore_t *irec) /* btree record */ 57 { 58 union xfs_btree_rec rec; 59 60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); 61 if (xfs_has_sparseinodes(cur->bc_mp)) { 62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask); 63 rec.inobt.ir_u.sp.ir_count = irec->ir_count; 64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount; 65 } else { 66 /* ir_holemask/ir_count not supported on-disk */ 67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount); 68 } 69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free); 70 return xfs_btree_update(cur, &rec); 71 } 72 73 /* Convert on-disk btree record to incore inobt record. */ 74 void 75 xfs_inobt_btrec_to_irec( 76 struct xfs_mount *mp, 77 const union xfs_btree_rec *rec, 78 struct xfs_inobt_rec_incore *irec) 79 { 80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); 81 if (xfs_has_sparseinodes(mp)) { 82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask); 83 irec->ir_count = rec->inobt.ir_u.sp.ir_count; 84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount; 85 } else { 86 /* 87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded 88 * values for full inode chunks. 89 */ 90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL; 91 irec->ir_count = XFS_INODES_PER_CHUNK; 92 irec->ir_freecount = 93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount); 94 } 95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free); 96 } 97 98 /* 99 * Get the data from the pointed-to record. 
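 *
 * (For reference, as implied by the validation checks below: an inode chunk
 * spans XFS_INODES_PER_CHUNK (64) inodes. ir_free is a 64-bit mask with one
 * bit per inode, set bits meaning "free"; ir_holemask has 16 bits, each
 * covering 4 inodes, set bits meaning "never physically allocated". A full,
 * non-sparse chunk whose inodes are all free therefore looks like:
 *
 *	ir_holemask  = XFS_INOBT_HOLEMASK_FULL (no holes)
 *	ir_count     = 64
 *	ir_freecount = 64
 *	ir_free      = XFS_INOBT_ALL_FREE (all bits set)
 * )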
100 */ 101 int 102 xfs_inobt_get_rec( 103 struct xfs_btree_cur *cur, 104 struct xfs_inobt_rec_incore *irec, 105 int *stat) 106 { 107 struct xfs_mount *mp = cur->bc_mp; 108 union xfs_btree_rec *rec; 109 int error; 110 uint64_t realfree; 111 112 error = xfs_btree_get_rec(cur, &rec, stat); 113 if (error || *stat == 0) 114 return error; 115 116 xfs_inobt_btrec_to_irec(mp, rec, irec); 117 118 if (!xfs_verify_agino(cur->bc_ag.pag, irec->ir_startino)) 119 goto out_bad_rec; 120 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT || 121 irec->ir_count > XFS_INODES_PER_CHUNK) 122 goto out_bad_rec; 123 if (irec->ir_freecount > XFS_INODES_PER_CHUNK) 124 goto out_bad_rec; 125 126 /* if there are no holes, return the first available offset */ 127 if (!xfs_inobt_issparse(irec->ir_holemask)) 128 realfree = irec->ir_free; 129 else 130 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec); 131 if (hweight64(realfree) != irec->ir_freecount) 132 goto out_bad_rec; 133 134 return 0; 135 136 out_bad_rec: 137 xfs_warn(mp, 138 "%s Inode BTree record corruption in AG %d detected!", 139 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", 140 cur->bc_ag.pag->pag_agno); 141 xfs_warn(mp, 142 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", 143 irec->ir_startino, irec->ir_count, irec->ir_freecount, 144 irec->ir_free, irec->ir_holemask); 145 return -EFSCORRUPTED; 146 } 147 148 /* 149 * Insert a single inobt record. Cursor must already point to desired location. 150 */ 151 int 152 xfs_inobt_insert_rec( 153 struct xfs_btree_cur *cur, 154 uint16_t holemask, 155 uint8_t count, 156 int32_t freecount, 157 xfs_inofree_t free, 158 int *stat) 159 { 160 cur->bc_rec.i.ir_holemask = holemask; 161 cur->bc_rec.i.ir_count = count; 162 cur->bc_rec.i.ir_freecount = freecount; 163 cur->bc_rec.i.ir_free = free; 164 return xfs_btree_insert(cur, stat); 165 } 166 167 /* 168 * Insert records describing a newly allocated inode chunk into the inobt. 169 */ 170 STATIC int 171 xfs_inobt_insert( 172 struct xfs_perag *pag, 173 struct xfs_trans *tp, 174 struct xfs_buf *agbp, 175 xfs_agino_t newino, 176 xfs_agino_t newlen, 177 xfs_btnum_t btnum) 178 { 179 struct xfs_btree_cur *cur; 180 xfs_agino_t thisino; 181 int i; 182 int error; 183 184 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 185 186 for (thisino = newino; 187 thisino < newino + newlen; 188 thisino += XFS_INODES_PER_CHUNK) { 189 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); 190 if (error) { 191 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 192 return error; 193 } 194 ASSERT(i == 0); 195 196 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL, 197 XFS_INODES_PER_CHUNK, 198 XFS_INODES_PER_CHUNK, 199 XFS_INOBT_ALL_FREE, &i); 200 if (error) { 201 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 202 return error; 203 } 204 ASSERT(i == 1); 205 } 206 207 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 208 209 return 0; 210 } 211 212 /* 213 * Verify that the number of free inodes in the AGI is correct. 
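 *
 * (Debug-only sanity check: walk every record in the btree under this cursor,
 * sum ir_freecount and compare the total against the cached per-AG
 * pagi_freecount. The walk is only performed for single-level trees to keep
 * the cost of the check down.)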
 */
#ifdef DEBUG
static int
xfs_check_agi_freecount(
	struct xfs_btree_cur	*cur)
{
	if (cur->bc_nlevels == 1) {
		xfs_inobt_rec_incore_t rec;
		int		freecount = 0;
		int		error;
		int		i;

		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
		if (error)
			return error;

		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				return error;

			if (i) {
				freecount += rec.ir_freecount;
				error = xfs_btree_increment(cur, 0, &i);
				if (error)
					return error;
			}
		} while (i == 1);

		if (!xfs_is_shutdown(cur->bc_mp))
			ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
	}
	return 0;
}
#else
#define xfs_check_agi_freecount(cur)	0
#endif

/*
 * Initialise a new set of inodes. When called without a transaction context
 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 * than logging them (which in a transaction context puts them into the AIL
 * for writeback rather than the xfsbufd queue).
 */
int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct list_head	*buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			nbufs;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;
	int			error;

	/*
	 * Loop over the new block(s), filling in the inodes. For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
	nbufs = length / M_IGEO(mp)->blocks_per_cluster;

	/*
	 * Figure out what version number to use in the inodes we create. If
	 * the superblock version has caught up to the one that supports the
	 * new inode format, then use the new inode version. Otherwise use the
	 * old version so that old kernels will continue to be able to use the
	 * file system.
	 *
	 * For v3 inodes, we also need to write the inode number into the
	 * inode, so calculate the first inode number of the chunk here as
	 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inodes we log the entire buffer rather than just
	 * the inode cores.
	 */
	if (xfs_has_v3inodes(mp)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

		/*
		 * Log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
		 * need to log the physical changes to the inode buffers as log
		 * recovery will know what initialisation is actually needed.
		 * Hence we only need to log the buffers as "ordered" buffers so
		 * they track in the AIL as if they were physically logged.
		 */
		if (tp)
			xfs_icreate_log(tp, agno, agbno, icount,
					mp->m_sb.sb_inodesize, length, gen);
	} else
		version = 2;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
				(j * M_IGEO(mp)->blocks_per_cluster));
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
				mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
				XBF_UNMAPPED, &fbuf);
		if (error)
			return error;

		/* Initialize the inode buffers and log them appropriately. */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid,
					  &mp->m_sb.sb_meta_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else if (tp) {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
					  ioffset + XFS_DINODE_SIZE(mp) - 1);
			}
		}

		if (tp) {
			/*
			 * Mark the buffer as an inode allocation buffer so it
			 * sticks in the AIL at the point of this allocation
			 * transaction. This ensures they are on disk before
			 * the tail of the log can be moved past this
			 * transaction (i.e. by preventing relogging from moving
			 * it forward in the log).
			 */
			xfs_trans_inode_alloc_buf(tp, fbuf);
			if (version == 3) {
				/*
				 * Mark the buffer as ordered so that it is not
				 * physically logged in the transaction but is
				 * still tracked in the AIL as part of the
				 * transaction and pins the log appropriately.
				 */
				xfs_trans_ordered_buf(tp, fbuf);
			}
		} else {
			fbuf->b_flags |= XBF_DONE;
			xfs_buf_delwri_queue(fbuf, buffer_list);
			xfs_buf_relse(fbuf);
		}
	}
	return 0;
}

/*
 * Align startino and allocmask for a recently allocated sparse chunk such that
 * they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * Background:
 *
 * When enabled, sparse inode support increases the inode alignment from
 * cluster size to inode chunk size. This means that the minimum range between
 * two non-adjacent inode records in the inobt is large enough for a full inode
 * record. This allows for cluster sized, cluster aligned block allocation
 * without needing to worry about whether the resulting inode record overlaps
 * with another record in the tree. Without this basic rule, we would have to
 * deal with the consequences of overlap by potentially undoing recent
 * allocations in the inode allocation codepath.
 *
 * Because of this alignment rule (which is enforced on mount), there are two
 * inobt possibilities for newly allocated sparse chunks. One is that the
 * aligned inode record for the chunk covers a range of inodes not already
 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 * other is that a record already exists at the aligned startino that considers
 * the newly allocated range as sparse. In the latter case, record content is
 * merged in the hope that sparse inode chunks fill to full chunks over time.
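 *
 * A worked example (illustrative numbers only): suppose a 32-inode sparse
 * chunk is allocated 32 inodes past a chunk-aligned boundary. The caller
 * builds allocmask = 0x00ff (eight 4-inode holemask bits covering 32 inodes).
 * xfs_align_sparse_ino() rounds startino down by 32 inodes and shifts
 * allocmask left by 32 / XFS_INODES_PER_HOLEMASK_BIT = 8 bits, giving
 * allocmask = 0xff00, i.e. the upper half of the aligned 64-inode record is
 * the newly allocated region and the lower half remains a hole.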
409 */ 410 STATIC void 411 xfs_align_sparse_ino( 412 struct xfs_mount *mp, 413 xfs_agino_t *startino, 414 uint16_t *allocmask) 415 { 416 xfs_agblock_t agbno; 417 xfs_agblock_t mod; 418 int offset; 419 420 agbno = XFS_AGINO_TO_AGBNO(mp, *startino); 421 mod = agbno % mp->m_sb.sb_inoalignmt; 422 if (!mod) 423 return; 424 425 /* calculate the inode offset and align startino */ 426 offset = XFS_AGB_TO_AGINO(mp, mod); 427 *startino -= offset; 428 429 /* 430 * Since startino has been aligned down, left shift allocmask such that 431 * it continues to represent the same physical inodes relative to the 432 * new startino. 433 */ 434 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT; 435 } 436 437 /* 438 * Determine whether the source inode record can merge into the target. Both 439 * records must be sparse, the inode ranges must match and there must be no 440 * allocation overlap between the records. 441 */ 442 STATIC bool 443 __xfs_inobt_can_merge( 444 struct xfs_inobt_rec_incore *trec, /* tgt record */ 445 struct xfs_inobt_rec_incore *srec) /* src record */ 446 { 447 uint64_t talloc; 448 uint64_t salloc; 449 450 /* records must cover the same inode range */ 451 if (trec->ir_startino != srec->ir_startino) 452 return false; 453 454 /* both records must be sparse */ 455 if (!xfs_inobt_issparse(trec->ir_holemask) || 456 !xfs_inobt_issparse(srec->ir_holemask)) 457 return false; 458 459 /* both records must track some inodes */ 460 if (!trec->ir_count || !srec->ir_count) 461 return false; 462 463 /* can't exceed capacity of a full record */ 464 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK) 465 return false; 466 467 /* verify there is no allocation overlap */ 468 talloc = xfs_inobt_irec_to_allocmask(trec); 469 salloc = xfs_inobt_irec_to_allocmask(srec); 470 if (talloc & salloc) 471 return false; 472 473 return true; 474 } 475 476 /* 477 * Merge the source inode record into the target. The caller must call 478 * __xfs_inobt_can_merge() to ensure the merge is valid. 479 */ 480 STATIC void 481 __xfs_inobt_rec_merge( 482 struct xfs_inobt_rec_incore *trec, /* target */ 483 struct xfs_inobt_rec_incore *srec) /* src */ 484 { 485 ASSERT(trec->ir_startino == srec->ir_startino); 486 487 /* combine the counts */ 488 trec->ir_count += srec->ir_count; 489 trec->ir_freecount += srec->ir_freecount; 490 491 /* 492 * Merge the holemask and free mask. For both fields, 0 bits refer to 493 * allocated inodes. We combine the allocated ranges with bitwise AND. 494 */ 495 trec->ir_holemask &= srec->ir_holemask; 496 trec->ir_free &= srec->ir_free; 497 } 498 499 /* 500 * Insert a new sparse inode chunk into the associated inode btree. The inode 501 * record for the sparse chunk is pre-aligned to a startino that should match 502 * any pre-existing sparse inode record in the tree. This allows sparse chunks 503 * to fill over time. 504 * 505 * This function supports two modes of handling preexisting records depending on 506 * the merge flag. If merge is true, the provided record is merged with the 507 * existing record and updated in place. The merged record is returned in nrec. 508 * If merge is false, an existing record is replaced with the provided record. 509 * If no preexisting record exists, the provided record is always inserted. 510 * 511 * It is considered corruption if a merge is requested and not possible. Given 512 * the sparse inode alignment constraints, this should never happen. 
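 *
 * As an illustration of the merge case (hypothetical values): an existing
 * record with ir_holemask 0xff00 (lower 32 inodes allocated) and an incoming
 * record with ir_holemask 0x00ff (upper 32 inodes allocated) combine, via
 * __xfs_inobt_rec_merge(), into holemask 0x0000 - a fully populated 64-inode
 * chunk - with the ir_count/ir_freecount fields summed and the free masks
 * ANDed together.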
513 */ 514 STATIC int 515 xfs_inobt_insert_sprec( 516 struct xfs_perag *pag, 517 struct xfs_trans *tp, 518 struct xfs_buf *agbp, 519 int btnum, 520 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */ 521 bool merge) /* merge or replace */ 522 { 523 struct xfs_mount *mp = pag->pag_mount; 524 struct xfs_btree_cur *cur; 525 int error; 526 int i; 527 struct xfs_inobt_rec_incore rec; 528 529 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 530 531 /* the new record is pre-aligned so we know where to look */ 532 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i); 533 if (error) 534 goto error; 535 /* if nothing there, insert a new record and return */ 536 if (i == 0) { 537 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask, 538 nrec->ir_count, nrec->ir_freecount, 539 nrec->ir_free, &i); 540 if (error) 541 goto error; 542 if (XFS_IS_CORRUPT(mp, i != 1)) { 543 error = -EFSCORRUPTED; 544 goto error; 545 } 546 547 goto out; 548 } 549 550 /* 551 * A record exists at this startino. Merge or replace the record 552 * depending on what we've been asked to do. 553 */ 554 if (merge) { 555 error = xfs_inobt_get_rec(cur, &rec, &i); 556 if (error) 557 goto error; 558 if (XFS_IS_CORRUPT(mp, i != 1)) { 559 error = -EFSCORRUPTED; 560 goto error; 561 } 562 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) { 563 error = -EFSCORRUPTED; 564 goto error; 565 } 566 567 /* 568 * This should never fail. If we have coexisting records that 569 * cannot merge, something is seriously wrong. 570 */ 571 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) { 572 error = -EFSCORRUPTED; 573 goto error; 574 } 575 576 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino, 577 rec.ir_holemask, nrec->ir_startino, 578 nrec->ir_holemask); 579 580 /* merge to nrec to output the updated record */ 581 __xfs_inobt_rec_merge(nrec, &rec); 582 583 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino, 584 nrec->ir_holemask); 585 586 error = xfs_inobt_rec_check_count(mp, nrec); 587 if (error) 588 goto error; 589 } 590 591 error = xfs_inobt_update(cur, nrec); 592 if (error) 593 goto error; 594 595 out: 596 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 597 return 0; 598 error: 599 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 600 return error; 601 } 602 603 /* 604 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if 605 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so 606 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum 607 * inode count threshold, or the usual negative error code for other errors. 608 */ 609 STATIC int 610 xfs_ialloc_ag_alloc( 611 struct xfs_perag *pag, 612 struct xfs_trans *tp, 613 struct xfs_buf *agbp) 614 { 615 struct xfs_agi *agi; 616 struct xfs_alloc_arg args; 617 int error; 618 xfs_agino_t newino; /* new first inode's number */ 619 xfs_agino_t newlen; /* new number of inodes */ 620 int isaligned = 0; /* inode allocation at stripe */ 621 /* unit boundary */ 622 /* init. 
to full chunk */
	struct xfs_inobt_rec_incore rec;
	struct xfs_ino_geometry	*igeo = M_IGEO(tp->t_mountp);
	uint16_t		allocmask = (uint16_t) -1;
	int			do_sparse = 0;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = NULLFSBLOCK;
	args.oinfo = XFS_RMAP_OINFO_INODES;
	args.pag = pag;

#ifdef DEBUG
	/* randomly do sparse inode allocations */
	if (xfs_has_sparseinodes(tp->t_mountp) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks)
		do_sparse = get_random_u32_below(2);
#endif

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = igeo->ialloc_inos;
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
							igeo->maxicount)
		return -ENOSPC;
	args.minlen = args.maxlen = igeo->ialloc_blks;
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes. If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = agbp->b_addr;
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
			igeo->ialloc_blks;
	if (do_sparse)
		goto sparse_alloc;
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1,
		 * however we need to take cluster alignment into account when
		 * fixing up the freelist. Use the minalignslop field to
		 * indicate that extra blocks might be required for alignment,
		 * but not to use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = igeo->cluster_align - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = igeo->inobt_maxlevels;
		error = xfs_alloc_vextent_this_ag(&args);
		if (error)
			return error;

		/*
		 * This request might have dirtied the transaction if the AG can
		 * satisfy the request, but the exact block was not available.
		 * If the allocation did fail, subsequent requests will relax
		 * the exact agbno requirement and increase the alignment
		 * instead. It is critical that the total size of the request
		 * (len + alignment + slop) does not increase from this point
		 * on, so reset minalignslop to ensure it is not included in
		 * subsequent requests.
		 */
		args.minalignslop = 0;
	}

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (igeo->ialloc_align) {
			ASSERT(!xfs_has_noalign(args.mp));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = igeo->cluster_align;
		/*
		 * Need to figure out where to allocate the inode blocks.
722 * Ideally they should be spaced out through the a.g. 723 * For now, just allocate blocks up front. 724 */ 725 args.agbno = be32_to_cpu(agi->agi_root); 726 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 727 /* 728 * Allocate a fixed-size extent of inodes. 729 */ 730 args.type = XFS_ALLOCTYPE_NEAR_BNO; 731 args.prod = 1; 732 /* 733 * Allow space for the inode btree to split. 734 */ 735 args.minleft = igeo->inobt_maxlevels; 736 error = xfs_alloc_vextent_this_ag(&args); 737 if (error) 738 return error; 739 } 740 741 /* 742 * If stripe alignment is turned on, then try again with cluster 743 * alignment. 744 */ 745 if (isaligned && args.fsbno == NULLFSBLOCK) { 746 args.type = XFS_ALLOCTYPE_NEAR_BNO; 747 args.agbno = be32_to_cpu(agi->agi_root); 748 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 749 args.alignment = igeo->cluster_align; 750 if ((error = xfs_alloc_vextent(&args))) 751 return error; 752 } 753 754 /* 755 * Finally, try a sparse allocation if the filesystem supports it and 756 * the sparse allocation length is smaller than a full chunk. 757 */ 758 if (xfs_has_sparseinodes(args.mp) && 759 igeo->ialloc_min_blks < igeo->ialloc_blks && 760 args.fsbno == NULLFSBLOCK) { 761 sparse_alloc: 762 args.type = XFS_ALLOCTYPE_NEAR_BNO; 763 args.agbno = be32_to_cpu(agi->agi_root); 764 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 765 args.alignment = args.mp->m_sb.sb_spino_align; 766 args.prod = 1; 767 768 args.minlen = igeo->ialloc_min_blks; 769 args.maxlen = args.minlen; 770 771 /* 772 * The inode record will be aligned to full chunk size. We must 773 * prevent sparse allocation from AG boundaries that result in 774 * invalid inode records, such as records that start at agbno 0 775 * or extend beyond the AG. 776 * 777 * Set min agbno to the first aligned, non-zero agbno and max to 778 * the last aligned agbno that is at least one full chunk from 779 * the end of the AG. 780 */ 781 args.min_agbno = args.mp->m_sb.sb_inoalignmt; 782 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, 783 args.mp->m_sb.sb_inoalignmt) - 784 igeo->ialloc_blks; 785 786 error = xfs_alloc_vextent_this_ag(&args); 787 if (error) 788 return error; 789 790 newlen = XFS_AGB_TO_AGINO(args.mp, args.len); 791 ASSERT(newlen <= XFS_INODES_PER_CHUNK); 792 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1; 793 } 794 795 if (args.fsbno == NULLFSBLOCK) 796 return -EAGAIN; 797 798 ASSERT(args.len == args.minlen); 799 800 /* 801 * Stamp and write the inode buffers. 802 * 803 * Seed the new inode cluster with a random generation number. This 804 * prevents short-term reuse of generation numbers if a chunk is 805 * freed and then immediately reallocated. We use random numbers 806 * rather than a linear progression to prevent the next generation 807 * number from being easily guessable. 808 */ 809 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, 810 args.agbno, args.len, get_random_u32()); 811 812 if (error) 813 return error; 814 /* 815 * Convert the results. 816 */ 817 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno); 818 819 if (xfs_inobt_issparse(~allocmask)) { 820 /* 821 * We've allocated a sparse chunk. Align the startino and mask. 
822 */ 823 xfs_align_sparse_ino(args.mp, &newino, &allocmask); 824 825 rec.ir_startino = newino; 826 rec.ir_holemask = ~allocmask; 827 rec.ir_count = newlen; 828 rec.ir_freecount = newlen; 829 rec.ir_free = XFS_INOBT_ALL_FREE; 830 831 /* 832 * Insert the sparse record into the inobt and allow for a merge 833 * if necessary. If a merge does occur, rec is updated to the 834 * merged record. 835 */ 836 error = xfs_inobt_insert_sprec(pag, tp, agbp, 837 XFS_BTNUM_INO, &rec, true); 838 if (error == -EFSCORRUPTED) { 839 xfs_alert(args.mp, 840 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u", 841 XFS_AGINO_TO_INO(args.mp, pag->pag_agno, 842 rec.ir_startino), 843 rec.ir_holemask, rec.ir_count); 844 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE); 845 } 846 if (error) 847 return error; 848 849 /* 850 * We can't merge the part we've just allocated as for the inobt 851 * due to finobt semantics. The original record may or may not 852 * exist independent of whether physical inodes exist in this 853 * sparse chunk. 854 * 855 * We must update the finobt record based on the inobt record. 856 * rec contains the fully merged and up to date inobt record 857 * from the previous call. Set merge false to replace any 858 * existing record with this one. 859 */ 860 if (xfs_has_finobt(args.mp)) { 861 error = xfs_inobt_insert_sprec(pag, tp, agbp, 862 XFS_BTNUM_FINO, &rec, false); 863 if (error) 864 return error; 865 } 866 } else { 867 /* full chunk - insert new records to both btrees */ 868 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, 869 XFS_BTNUM_INO); 870 if (error) 871 return error; 872 873 if (xfs_has_finobt(args.mp)) { 874 error = xfs_inobt_insert(pag, tp, agbp, newino, 875 newlen, XFS_BTNUM_FINO); 876 if (error) 877 return error; 878 } 879 } 880 881 /* 882 * Update AGI counts and newino. 883 */ 884 be32_add_cpu(&agi->agi_count, newlen); 885 be32_add_cpu(&agi->agi_freecount, newlen); 886 pag->pagi_freecount += newlen; 887 pag->pagi_count += newlen; 888 agi->agi_newino = cpu_to_be32(newino); 889 890 /* 891 * Log allocation group header fields 892 */ 893 xfs_ialloc_log_agi(tp, agbp, 894 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); 895 /* 896 * Modify/log superblock values for inode count and inode free count. 897 */ 898 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); 899 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); 900 return 0; 901 } 902 903 /* 904 * Try to retrieve the next record to the left/right from the current one. 
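 *
 * ("left" here means stepping the cursor to the previous record, i.e. towards
 * lower start inode numbers; otherwise we step towards higher ones. *done is
 * set once the walk falls off either end of the btree.)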
905 */ 906 STATIC int 907 xfs_ialloc_next_rec( 908 struct xfs_btree_cur *cur, 909 xfs_inobt_rec_incore_t *rec, 910 int *done, 911 int left) 912 { 913 int error; 914 int i; 915 916 if (left) 917 error = xfs_btree_decrement(cur, 0, &i); 918 else 919 error = xfs_btree_increment(cur, 0, &i); 920 921 if (error) 922 return error; 923 *done = !i; 924 if (i) { 925 error = xfs_inobt_get_rec(cur, rec, &i); 926 if (error) 927 return error; 928 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 929 return -EFSCORRUPTED; 930 } 931 932 return 0; 933 } 934 935 STATIC int 936 xfs_ialloc_get_rec( 937 struct xfs_btree_cur *cur, 938 xfs_agino_t agino, 939 xfs_inobt_rec_incore_t *rec, 940 int *done) 941 { 942 int error; 943 int i; 944 945 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); 946 if (error) 947 return error; 948 *done = !i; 949 if (i) { 950 error = xfs_inobt_get_rec(cur, rec, &i); 951 if (error) 952 return error; 953 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 954 return -EFSCORRUPTED; 955 } 956 957 return 0; 958 } 959 960 /* 961 * Return the offset of the first free inode in the record. If the inode chunk 962 * is sparsely allocated, we convert the record holemask to inode granularity 963 * and mask off the unallocated regions from the inode free mask. 964 */ 965 STATIC int 966 xfs_inobt_first_free_inode( 967 struct xfs_inobt_rec_incore *rec) 968 { 969 xfs_inofree_t realfree; 970 971 /* if there are no holes, return the first available offset */ 972 if (!xfs_inobt_issparse(rec->ir_holemask)) 973 return xfs_lowbit64(rec->ir_free); 974 975 realfree = xfs_inobt_irec_to_allocmask(rec); 976 realfree &= rec->ir_free; 977 978 return xfs_lowbit64(realfree); 979 } 980 981 /* 982 * Allocate an inode using the inobt-only algorithm. 983 */ 984 STATIC int 985 xfs_dialloc_ag_inobt( 986 struct xfs_perag *pag, 987 struct xfs_trans *tp, 988 struct xfs_buf *agbp, 989 xfs_ino_t parent, 990 xfs_ino_t *inop) 991 { 992 struct xfs_mount *mp = tp->t_mountp; 993 struct xfs_agi *agi = agbp->b_addr; 994 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 995 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 996 struct xfs_btree_cur *cur, *tcur; 997 struct xfs_inobt_rec_incore rec, trec; 998 xfs_ino_t ino; 999 int error; 1000 int offset; 1001 int i, j; 1002 int searchdistance = 10; 1003 1004 ASSERT(xfs_perag_initialised_agi(pag)); 1005 ASSERT(xfs_perag_allows_inodes(pag)); 1006 ASSERT(pag->pagi_freecount > 0); 1007 1008 restart_pagno: 1009 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1010 /* 1011 * If pagino is 0 (this is the root inode allocation) use newino. 1012 * This must work because we've just allocated some. 1013 */ 1014 if (!pagino) 1015 pagino = be32_to_cpu(agi->agi_newino); 1016 1017 error = xfs_check_agi_freecount(cur); 1018 if (error) 1019 goto error0; 1020 1021 /* 1022 * If in the same AG as the parent, try to get near the parent. 1023 */ 1024 if (pagno == pag->pag_agno) { 1025 int doneleft; /* done, to the left */ 1026 int doneright; /* done, to the right */ 1027 1028 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 1029 if (error) 1030 goto error0; 1031 if (XFS_IS_CORRUPT(mp, i != 1)) { 1032 error = -EFSCORRUPTED; 1033 goto error0; 1034 } 1035 1036 error = xfs_inobt_get_rec(cur, &rec, &j); 1037 if (error) 1038 goto error0; 1039 if (XFS_IS_CORRUPT(mp, j != 1)) { 1040 error = -EFSCORRUPTED; 1041 goto error0; 1042 } 1043 1044 if (rec.ir_freecount > 0) { 1045 /* 1046 * Found a free inode in the same chunk 1047 * as the parent, done. 
1048 */ 1049 goto alloc_inode; 1050 } 1051 1052 1053 /* 1054 * In the same AG as parent, but parent's chunk is full. 1055 */ 1056 1057 /* duplicate the cursor, search left & right simultaneously */ 1058 error = xfs_btree_dup_cursor(cur, &tcur); 1059 if (error) 1060 goto error0; 1061 1062 /* 1063 * Skip to last blocks looked up if same parent inode. 1064 */ 1065 if (pagino != NULLAGINO && 1066 pag->pagl_pagino == pagino && 1067 pag->pagl_leftrec != NULLAGINO && 1068 pag->pagl_rightrec != NULLAGINO) { 1069 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, 1070 &trec, &doneleft); 1071 if (error) 1072 goto error1; 1073 1074 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, 1075 &rec, &doneright); 1076 if (error) 1077 goto error1; 1078 } else { 1079 /* search left with tcur, back up 1 record */ 1080 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); 1081 if (error) 1082 goto error1; 1083 1084 /* search right with cur, go forward 1 record. */ 1085 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); 1086 if (error) 1087 goto error1; 1088 } 1089 1090 /* 1091 * Loop until we find an inode chunk with a free inode. 1092 */ 1093 while (--searchdistance > 0 && (!doneleft || !doneright)) { 1094 int useleft; /* using left inode chunk this time */ 1095 1096 /* figure out the closer block if both are valid. */ 1097 if (!doneleft && !doneright) { 1098 useleft = pagino - 1099 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < 1100 rec.ir_startino - pagino; 1101 } else { 1102 useleft = !doneleft; 1103 } 1104 1105 /* free inodes to the left? */ 1106 if (useleft && trec.ir_freecount) { 1107 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1108 cur = tcur; 1109 1110 pag->pagl_leftrec = trec.ir_startino; 1111 pag->pagl_rightrec = rec.ir_startino; 1112 pag->pagl_pagino = pagino; 1113 rec = trec; 1114 goto alloc_inode; 1115 } 1116 1117 /* free inodes to the right? */ 1118 if (!useleft && rec.ir_freecount) { 1119 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1120 1121 pag->pagl_leftrec = trec.ir_startino; 1122 pag->pagl_rightrec = rec.ir_startino; 1123 pag->pagl_pagino = pagino; 1124 goto alloc_inode; 1125 } 1126 1127 /* get next record to check */ 1128 if (useleft) { 1129 error = xfs_ialloc_next_rec(tcur, &trec, 1130 &doneleft, 1); 1131 } else { 1132 error = xfs_ialloc_next_rec(cur, &rec, 1133 &doneright, 0); 1134 } 1135 if (error) 1136 goto error1; 1137 } 1138 1139 if (searchdistance <= 0) { 1140 /* 1141 * Not in range - save last search 1142 * location and allocate a new inode 1143 */ 1144 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1145 pag->pagl_leftrec = trec.ir_startino; 1146 pag->pagl_rightrec = rec.ir_startino; 1147 pag->pagl_pagino = pagino; 1148 1149 } else { 1150 /* 1151 * We've reached the end of the btree. because 1152 * we are only searching a small chunk of the 1153 * btree each search, there is obviously free 1154 * inodes closer to the parent inode than we 1155 * are now. restart the search again. 1156 */ 1157 pag->pagl_pagino = NULLAGINO; 1158 pag->pagl_leftrec = NULLAGINO; 1159 pag->pagl_rightrec = NULLAGINO; 1160 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1161 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1162 goto restart_pagno; 1163 } 1164 } 1165 1166 /* 1167 * In a different AG from the parent. 1168 * See if the most recently allocated block has any free. 
1169 */ 1170 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1171 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1172 XFS_LOOKUP_EQ, &i); 1173 if (error) 1174 goto error0; 1175 1176 if (i == 1) { 1177 error = xfs_inobt_get_rec(cur, &rec, &j); 1178 if (error) 1179 goto error0; 1180 1181 if (j == 1 && rec.ir_freecount > 0) { 1182 /* 1183 * The last chunk allocated in the group 1184 * still has a free inode. 1185 */ 1186 goto alloc_inode; 1187 } 1188 } 1189 } 1190 1191 /* 1192 * None left in the last group, search the whole AG 1193 */ 1194 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1195 if (error) 1196 goto error0; 1197 if (XFS_IS_CORRUPT(mp, i != 1)) { 1198 error = -EFSCORRUPTED; 1199 goto error0; 1200 } 1201 1202 for (;;) { 1203 error = xfs_inobt_get_rec(cur, &rec, &i); 1204 if (error) 1205 goto error0; 1206 if (XFS_IS_CORRUPT(mp, i != 1)) { 1207 error = -EFSCORRUPTED; 1208 goto error0; 1209 } 1210 if (rec.ir_freecount > 0) 1211 break; 1212 error = xfs_btree_increment(cur, 0, &i); 1213 if (error) 1214 goto error0; 1215 if (XFS_IS_CORRUPT(mp, i != 1)) { 1216 error = -EFSCORRUPTED; 1217 goto error0; 1218 } 1219 } 1220 1221 alloc_inode: 1222 offset = xfs_inobt_first_free_inode(&rec); 1223 ASSERT(offset >= 0); 1224 ASSERT(offset < XFS_INODES_PER_CHUNK); 1225 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1226 XFS_INODES_PER_CHUNK) == 0); 1227 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1228 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1229 rec.ir_freecount--; 1230 error = xfs_inobt_update(cur, &rec); 1231 if (error) 1232 goto error0; 1233 be32_add_cpu(&agi->agi_freecount, -1); 1234 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1235 pag->pagi_freecount--; 1236 1237 error = xfs_check_agi_freecount(cur); 1238 if (error) 1239 goto error0; 1240 1241 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1242 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1243 *inop = ino; 1244 return 0; 1245 error1: 1246 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 1247 error0: 1248 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1249 return error; 1250 } 1251 1252 /* 1253 * Use the free inode btree to allocate an inode based on distance from the 1254 * parent. Note that the provided cursor may be deleted and replaced. 1255 */ 1256 STATIC int 1257 xfs_dialloc_ag_finobt_near( 1258 xfs_agino_t pagino, 1259 struct xfs_btree_cur **ocur, 1260 struct xfs_inobt_rec_incore *rec) 1261 { 1262 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ 1263 struct xfs_btree_cur *rcur; /* right search cursor */ 1264 struct xfs_inobt_rec_incore rrec; 1265 int error; 1266 int i, j; 1267 1268 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); 1269 if (error) 1270 return error; 1271 1272 if (i == 1) { 1273 error = xfs_inobt_get_rec(lcur, rec, &i); 1274 if (error) 1275 return error; 1276 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) 1277 return -EFSCORRUPTED; 1278 1279 /* 1280 * See if we've landed in the parent inode record. The finobt 1281 * only tracks chunks with at least one free inode, so record 1282 * existence is enough. 
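		 * In other words, if pagino falls anywhere within the
		 * 64-inode span of this record, the parent's own chunk still
		 * has a free inode and we can allocate from it directly.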
1283 */ 1284 if (pagino >= rec->ir_startino && 1285 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) 1286 return 0; 1287 } 1288 1289 error = xfs_btree_dup_cursor(lcur, &rcur); 1290 if (error) 1291 return error; 1292 1293 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); 1294 if (error) 1295 goto error_rcur; 1296 if (j == 1) { 1297 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1298 if (error) 1299 goto error_rcur; 1300 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) { 1301 error = -EFSCORRUPTED; 1302 goto error_rcur; 1303 } 1304 } 1305 1306 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) { 1307 error = -EFSCORRUPTED; 1308 goto error_rcur; 1309 } 1310 if (i == 1 && j == 1) { 1311 /* 1312 * Both the left and right records are valid. Choose the closer 1313 * inode chunk to the target. 1314 */ 1315 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > 1316 (rrec.ir_startino - pagino)) { 1317 *rec = rrec; 1318 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1319 *ocur = rcur; 1320 } else { 1321 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1322 } 1323 } else if (j == 1) { 1324 /* only the right record is valid */ 1325 *rec = rrec; 1326 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1327 *ocur = rcur; 1328 } else if (i == 1) { 1329 /* only the left record is valid */ 1330 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1331 } 1332 1333 return 0; 1334 1335 error_rcur: 1336 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); 1337 return error; 1338 } 1339 1340 /* 1341 * Use the free inode btree to find a free inode based on a newino hint. If 1342 * the hint is NULL, find the first free inode in the AG. 1343 */ 1344 STATIC int 1345 xfs_dialloc_ag_finobt_newino( 1346 struct xfs_agi *agi, 1347 struct xfs_btree_cur *cur, 1348 struct xfs_inobt_rec_incore *rec) 1349 { 1350 int error; 1351 int i; 1352 1353 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1354 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1355 XFS_LOOKUP_EQ, &i); 1356 if (error) 1357 return error; 1358 if (i == 1) { 1359 error = xfs_inobt_get_rec(cur, rec, &i); 1360 if (error) 1361 return error; 1362 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1363 return -EFSCORRUPTED; 1364 return 0; 1365 } 1366 } 1367 1368 /* 1369 * Find the first inode available in the AG. 1370 */ 1371 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1372 if (error) 1373 return error; 1374 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1375 return -EFSCORRUPTED; 1376 1377 error = xfs_inobt_get_rec(cur, rec, &i); 1378 if (error) 1379 return error; 1380 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1381 return -EFSCORRUPTED; 1382 1383 return 0; 1384 } 1385 1386 /* 1387 * Update the inobt based on a modification made to the finobt. Also ensure that 1388 * the records from both trees are equivalent post-modification. 
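 *
 * (The caller has already cleared the newly allocated inode's bit in its copy
 * of the finobt record; clearing the same bit in the inobt record here must
 * produce an identical free mask and free count, otherwise the two btrees
 * have diverged and we return -EFSCORRUPTED.)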
1389 */ 1390 STATIC int 1391 xfs_dialloc_ag_update_inobt( 1392 struct xfs_btree_cur *cur, /* inobt cursor */ 1393 struct xfs_inobt_rec_incore *frec, /* finobt record */ 1394 int offset) /* inode offset */ 1395 { 1396 struct xfs_inobt_rec_incore rec; 1397 int error; 1398 int i; 1399 1400 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1401 if (error) 1402 return error; 1403 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1404 return -EFSCORRUPTED; 1405 1406 error = xfs_inobt_get_rec(cur, &rec, &i); 1407 if (error) 1408 return error; 1409 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1410 return -EFSCORRUPTED; 1411 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1412 XFS_INODES_PER_CHUNK) == 0); 1413 1414 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1415 rec.ir_freecount--; 1416 1417 if (XFS_IS_CORRUPT(cur->bc_mp, 1418 rec.ir_free != frec->ir_free || 1419 rec.ir_freecount != frec->ir_freecount)) 1420 return -EFSCORRUPTED; 1421 1422 return xfs_inobt_update(cur, &rec); 1423 } 1424 1425 /* 1426 * Allocate an inode using the free inode btree, if available. Otherwise, fall 1427 * back to the inobt search algorithm. 1428 * 1429 * The caller selected an AG for us, and made sure that free inodes are 1430 * available. 1431 */ 1432 static int 1433 xfs_dialloc_ag( 1434 struct xfs_perag *pag, 1435 struct xfs_trans *tp, 1436 struct xfs_buf *agbp, 1437 xfs_ino_t parent, 1438 xfs_ino_t *inop) 1439 { 1440 struct xfs_mount *mp = tp->t_mountp; 1441 struct xfs_agi *agi = agbp->b_addr; 1442 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1443 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1444 struct xfs_btree_cur *cur; /* finobt cursor */ 1445 struct xfs_btree_cur *icur; /* inobt cursor */ 1446 struct xfs_inobt_rec_incore rec; 1447 xfs_ino_t ino; 1448 int error; 1449 int offset; 1450 int i; 1451 1452 if (!xfs_has_finobt(mp)) 1453 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop); 1454 1455 /* 1456 * If pagino is 0 (this is the root inode allocation) use newino. 1457 * This must work because we've just allocated some. 1458 */ 1459 if (!pagino) 1460 pagino = be32_to_cpu(agi->agi_newino); 1461 1462 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 1463 1464 error = xfs_check_agi_freecount(cur); 1465 if (error) 1466 goto error_cur; 1467 1468 /* 1469 * The search algorithm depends on whether we're in the same AG as the 1470 * parent. If so, find the closest available inode to the parent. If 1471 * not, consider the agi hint or find the first free inode in the AG. 1472 */ 1473 if (pag->pag_agno == pagno) 1474 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); 1475 else 1476 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); 1477 if (error) 1478 goto error_cur; 1479 1480 offset = xfs_inobt_first_free_inode(&rec); 1481 ASSERT(offset >= 0); 1482 ASSERT(offset < XFS_INODES_PER_CHUNK); 1483 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1484 XFS_INODES_PER_CHUNK) == 0); 1485 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1486 1487 /* 1488 * Modify or remove the finobt record. 1489 */ 1490 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1491 rec.ir_freecount--; 1492 if (rec.ir_freecount) 1493 error = xfs_inobt_update(cur, &rec); 1494 else 1495 error = xfs_btree_delete(cur, &i); 1496 if (error) 1497 goto error_cur; 1498 1499 /* 1500 * The finobt has now been updated appropriately. We haven't updated the 1501 * agi and superblock yet, so we can create an inobt cursor and validate 1502 * the original freecount. 
If all is well, make the equivalent update to
 * the inobt using the finobt record and offset information.
 */
	icur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(icur);
	if (error)
		goto error_icur;

	error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
	if (error)
		goto error_icur;

	/*
	 * Both trees have now been updated. We must update the perag and
	 * superblock before we can check the freecount for each btree.
	 */
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);

	error = xfs_check_agi_freecount(icur);
	if (error)
		goto error_icur;
	error = xfs_check_agi_freecount(cur);
	if (error)
		goto error_icur;

	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	*inop = ino;
	return 0;

error_icur:
	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
error_cur:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

static int
xfs_dialloc_roll(
	struct xfs_trans	**tpp,
	struct xfs_buf		*agibp)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_dquot_acct	*dqinfo;
	int			error;

	/*
	 * Hold on to the agibp across the commit so no other allocation can
	 * come in and take the free inodes we just allocated for our caller.
	 */
	xfs_trans_bhold(tp, agibp);

	/*
	 * We want the quota changes to be associated with the next transaction,
	 * NOT this one. So, detach the dqinfo from this and attach it to the
	 * next transaction.
	 */
	dqinfo = tp->t_dqinfo;
	tp->t_dqinfo = NULL;

	error = xfs_trans_roll(&tp);

	/* Re-attach the quota info that we detached from the previous transaction. */
	tp->t_dqinfo = dqinfo;

	/*
	 * Join the buffer even on commit error so that the buffer is released
	 * when the caller cancels the transaction and doesn't have to handle
	 * this error case specially.
	 */
	xfs_trans_bjoin(tp, agibp);
	*tpp = tp;
	return error;
}

static bool
xfs_dialloc_good_ag(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	umode_t			mode,
	int			flags,
	bool			ok_alloc)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_extlen_t		ineed;
	xfs_extlen_t		longest = 0;
	int			needspace;
	int			error;

	if (!pag)
		return false;
	if (!xfs_perag_allows_inodes(pag))
		return false;

	if (!xfs_perag_initialised_agi(pag)) {
		error = xfs_ialloc_read_agi(pag, tp, NULL);
		if (error)
			return false;
	}

	if (pag->pagi_freecount)
		return true;
	if (!ok_alloc)
		return false;

	if (!xfs_perag_initialised_agf(pag)) {
		error = xfs_alloc_read_agf(pag, tp, flags, NULL);
		if (error)
			return false;
	}

	/*
	 * Check that there is enough free space for the file plus a chunk of
	 * inodes if we need to allocate some. If this is the first pass across
	 * the AGs, take into account the potential space needed for alignment
	 * of inode chunks when checking the longest contiguous free space in
	 * the AG - this prevents us from getting ENOSPC because we have free
	 * space larger than ialloc_blks but alignment constraints prevent us
	 * from using it.
1626 * 1627 * If we can't find an AG with space for full alignment slack to be 1628 * taken into account, we must be near ENOSPC in all AGs. Hence we 1629 * don't include alignment for the second pass and so if we fail 1630 * allocation due to alignment issues then it is most likely a real 1631 * ENOSPC condition. 1632 * 1633 * XXX(dgc): this calculation is now bogus thanks to the per-ag 1634 * reservations that xfs_alloc_fix_freelist() now does via 1635 * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will 1636 * be more than large enough for the check below to succeed, but 1637 * xfs_alloc_space_available() will fail because of the non-zero 1638 * metadata reservation and hence we won't actually be able to allocate 1639 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC 1640 * because of this. 1641 */ 1642 ineed = M_IGEO(mp)->ialloc_min_blks; 1643 if (flags && ineed > 1) 1644 ineed += M_IGEO(mp)->cluster_align; 1645 longest = pag->pagf_longest; 1646 if (!longest) 1647 longest = pag->pagf_flcount > 0; 1648 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); 1649 1650 if (pag->pagf_freeblks < needspace + ineed || longest < ineed) 1651 return false; 1652 return true; 1653 } 1654 1655 static int 1656 xfs_dialloc_try_ag( 1657 struct xfs_perag *pag, 1658 struct xfs_trans **tpp, 1659 xfs_ino_t parent, 1660 xfs_ino_t *new_ino, 1661 bool ok_alloc) 1662 { 1663 struct xfs_buf *agbp; 1664 xfs_ino_t ino; 1665 int error; 1666 1667 /* 1668 * Then read in the AGI buffer and recheck with the AGI buffer 1669 * lock held. 1670 */ 1671 error = xfs_ialloc_read_agi(pag, *tpp, &agbp); 1672 if (error) 1673 return error; 1674 1675 if (!pag->pagi_freecount) { 1676 if (!ok_alloc) { 1677 error = -EAGAIN; 1678 goto out_release; 1679 } 1680 1681 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp); 1682 if (error < 0) 1683 goto out_release; 1684 1685 /* 1686 * We successfully allocated space for an inode cluster in this 1687 * AG. Roll the transaction so that we can allocate one of the 1688 * new inodes. 1689 */ 1690 ASSERT(pag->pagi_freecount > 0); 1691 error = xfs_dialloc_roll(tpp, agbp); 1692 if (error) 1693 goto out_release; 1694 } 1695 1696 /* Allocate an inode in the found AG */ 1697 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino); 1698 if (!error) 1699 *new_ino = ino; 1700 return error; 1701 1702 out_release: 1703 xfs_trans_brelse(*tpp, agbp); 1704 return error; 1705 } 1706 1707 /* 1708 * Allocate an on-disk inode. 1709 * 1710 * Mode is used to tell whether the new inode is a directory and hence where to 1711 * locate it. The on-disk inode that is allocated will be returned in @new_ino 1712 * on success, otherwise an error will be set to indicate the failure (e.g. 1713 * -ENOSPC). 1714 */ 1715 int 1716 xfs_dialloc( 1717 struct xfs_trans **tpp, 1718 xfs_ino_t parent, 1719 umode_t mode, 1720 xfs_ino_t *new_ino) 1721 { 1722 struct xfs_mount *mp = (*tpp)->t_mountp; 1723 xfs_agnumber_t agno; 1724 int error = 0; 1725 xfs_agnumber_t start_agno; 1726 struct xfs_perag *pag; 1727 struct xfs_ino_geometry *igeo = M_IGEO(mp); 1728 bool ok_alloc = true; 1729 bool low_space = false; 1730 int flags; 1731 xfs_ino_t ino = NULLFSINO; 1732 1733 /* 1734 * Directories, symlinks, and regular files frequently allocate at least 1735 * one block, so factor that potential expansion when we examine whether 1736 * an AG has enough space for file creation. 
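	 *
	 * Directories additionally start from a rotating AG (m_agirotor) so
	 * that new directory trees are spread across the filesystem;
	 * everything else starts the search in the parent's AG.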
1737 */ 1738 if (S_ISDIR(mode)) 1739 start_agno = atomic_inc_return(&mp->m_agirotor) % mp->m_maxagi; 1740 else { 1741 start_agno = XFS_INO_TO_AGNO(mp, parent); 1742 if (start_agno >= mp->m_maxagi) 1743 start_agno = 0; 1744 } 1745 1746 /* 1747 * If we have already hit the ceiling of inode blocks then clear 1748 * ok_alloc so we scan all available agi structures for a free 1749 * inode. 1750 * 1751 * Read rough value of mp->m_icount by percpu_counter_read_positive, 1752 * which will sacrifice the preciseness but improve the performance. 1753 */ 1754 if (igeo->maxicount && 1755 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos 1756 > igeo->maxicount) { 1757 ok_alloc = false; 1758 } 1759 1760 /* 1761 * If we are near to ENOSPC, we want to prefer allocation from AGs that 1762 * have free inodes in them rather than use up free space allocating new 1763 * inode chunks. Hence we turn off allocation for the first non-blocking 1764 * pass through the AGs if we are near ENOSPC to consume free inodes 1765 * that we can immediately allocate, but then we allow allocation on the 1766 * second pass if we fail to find an AG with free inodes in it. 1767 */ 1768 if (percpu_counter_read_positive(&mp->m_fdblocks) < 1769 mp->m_low_space[XFS_LOWSP_1_PCNT]) { 1770 ok_alloc = false; 1771 low_space = true; 1772 } 1773 1774 /* 1775 * Loop until we find an allocation group that either has free inodes 1776 * or in which we can allocate some inodes. Iterate through the 1777 * allocation groups upward, wrapping at the end. 1778 */ 1779 flags = XFS_ALLOC_FLAG_TRYLOCK; 1780 retry: 1781 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) { 1782 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) { 1783 error = xfs_dialloc_try_ag(pag, tpp, parent, 1784 &ino, ok_alloc); 1785 if (error != -EAGAIN) 1786 break; 1787 error = 0; 1788 } 1789 1790 if (xfs_is_shutdown(mp)) { 1791 error = -EFSCORRUPTED; 1792 break; 1793 } 1794 } 1795 if (pag) 1796 xfs_perag_rele(pag); 1797 if (error) 1798 return error; 1799 if (ino == NULLFSINO) { 1800 if (flags) { 1801 flags = 0; 1802 if (low_space) 1803 ok_alloc = true; 1804 goto retry; 1805 } 1806 return -ENOSPC; 1807 } 1808 *new_ino = ino; 1809 return 0; 1810 } 1811 1812 /* 1813 * Free the blocks of an inode chunk. We must consider that the inode chunk 1814 * might be sparse and only free the regions that are allocated as part of the 1815 * chunk. 1816 */ 1817 STATIC void 1818 xfs_difree_inode_chunk( 1819 struct xfs_trans *tp, 1820 xfs_agnumber_t agno, 1821 struct xfs_inobt_rec_incore *rec) 1822 { 1823 struct xfs_mount *mp = tp->t_mountp; 1824 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, 1825 rec->ir_startino); 1826 int startidx, endidx; 1827 int nextbit; 1828 xfs_agblock_t agbno; 1829 int contigblk; 1830 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS); 1831 1832 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1833 /* not sparse, calculate extent info directly */ 1834 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), 1835 M_IGEO(mp)->ialloc_blks, 1836 &XFS_RMAP_OINFO_INODES); 1837 return; 1838 } 1839 1840 /* holemask is only 16-bits (fits in an unsigned long) */ 1841 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0])); 1842 holemask[0] = rec->ir_holemask; 1843 1844 /* 1845 * Find contiguous ranges of zeroes (i.e., allocated regions) in the 1846 * holemask and convert the start/end index of each range to an extent. 1847 * We start with the start and end index both pointing at the first 0 in 1848 * the mask. 
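	 *
	 * For example (hypothetical record, assuming 8 inodes per block): a
	 * holemask of 0x0ff0 has zero bits 0-3 and 12-15, i.e. inodes 0-15 and
	 * 48-63 were physically allocated. That yields two extents: one
	 * starting at sagbno for (4 * 4) / 8 = 2 blocks, and one starting at
	 * sagbno + 6 for another 2 blocks.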
1849 */ 1850 startidx = endidx = find_first_zero_bit(holemask, 1851 XFS_INOBT_HOLEMASK_BITS); 1852 nextbit = startidx + 1; 1853 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1854 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1855 nextbit); 1856 /* 1857 * If the next zero bit is contiguous, update the end index of 1858 * the current range and continue. 1859 */ 1860 if (nextbit != XFS_INOBT_HOLEMASK_BITS && 1861 nextbit == endidx + 1) { 1862 endidx = nextbit; 1863 goto next; 1864 } 1865 1866 /* 1867 * nextbit is not contiguous with the current end index. Convert 1868 * the current start/end to an extent and add it to the free 1869 * list. 1870 */ 1871 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) / 1872 mp->m_sb.sb_inopblock; 1873 contigblk = ((endidx - startidx + 1) * 1874 XFS_INODES_PER_HOLEMASK_BIT) / 1875 mp->m_sb.sb_inopblock; 1876 1877 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1878 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1879 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno), 1880 contigblk, &XFS_RMAP_OINFO_INODES); 1881 1882 /* reset range to current bit and carry on... */ 1883 startidx = endidx = nextbit; 1884 1885 next: 1886 nextbit++; 1887 } 1888 } 1889 1890 STATIC int 1891 xfs_difree_inobt( 1892 struct xfs_perag *pag, 1893 struct xfs_trans *tp, 1894 struct xfs_buf *agbp, 1895 xfs_agino_t agino, 1896 struct xfs_icluster *xic, 1897 struct xfs_inobt_rec_incore *orec) 1898 { 1899 struct xfs_mount *mp = pag->pag_mount; 1900 struct xfs_agi *agi = agbp->b_addr; 1901 struct xfs_btree_cur *cur; 1902 struct xfs_inobt_rec_incore rec; 1903 int ilen; 1904 int error; 1905 int i; 1906 int off; 1907 1908 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 1909 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); 1910 1911 /* 1912 * Initialize the cursor. 1913 */ 1914 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1915 1916 error = xfs_check_agi_freecount(cur); 1917 if (error) 1918 goto error0; 1919 1920 /* 1921 * Look for the entry describing this inode. 1922 */ 1923 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1924 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", 1925 __func__, error); 1926 goto error0; 1927 } 1928 if (XFS_IS_CORRUPT(mp, i != 1)) { 1929 error = -EFSCORRUPTED; 1930 goto error0; 1931 } 1932 error = xfs_inobt_get_rec(cur, &rec, &i); 1933 if (error) { 1934 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1935 __func__, error); 1936 goto error0; 1937 } 1938 if (XFS_IS_CORRUPT(mp, i != 1)) { 1939 error = -EFSCORRUPTED; 1940 goto error0; 1941 } 1942 /* 1943 * Get the offset in the inode chunk. 1944 */ 1945 off = agino - rec.ir_startino; 1946 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); 1947 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off))); 1948 /* 1949 * Mark the inode free & increment the count. 1950 */ 1951 rec.ir_free |= XFS_INOBT_MASK(off); 1952 rec.ir_freecount++; 1953 1954 /* 1955 * When an inode chunk is free, it becomes eligible for removal. Don't 1956 * remove the chunk if the block size is large enough for multiple inode 1957 * chunks (that might not be free). 
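	 *
	 * (For instance, a filesystem with 64k blocks and 512 byte inodes has
	 * 128 inodes per block - more than the 64 inodes in a chunk - so a
	 * freed chunk is left in place rather than deleted, since the block
	 * may also hold inodes from a neighbouring chunk.)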
1958 */ 1959 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 1960 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1961 struct xfs_perag *pag = agbp->b_pag; 1962 1963 xic->deleted = true; 1964 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, 1965 rec.ir_startino); 1966 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1967 1968 /* 1969 * Remove the inode cluster from the AGI B+Tree, adjust the 1970 * AGI and Superblock inode counts, and mark the disk space 1971 * to be freed when the transaction is committed. 1972 */ 1973 ilen = rec.ir_freecount; 1974 be32_add_cpu(&agi->agi_count, -ilen); 1975 be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); 1976 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); 1977 pag->pagi_freecount -= ilen - 1; 1978 pag->pagi_count -= ilen; 1979 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); 1980 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 1981 1982 if ((error = xfs_btree_delete(cur, &i))) { 1983 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", 1984 __func__, error); 1985 goto error0; 1986 } 1987 1988 xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 1989 } else { 1990 xic->deleted = false; 1991 1992 error = xfs_inobt_update(cur, &rec); 1993 if (error) { 1994 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", 1995 __func__, error); 1996 goto error0; 1997 } 1998 1999 /* 2000 * Change the inode free counts and log the ag/sb changes. 2001 */ 2002 be32_add_cpu(&agi->agi_freecount, 1); 2003 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 2004 pag->pagi_freecount++; 2005 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); 2006 } 2007 2008 error = xfs_check_agi_freecount(cur); 2009 if (error) 2010 goto error0; 2011 2012 *orec = rec; 2013 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2014 return 0; 2015 2016 error0: 2017 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2018 return error; 2019 } 2020 2021 /* 2022 * Free an inode in the free inode btree. 2023 */ 2024 STATIC int 2025 xfs_difree_finobt( 2026 struct xfs_perag *pag, 2027 struct xfs_trans *tp, 2028 struct xfs_buf *agbp, 2029 xfs_agino_t agino, 2030 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ 2031 { 2032 struct xfs_mount *mp = pag->pag_mount; 2033 struct xfs_btree_cur *cur; 2034 struct xfs_inobt_rec_incore rec; 2035 int offset = agino - ibtrec->ir_startino; 2036 int error; 2037 int i; 2038 2039 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 2040 2041 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); 2042 if (error) 2043 goto error; 2044 if (i == 0) { 2045 /* 2046 * If the record does not exist in the finobt, we must have just 2047 * freed an inode in a previously fully allocated chunk. If not, 2048 * something is out of sync. 2049 */ 2050 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) { 2051 error = -EFSCORRUPTED; 2052 goto error; 2053 } 2054 2055 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask, 2056 ibtrec->ir_count, 2057 ibtrec->ir_freecount, 2058 ibtrec->ir_free, &i); 2059 if (error) 2060 goto error; 2061 ASSERT(i == 1); 2062 2063 goto out; 2064 } 2065 2066 /* 2067 * Read and update the existing record. We could just copy the ibtrec 2068 * across here, but that would defeat the purpose of having redundant 2069 * metadata. By making the modifications independently, we can catch 2070 * corruptions that we wouldn't see if we just copied from one record 2071 * to another. 
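 *
 * In other words, apply the single-inode free to the finobt copy of the
 * record below and then cross-check the result against the inobt record
 * (ibtrec) that the caller just updated; any disagreement between the
 * two trees is returned as -EFSCORRUPTED rather than silently papered
 * over.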
2072 */ 2073 error = xfs_inobt_get_rec(cur, &rec, &i); 2074 if (error) 2075 goto error; 2076 if (XFS_IS_CORRUPT(mp, i != 1)) { 2077 error = -EFSCORRUPTED; 2078 goto error; 2079 } 2080 2081 rec.ir_free |= XFS_INOBT_MASK(offset); 2082 rec.ir_freecount++; 2083 2084 if (XFS_IS_CORRUPT(mp, 2085 rec.ir_free != ibtrec->ir_free || 2086 rec.ir_freecount != ibtrec->ir_freecount)) { 2087 error = -EFSCORRUPTED; 2088 goto error; 2089 } 2090 2091 /* 2092 * The content of inobt records should always match between the inobt 2093 * and finobt. The lifecycle of records in the finobt is different from 2094 * the inobt in that the finobt only tracks records with at least one 2095 * free inode. Hence, if all of the inodes are free and we aren't 2096 * keeping inode chunks permanently on disk, remove the record. 2097 * Otherwise, update the record with the new information. 2098 * 2099 * Note that we currently can't free chunks when the block size is large 2100 * enough for multiple chunks. Leave the finobt record to remain in sync 2101 * with the inobt. 2102 */ 2103 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 2104 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 2105 error = xfs_btree_delete(cur, &i); 2106 if (error) 2107 goto error; 2108 ASSERT(i == 1); 2109 } else { 2110 error = xfs_inobt_update(cur, &rec); 2111 if (error) 2112 goto error; 2113 } 2114 2115 out: 2116 error = xfs_check_agi_freecount(cur); 2117 if (error) 2118 goto error; 2119 2120 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2121 return 0; 2122 2123 error: 2124 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2125 return error; 2126 } 2127 2128 /* 2129 * Free disk inode. Carefully avoids touching the incore inode, all 2130 * manipulations incore are the caller's responsibility. 2131 * The on-disk inode is not changed by this operation, only the 2132 * btree (free inode mask) is changed. 2133 */ 2134 int 2135 xfs_difree( 2136 struct xfs_trans *tp, 2137 struct xfs_perag *pag, 2138 xfs_ino_t inode, 2139 struct xfs_icluster *xic) 2140 { 2141 /* REFERENCED */ 2142 xfs_agblock_t agbno; /* block number containing inode */ 2143 struct xfs_buf *agbp; /* buffer for allocation group header */ 2144 xfs_agino_t agino; /* allocation group inode number */ 2145 int error; /* error return value */ 2146 struct xfs_mount *mp = tp->t_mountp; 2147 struct xfs_inobt_rec_incore rec;/* btree record */ 2148 2149 /* 2150 * Break up inode number into its components. 2151 */ 2152 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) { 2153 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).", 2154 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno); 2155 ASSERT(0); 2156 return -EINVAL; 2157 } 2158 agino = XFS_INO_TO_AGINO(mp, inode); 2159 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2160 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", 2161 __func__, (unsigned long long)inode, 2162 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2163 ASSERT(0); 2164 return -EINVAL; 2165 } 2166 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2167 if (agbno >= mp->m_sb.sb_agblocks) { 2168 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 2169 __func__, agbno, mp->m_sb.sb_agblocks); 2170 ASSERT(0); 2171 return -EINVAL; 2172 } 2173 /* 2174 * Get the allocation group header. 2175 */ 2176 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2177 if (error) { 2178 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", 2179 __func__, error); 2180 return error; 2181 } 2182 2183 /* 2184 * Fix up the inode allocation btree. 
2185 */ 2186 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec); 2187 if (error) 2188 goto error0; 2189 2190 /* 2191 * Fix up the free inode btree. 2192 */ 2193 if (xfs_has_finobt(mp)) { 2194 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec); 2195 if (error) 2196 goto error0; 2197 } 2198 2199 return 0; 2200 2201 error0: 2202 return error; 2203 } 2204 2205 STATIC int 2206 xfs_imap_lookup( 2207 struct xfs_perag *pag, 2208 struct xfs_trans *tp, 2209 xfs_agino_t agino, 2210 xfs_agblock_t agbno, 2211 xfs_agblock_t *chunk_agbno, 2212 xfs_agblock_t *offset_agbno, 2213 int flags) 2214 { 2215 struct xfs_mount *mp = pag->pag_mount; 2216 struct xfs_inobt_rec_incore rec; 2217 struct xfs_btree_cur *cur; 2218 struct xfs_buf *agbp; 2219 int error; 2220 int i; 2221 2222 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2223 if (error) { 2224 xfs_alert(mp, 2225 "%s: xfs_ialloc_read_agi() returned error %d, agno %d", 2226 __func__, error, pag->pag_agno); 2227 return error; 2228 } 2229 2230 /* 2231 * Lookup the inode record for the given agino. If the record cannot be 2232 * found, then it's an invalid inode number and we should abort. Once 2233 * we have a record, we need to ensure it contains the inode number 2234 * we are looking up. 2235 */ 2236 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 2237 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); 2238 if (!error) { 2239 if (i) 2240 error = xfs_inobt_get_rec(cur, &rec, &i); 2241 if (!error && i == 0) 2242 error = -EINVAL; 2243 } 2244 2245 xfs_trans_brelse(tp, agbp); 2246 xfs_btree_del_cursor(cur, error); 2247 if (error) 2248 return error; 2249 2250 /* check that the returned record contains the required inode */ 2251 if (rec.ir_startino > agino || 2252 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino) 2253 return -EINVAL; 2254 2255 /* for untrusted inodes check it is allocated first */ 2256 if ((flags & XFS_IGET_UNTRUSTED) && 2257 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 2258 return -EINVAL; 2259 2260 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 2261 *offset_agbno = agbno - *chunk_agbno; 2262 return 0; 2263 } 2264 2265 /* 2266 * Return the location of the inode in imap, for mapping it into a buffer. 2267 */ 2268 int 2269 xfs_imap( 2270 struct xfs_perag *pag, 2271 struct xfs_trans *tp, 2272 xfs_ino_t ino, /* inode to locate */ 2273 struct xfs_imap *imap, /* location map structure */ 2274 uint flags) /* flags for inode btree lookup */ 2275 { 2276 struct xfs_mount *mp = pag->pag_mount; 2277 xfs_agblock_t agbno; /* block number of inode in the alloc group */ 2278 xfs_agino_t agino; /* inode number within alloc group */ 2279 xfs_agblock_t chunk_agbno; /* first block in inode chunk */ 2280 xfs_agblock_t cluster_agbno; /* first block in inode cluster */ 2281 int error; /* error code */ 2282 int offset; /* index of inode in its buffer */ 2283 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */ 2284 2285 ASSERT(ino != NULLFSINO); 2286 2287 /* 2288 * Split up the inode number into its parts. 2289 */ 2290 agino = XFS_INO_TO_AGINO(mp, ino); 2291 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2292 if (agbno >= mp->m_sb.sb_agblocks || 2293 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2294 error = -EINVAL; 2295 #ifdef DEBUG 2296 /* 2297 * Don't output diagnostic information for untrusted inodes 2298 * as they can be invalid without implying corruption. 
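 *
 * Bulkstat and open-by-handle are the usual sources of untrusted
 * inode numbers, so a stale or made-up value is expected here and
 * is simply rejected with -EINVAL.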
2299 */ 2300 if (flags & XFS_IGET_UNTRUSTED) 2301 return error; 2302 if (agbno >= mp->m_sb.sb_agblocks) { 2303 xfs_alert(mp, 2304 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", 2305 __func__, (unsigned long long)agbno, 2306 (unsigned long)mp->m_sb.sb_agblocks); 2307 } 2308 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2309 xfs_alert(mp, 2310 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", 2311 __func__, ino, 2312 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2313 } 2314 xfs_stack_trace(); 2315 #endif /* DEBUG */ 2316 return error; 2317 } 2318 2319 /* 2320 * For bulkstat and handle lookups, we have an untrusted inode number 2321 * that we have to verify is valid. We cannot do this just by reading 2322 * the inode buffer as it may have been unlinked and removed leaving 2323 * inodes in stale state on disk. Hence we have to do a btree lookup 2324 * in all cases where an untrusted inode number is passed. 2325 */ 2326 if (flags & XFS_IGET_UNTRUSTED) { 2327 error = xfs_imap_lookup(pag, tp, agino, agbno, 2328 &chunk_agbno, &offset_agbno, flags); 2329 if (error) 2330 return error; 2331 goto out_map; 2332 } 2333 2334 /* 2335 * If the inode cluster size is the same as the blocksize or 2336 * smaller we get to the buffer by simple arithmetics. 2337 */ 2338 if (M_IGEO(mp)->blocks_per_cluster == 1) { 2339 offset = XFS_INO_TO_OFFSET(mp, ino); 2340 ASSERT(offset < mp->m_sb.sb_inopblock); 2341 2342 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno); 2343 imap->im_len = XFS_FSB_TO_BB(mp, 1); 2344 imap->im_boffset = (unsigned short)(offset << 2345 mp->m_sb.sb_inodelog); 2346 return 0; 2347 } 2348 2349 /* 2350 * If the inode chunks are aligned then use simple maths to 2351 * find the location. Otherwise we have to do a btree 2352 * lookup to find the location. 2353 */ 2354 if (M_IGEO(mp)->inoalign_mask) { 2355 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask; 2356 chunk_agbno = agbno - offset_agbno; 2357 } else { 2358 error = xfs_imap_lookup(pag, tp, agino, agbno, 2359 &chunk_agbno, &offset_agbno, flags); 2360 if (error) 2361 return error; 2362 } 2363 2364 out_map: 2365 ASSERT(agbno >= chunk_agbno); 2366 cluster_agbno = chunk_agbno + 2367 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) * 2368 M_IGEO(mp)->blocks_per_cluster); 2369 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + 2370 XFS_INO_TO_OFFSET(mp, ino); 2371 2372 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno); 2373 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); 2374 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog); 2375 2376 /* 2377 * If the inode number maps to a block outside the bounds 2378 * of the file system then return NULL rather than calling 2379 * read_buf and panicing when we get an error from the 2380 * driver. 2381 */ 2382 if ((imap->im_blkno + imap->im_len) > 2383 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 2384 xfs_alert(mp, 2385 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", 2386 __func__, (unsigned long long) imap->im_blkno, 2387 (unsigned long long) imap->im_len, 2388 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 2389 return -EINVAL; 2390 } 2391 return 0; 2392 } 2393 2394 /* 2395 * Log specified fields for the ag hdr (inode section). The growth of the agi 2396 * structure over time requires that we interpret the buffer as two logical 2397 * regions delineated by the end of the unlinked list. This is due to the size 2398 * of the hash table and its location in the middle of the agi. 
2399 * 2400 * For example, a request to log a field before agi_unlinked and a field after 2401 * agi_unlinked could cause us to log the entire hash table and use an excessive 2402 * amount of log space. To avoid this behavior, log the region up through 2403 * agi_unlinked in one call and the region after agi_unlinked through the end of 2404 * the structure in another. 2405 */ 2406 void 2407 xfs_ialloc_log_agi( 2408 struct xfs_trans *tp, 2409 struct xfs_buf *bp, 2410 uint32_t fields) 2411 { 2412 int first; /* first byte number */ 2413 int last; /* last byte number */ 2414 static const short offsets[] = { /* field starting offsets */ 2415 /* keep in sync with bit definitions */ 2416 offsetof(xfs_agi_t, agi_magicnum), 2417 offsetof(xfs_agi_t, agi_versionnum), 2418 offsetof(xfs_agi_t, agi_seqno), 2419 offsetof(xfs_agi_t, agi_length), 2420 offsetof(xfs_agi_t, agi_count), 2421 offsetof(xfs_agi_t, agi_root), 2422 offsetof(xfs_agi_t, agi_level), 2423 offsetof(xfs_agi_t, agi_freecount), 2424 offsetof(xfs_agi_t, agi_newino), 2425 offsetof(xfs_agi_t, agi_dirino), 2426 offsetof(xfs_agi_t, agi_unlinked), 2427 offsetof(xfs_agi_t, agi_free_root), 2428 offsetof(xfs_agi_t, agi_free_level), 2429 offsetof(xfs_agi_t, agi_iblocks), 2430 sizeof(xfs_agi_t) 2431 }; 2432 #ifdef DEBUG 2433 struct xfs_agi *agi = bp->b_addr; 2434 2435 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 2436 #endif 2437 2438 /* 2439 * Compute byte offsets for the first and last fields in the first 2440 * region and log the agi buffer. This only logs up through 2441 * agi_unlinked. 2442 */ 2443 if (fields & XFS_AGI_ALL_BITS_R1) { 2444 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1, 2445 &first, &last); 2446 xfs_trans_log_buf(tp, bp, first, last); 2447 } 2448 2449 /* 2450 * Mask off the bits in the first region and calculate the first and 2451 * last field offsets for any bits in the second region. 2452 */ 2453 fields &= ~XFS_AGI_ALL_BITS_R1; 2454 if (fields) { 2455 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2, 2456 &first, &last); 2457 xfs_trans_log_buf(tp, bp, first, last); 2458 } 2459 } 2460 2461 static xfs_failaddr_t 2462 xfs_agi_verify( 2463 struct xfs_buf *bp) 2464 { 2465 struct xfs_mount *mp = bp->b_mount; 2466 struct xfs_agi *agi = bp->b_addr; 2467 int i; 2468 2469 if (xfs_has_crc(mp)) { 2470 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid)) 2471 return __this_address; 2472 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn))) 2473 return __this_address; 2474 } 2475 2476 /* 2477 * Validate the magic number of the agi block. 2478 */ 2479 if (!xfs_verify_magic(bp, agi->agi_magicnum)) 2480 return __this_address; 2481 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) 2482 return __this_address; 2483 2484 if (be32_to_cpu(agi->agi_level) < 1 || 2485 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels) 2486 return __this_address; 2487 2488 if (xfs_has_finobt(mp) && 2489 (be32_to_cpu(agi->agi_free_level) < 1 || 2490 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels)) 2491 return __this_address; 2492 2493 /* 2494 * during growfs operations, the perag is not fully initialised, 2495 * so we can't use it for any useful checking. growfs ensures we can't 2496 * use it by using uncached buffers that don't have the perag attached 2497 * so we can detect and avoid this problem. 
2498 */ 2499 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) 2500 return __this_address; 2501 2502 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { 2503 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO)) 2504 continue; 2505 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i]))) 2506 return __this_address; 2507 } 2508 2509 return NULL; 2510 } 2511 2512 static void 2513 xfs_agi_read_verify( 2514 struct xfs_buf *bp) 2515 { 2516 struct xfs_mount *mp = bp->b_mount; 2517 xfs_failaddr_t fa; 2518 2519 if (xfs_has_crc(mp) && 2520 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) 2521 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 2522 else { 2523 fa = xfs_agi_verify(bp); 2524 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI)) 2525 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2526 } 2527 } 2528 2529 static void 2530 xfs_agi_write_verify( 2531 struct xfs_buf *bp) 2532 { 2533 struct xfs_mount *mp = bp->b_mount; 2534 struct xfs_buf_log_item *bip = bp->b_log_item; 2535 struct xfs_agi *agi = bp->b_addr; 2536 xfs_failaddr_t fa; 2537 2538 fa = xfs_agi_verify(bp); 2539 if (fa) { 2540 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2541 return; 2542 } 2543 2544 if (!xfs_has_crc(mp)) 2545 return; 2546 2547 if (bip) 2548 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); 2549 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); 2550 } 2551 2552 const struct xfs_buf_ops xfs_agi_buf_ops = { 2553 .name = "xfs_agi", 2554 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) }, 2555 .verify_read = xfs_agi_read_verify, 2556 .verify_write = xfs_agi_write_verify, 2557 .verify_struct = xfs_agi_verify, 2558 }; 2559 2560 /* 2561 * Read in the allocation group header (inode allocation section) 2562 */ 2563 int 2564 xfs_read_agi( 2565 struct xfs_perag *pag, 2566 struct xfs_trans *tp, 2567 struct xfs_buf **agibpp) 2568 { 2569 struct xfs_mount *mp = pag->pag_mount; 2570 int error; 2571 2572 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno); 2573 2574 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 2575 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)), 2576 XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops); 2577 if (error) 2578 return error; 2579 if (tp) 2580 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF); 2581 2582 xfs_buf_set_ref(*agibpp, XFS_AGI_REF); 2583 return 0; 2584 } 2585 2586 /* 2587 * Read in the agi and initialise the per-ag data. If the caller supplies a 2588 * @agibpp, return the locked AGI buffer to them, otherwise release it. 2589 */ 2590 int 2591 xfs_ialloc_read_agi( 2592 struct xfs_perag *pag, 2593 struct xfs_trans *tp, 2594 struct xfs_buf **agibpp) 2595 { 2596 struct xfs_buf *agibp; 2597 struct xfs_agi *agi; 2598 int error; 2599 2600 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno); 2601 2602 error = xfs_read_agi(pag, tp, &agibp); 2603 if (error) 2604 return error; 2605 2606 agi = agibp->b_addr; 2607 if (!xfs_perag_initialised_agi(pag)) { 2608 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 2609 pag->pagi_count = be32_to_cpu(agi->agi_count); 2610 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate); 2611 } 2612 2613 /* 2614 * It's possible for these to be out of sync if 2615 * we are in the middle of a forced shutdown. 2616 */ 2617 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || 2618 xfs_is_shutdown(pag->pag_mount)); 2619 if (agibpp) 2620 *agibpp = agibp; 2621 else 2622 xfs_trans_brelse(tp, agibp); 2623 return 0; 2624 } 2625 2626 /* Is there an inode record covering a given range of inode numbers? 
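 *
 * A record only covers the portions of its chunk that are not marked as
 * holes; each holemask bit stands for XFS_INODES_PER_HOLEMASK_BIT (4)
 * inodes. For illustration, a sparse record with ir_startino == 128 and
 * ir_holemask == 0x000f describes inodes 144-191 only, so a query for
 * inodes 130-140 reports no coverage while one for 140-150 does.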
*/ 2627 int 2628 xfs_ialloc_has_inode_record( 2629 struct xfs_btree_cur *cur, 2630 xfs_agino_t low, 2631 xfs_agino_t high, 2632 bool *exists) 2633 { 2634 struct xfs_inobt_rec_incore irec; 2635 xfs_agino_t agino; 2636 uint16_t holemask; 2637 int has_record; 2638 int i; 2639 int error; 2640 2641 *exists = false; 2642 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record); 2643 while (error == 0 && has_record) { 2644 error = xfs_inobt_get_rec(cur, &irec, &has_record); 2645 if (error || irec.ir_startino > high) 2646 break; 2647 2648 agino = irec.ir_startino; 2649 holemask = irec.ir_holemask; 2650 for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1, 2651 i++, agino += XFS_INODES_PER_HOLEMASK_BIT) { 2652 if (holemask & 1) 2653 continue; 2654 if (agino + XFS_INODES_PER_HOLEMASK_BIT > low && 2655 agino <= high) { 2656 *exists = true; 2657 return 0; 2658 } 2659 } 2660 2661 error = xfs_btree_increment(cur, 0, &has_record); 2662 } 2663 return error; 2664 } 2665 2666 /* Is there an inode record covering a given extent? */ 2667 int 2668 xfs_ialloc_has_inodes_at_extent( 2669 struct xfs_btree_cur *cur, 2670 xfs_agblock_t bno, 2671 xfs_extlen_t len, 2672 bool *exists) 2673 { 2674 xfs_agino_t low; 2675 xfs_agino_t high; 2676 2677 low = XFS_AGB_TO_AGINO(cur->bc_mp, bno); 2678 high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1; 2679 2680 return xfs_ialloc_has_inode_record(cur, low, high, exists); 2681 } 2682 2683 struct xfs_ialloc_count_inodes { 2684 xfs_agino_t count; 2685 xfs_agino_t freecount; 2686 }; 2687 2688 /* Record inode counts across all inobt records. */ 2689 STATIC int 2690 xfs_ialloc_count_inodes_rec( 2691 struct xfs_btree_cur *cur, 2692 const union xfs_btree_rec *rec, 2693 void *priv) 2694 { 2695 struct xfs_inobt_rec_incore irec; 2696 struct xfs_ialloc_count_inodes *ci = priv; 2697 2698 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec); 2699 ci->count += irec.ir_count; 2700 ci->freecount += irec.ir_freecount; 2701 2702 return 0; 2703 } 2704 2705 /* Count allocated and free inodes under an inobt. */ 2706 int 2707 xfs_ialloc_count_inodes( 2708 struct xfs_btree_cur *cur, 2709 xfs_agino_t *count, 2710 xfs_agino_t *freecount) 2711 { 2712 struct xfs_ialloc_count_inodes ci = {0}; 2713 int error; 2714 2715 ASSERT(cur->bc_btnum == XFS_BTNUM_INO); 2716 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci); 2717 if (error) 2718 return error; 2719 2720 *count = ci.count; 2721 *freecount = ci.freecount; 2722 return 0; 2723 } 2724 2725 /* 2726 * Initialize inode-related geometry information. 2727 * 2728 * Compute the inode btree min and max levels and set maxicount. 2729 * 2730 * Set the inode cluster size. This may still be overridden by the file 2731 * system block size if it is larger than the chosen cluster size. 2732 * 2733 * For v5 filesystems, scale the cluster size with the inode size to keep a 2734 * constant ratio of inode per cluster buffer, but only if mkfs has set the 2735 * inode alignment value appropriately for larger cluster sizes. 2736 * 2737 * Then compute the inode cluster alignment information. 2738 */ 2739 void 2740 xfs_ialloc_setup_geometry( 2741 struct xfs_mount *mp) 2742 { 2743 struct xfs_sb *sbp = &mp->m_sb; 2744 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2745 uint64_t icount; 2746 uint inodes; 2747 2748 igeo->new_diflags2 = 0; 2749 if (xfs_has_bigtime(mp)) 2750 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME; 2751 if (xfs_has_large_extent_counts(mp)) 2752 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64; 2753 2754 /* Compute inode btree geometry. 
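 *
 * For a feel for the numbers, assume 4096-byte blocks and 512-byte
 * inodes (sb_inopblock == 8, sb_inopblog == 3):
 *
 *	ialloc_inos = max(XFS_INODES_PER_CHUNK, 8) = 64 inodes per chunk
 *	ialloc_blks = 64 >> 3 = 8 blocks per chunk
 *
 * agino_log is the number of bits needed to hold any AG-relative inode
 * number.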
*/ 2755 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 2756 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); 2757 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); 2758 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2; 2759 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2; 2760 2761 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK, 2762 sbp->sb_inopblock); 2763 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog; 2764 2765 if (sbp->sb_spino_align) 2766 igeo->ialloc_min_blks = sbp->sb_spino_align; 2767 else 2768 igeo->ialloc_min_blks = igeo->ialloc_blks; 2769 2770 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */ 2771 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG; 2772 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr, 2773 inodes); 2774 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk()); 2775 2776 /* 2777 * Set the maximum inode count for this filesystem, being careful not 2778 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular 2779 * users should never get here due to failing sb verification, but 2780 * certain users (xfs_db) need to be usable even with corrupt metadata. 2781 */ 2782 if (sbp->sb_imax_pct && igeo->ialloc_blks) { 2783 /* 2784 * Make sure the maximum inode count is a multiple 2785 * of the units we allocate inodes in. 2786 */ 2787 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 2788 do_div(icount, 100); 2789 do_div(icount, igeo->ialloc_blks); 2790 igeo->maxicount = XFS_FSB_TO_INO(mp, 2791 icount * igeo->ialloc_blks); 2792 } else { 2793 igeo->maxicount = 0; 2794 } 2795 2796 /* 2797 * Compute the desired size of an inode cluster buffer size, which 2798 * starts at 8K and (on v5 filesystems) scales up with larger inode 2799 * sizes. 2800 * 2801 * Preserve the desired inode cluster size because the sparse inodes 2802 * feature uses that desired size (not the actual size) to compute the 2803 * sparse inode alignment. The mount code validates this value, so we 2804 * cannot change the behavior. 2805 */ 2806 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; 2807 if (xfs_has_v3inodes(mp)) { 2808 int new_size = igeo->inode_cluster_size_raw; 2809 2810 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 2811 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 2812 igeo->inode_cluster_size_raw = new_size; 2813 } 2814 2815 /* Calculate inode cluster ratios. */ 2816 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize) 2817 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp, 2818 igeo->inode_cluster_size_raw); 2819 else 2820 igeo->blocks_per_cluster = 1; 2821 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster); 2822 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster); 2823 2824 /* Calculate inode cluster alignment. 
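 *
 * Continuing the 4096-byte block, 512-byte inode example: on a v5
 * filesystem the 8k base cluster (XFS_INODE_BIG_CLUSTER_SIZE) scales by
 * 512 / 256 to 16k, so blocks_per_cluster == 4 and inodes_per_cluster
 * == 32. Assuming mkfs set sb_inoalignmt to 8 blocks, the code below
 * yields cluster_align == 8, inoalign_mask == 7 and
 * cluster_align_inodes == 64.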
*/ 2825 if (xfs_has_align(mp) && 2826 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster) 2827 igeo->cluster_align = mp->m_sb.sb_inoalignmt; 2828 else 2829 igeo->cluster_align = 1; 2830 igeo->inoalign_mask = igeo->cluster_align - 1; 2831 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align); 2832 2833 /* 2834 * If we are using stripe alignment, check whether 2835 * the stripe unit is a multiple of the inode alignment 2836 */ 2837 if (mp->m_dalign && igeo->inoalign_mask && 2838 !(mp->m_dalign & igeo->inoalign_mask)) 2839 igeo->ialloc_align = mp->m_dalign; 2840 else 2841 igeo->ialloc_align = 0; 2842 } 2843 2844 /* Compute the location of the root directory inode that is laid out by mkfs. */ 2845 xfs_ino_t 2846 xfs_ialloc_calc_rootino( 2847 struct xfs_mount *mp, 2848 int sunit) 2849 { 2850 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2851 xfs_agblock_t first_bno; 2852 2853 /* 2854 * Pre-calculate the geometry of AG 0. We know what it looks like 2855 * because libxfs knows how to create allocation groups now. 2856 * 2857 * first_bno is the first block in which mkfs could possibly have 2858 * allocated the root directory inode, once we factor in the metadata 2859 * that mkfs formats before it. Namely, the four AG headers... 2860 */ 2861 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize); 2862 2863 /* ...the two free space btree roots... */ 2864 first_bno += 2; 2865 2866 /* ...the inode btree root... */ 2867 first_bno += 1; 2868 2869 /* ...the initial AGFL... */ 2870 first_bno += xfs_alloc_min_freelist(mp, NULL); 2871 2872 /* ...the free inode btree root... */ 2873 if (xfs_has_finobt(mp)) 2874 first_bno++; 2875 2876 /* ...the reverse mapping btree root... */ 2877 if (xfs_has_rmapbt(mp)) 2878 first_bno++; 2879 2880 /* ...the reference count btree... */ 2881 if (xfs_has_reflink(mp)) 2882 first_bno++; 2883 2884 /* 2885 * ...and the log, if it is allocated in the first allocation group. 2886 * 2887 * This can happen with filesystems that only have a single 2888 * allocation group, or very odd geometries created by old mkfs 2889 * versions on very small filesystems. 2890 */ 2891 if (xfs_ag_contains_log(mp, 0)) 2892 first_bno += mp->m_sb.sb_logblocks; 2893 2894 /* 2895 * Now round first_bno up to whatever allocation alignment is given 2896 * by the filesystem or was passed in. 2897 */ 2898 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0) 2899 first_bno = roundup(first_bno, sunit); 2900 else if (xfs_has_align(mp) && 2901 mp->m_sb.sb_inoalignmt > 1) 2902 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); 2903 2904 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); 2905 } 2906 2907 /* 2908 * Ensure there are not sparse inode clusters that cross the new EOAG. 2909 * 2910 * This is a no-op for non-spinode filesystems since clusters are always fully 2911 * allocated and checking the bnobt suffices. However, a spinode filesystem 2912 * could have a record where the upper inodes are free blocks. If those blocks 2913 * were removed from the filesystem, the inode record would extend beyond EOAG, 2914 * which will be flagged as corruption. 
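 *
 * For example, if the last inobt record in the AG starts 32 inodes below
 * the proposed new EOAG and its upper 32 inodes are sparse holes, the
 * underlying blocks are free space and a block-level check alone would
 * permit the shrink; but the record still spans a full 64-inode chunk
 * and would then reach past the end of the AG, so the lookup below
 * catches this and returns -ENOSPC.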
2915 */ 2916 int 2917 xfs_ialloc_check_shrink( 2918 struct xfs_perag *pag, 2919 struct xfs_trans *tp, 2920 struct xfs_buf *agibp, 2921 xfs_agblock_t new_length) 2922 { 2923 struct xfs_inobt_rec_incore rec; 2924 struct xfs_btree_cur *cur; 2925 xfs_agino_t agino; 2926 int has; 2927 int error; 2928 2929 if (!xfs_has_sparseinodes(pag->pag_mount)) 2930 return 0; 2931 2932 cur = xfs_inobt_init_cursor(pag, tp, agibp, XFS_BTNUM_INO); 2933 2934 /* Look up the inobt record that would correspond to the new EOFS. */ 2935 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length); 2936 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); 2937 if (error || !has) 2938 goto out; 2939 2940 error = xfs_inobt_get_rec(cur, &rec, &has); 2941 if (error) 2942 goto out; 2943 2944 if (!has) { 2945 error = -EFSCORRUPTED; 2946 goto out; 2947 } 2948 2949 /* If the record covers inodes that would be beyond EOFS, bail out. */ 2950 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { 2951 error = -ENOSPC; 2952 goto out; 2953 } 2954 out: 2955 xfs_btree_del_cursor(cur, error); 2956 return error; 2957 } 2958