1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_ialloc.h" 17 #include "xfs_ialloc_btree.h" 18 #include "xfs_alloc.h" 19 #include "xfs_errortag.h" 20 #include "xfs_error.h" 21 #include "xfs_bmap.h" 22 #include "xfs_trans.h" 23 #include "xfs_buf_item.h" 24 #include "xfs_icreate_item.h" 25 #include "xfs_icache.h" 26 #include "xfs_trace.h" 27 #include "xfs_log.h" 28 #include "xfs_rmap.h" 29 #include "xfs_ag.h" 30 31 /* 32 * Lookup a record by ino in the btree given by cur. 33 */ 34 int /* error */ 35 xfs_inobt_lookup( 36 struct xfs_btree_cur *cur, /* btree cursor */ 37 xfs_agino_t ino, /* starting inode of chunk */ 38 xfs_lookup_t dir, /* <=, >=, == */ 39 int *stat) /* success/failure */ 40 { 41 cur->bc_rec.i.ir_startino = ino; 42 cur->bc_rec.i.ir_holemask = 0; 43 cur->bc_rec.i.ir_count = 0; 44 cur->bc_rec.i.ir_freecount = 0; 45 cur->bc_rec.i.ir_free = 0; 46 return xfs_btree_lookup(cur, dir, stat); 47 } 48 49 /* 50 * Update the record referred to by cur to the value given. 51 * This either works (return 0) or gets an EFSCORRUPTED error. 52 */ 53 STATIC int /* error */ 54 xfs_inobt_update( 55 struct xfs_btree_cur *cur, /* btree cursor */ 56 xfs_inobt_rec_incore_t *irec) /* btree record */ 57 { 58 union xfs_btree_rec rec; 59 60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); 61 if (xfs_has_sparseinodes(cur->bc_mp)) { 62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask); 63 rec.inobt.ir_u.sp.ir_count = irec->ir_count; 64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount; 65 } else { 66 /* ir_holemask/ir_count not supported on-disk */ 67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount); 68 } 69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free); 70 return xfs_btree_update(cur, &rec); 71 } 72 73 /* Convert on-disk btree record to incore inobt record. */ 74 void 75 xfs_inobt_btrec_to_irec( 76 struct xfs_mount *mp, 77 const union xfs_btree_rec *rec, 78 struct xfs_inobt_rec_incore *irec) 79 { 80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); 81 if (xfs_has_sparseinodes(mp)) { 82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask); 83 irec->ir_count = rec->inobt.ir_u.sp.ir_count; 84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount; 85 } else { 86 /* 87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded 88 * values for full inode chunks. 89 */ 90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL; 91 irec->ir_count = XFS_INODES_PER_CHUNK; 92 irec->ir_freecount = 93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount); 94 } 95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free); 96 } 97 98 /* 99 * Get the data from the pointed-to record. 100 */ 101 int 102 xfs_inobt_get_rec( 103 struct xfs_btree_cur *cur, 104 struct xfs_inobt_rec_incore *irec, 105 int *stat) 106 { 107 struct xfs_mount *mp = cur->bc_mp; 108 union xfs_btree_rec *rec; 109 int error; 110 uint64_t realfree; 111 112 error = xfs_btree_get_rec(cur, &rec, stat); 113 if (error || *stat == 0) 114 return error; 115 116 xfs_inobt_btrec_to_irec(mp, rec, irec); 117 118 if (!xfs_verify_agino(cur->bc_ag.pag, irec->ir_startino)) 119 goto out_bad_rec; 120 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT || 121 irec->ir_count > XFS_INODES_PER_CHUNK) 122 goto out_bad_rec; 123 if (irec->ir_freecount > XFS_INODES_PER_CHUNK) 124 goto out_bad_rec; 125 126 /* if there are no holes, return the first available offset */ 127 if (!xfs_inobt_issparse(irec->ir_holemask)) 128 realfree = irec->ir_free; 129 else 130 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec); 131 if (hweight64(realfree) != irec->ir_freecount) 132 goto out_bad_rec; 133 134 return 0; 135 136 out_bad_rec: 137 xfs_warn(mp, 138 "%s Inode BTree record corruption in AG %d detected!", 139 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", 140 cur->bc_ag.pag->pag_agno); 141 xfs_warn(mp, 142 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", 143 irec->ir_startino, irec->ir_count, irec->ir_freecount, 144 irec->ir_free, irec->ir_holemask); 145 return -EFSCORRUPTED; 146 } 147 148 /* 149 * Insert a single inobt record. Cursor must already point to desired location. 150 */ 151 int 152 xfs_inobt_insert_rec( 153 struct xfs_btree_cur *cur, 154 uint16_t holemask, 155 uint8_t count, 156 int32_t freecount, 157 xfs_inofree_t free, 158 int *stat) 159 { 160 cur->bc_rec.i.ir_holemask = holemask; 161 cur->bc_rec.i.ir_count = count; 162 cur->bc_rec.i.ir_freecount = freecount; 163 cur->bc_rec.i.ir_free = free; 164 return xfs_btree_insert(cur, stat); 165 } 166 167 /* 168 * Insert records describing a newly allocated inode chunk into the inobt. 169 */ 170 STATIC int 171 xfs_inobt_insert( 172 struct xfs_perag *pag, 173 struct xfs_trans *tp, 174 struct xfs_buf *agbp, 175 xfs_agino_t newino, 176 xfs_agino_t newlen, 177 xfs_btnum_t btnum) 178 { 179 struct xfs_btree_cur *cur; 180 xfs_agino_t thisino; 181 int i; 182 int error; 183 184 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 185 186 for (thisino = newino; 187 thisino < newino + newlen; 188 thisino += XFS_INODES_PER_CHUNK) { 189 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); 190 if (error) { 191 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 192 return error; 193 } 194 ASSERT(i == 0); 195 196 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL, 197 XFS_INODES_PER_CHUNK, 198 XFS_INODES_PER_CHUNK, 199 XFS_INOBT_ALL_FREE, &i); 200 if (error) { 201 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 202 return error; 203 } 204 ASSERT(i == 1); 205 } 206 207 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 208 209 return 0; 210 } 211 212 /* 213 * Verify that the number of free inodes in the AGI is correct. 214 */ 215 #ifdef DEBUG 216 static int 217 xfs_check_agi_freecount( 218 struct xfs_btree_cur *cur) 219 { 220 if (cur->bc_nlevels == 1) { 221 xfs_inobt_rec_incore_t rec; 222 int freecount = 0; 223 int error; 224 int i; 225 226 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 227 if (error) 228 return error; 229 230 do { 231 error = xfs_inobt_get_rec(cur, &rec, &i); 232 if (error) 233 return error; 234 235 if (i) { 236 freecount += rec.ir_freecount; 237 error = xfs_btree_increment(cur, 0, &i); 238 if (error) 239 return error; 240 } 241 } while (i == 1); 242 243 if (!xfs_is_shutdown(cur->bc_mp)) 244 ASSERT(freecount == cur->bc_ag.pag->pagi_freecount); 245 } 246 return 0; 247 } 248 #else 249 #define xfs_check_agi_freecount(cur) 0 250 #endif 251 252 /* 253 * Initialise a new set of inodes. When called without a transaction context 254 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather 255 * than logging them (which in a transaction context puts them into the AIL 256 * for writeback rather than the xfsbufd queue). 257 */ 258 int 259 xfs_ialloc_inode_init( 260 struct xfs_mount *mp, 261 struct xfs_trans *tp, 262 struct list_head *buffer_list, 263 int icount, 264 xfs_agnumber_t agno, 265 xfs_agblock_t agbno, 266 xfs_agblock_t length, 267 unsigned int gen) 268 { 269 struct xfs_buf *fbuf; 270 struct xfs_dinode *free; 271 int nbufs; 272 int version; 273 int i, j; 274 xfs_daddr_t d; 275 xfs_ino_t ino = 0; 276 int error; 277 278 /* 279 * Loop over the new block(s), filling in the inodes. For small block 280 * sizes, manipulate the inodes in buffers which are multiples of the 281 * blocks size. 282 */ 283 nbufs = length / M_IGEO(mp)->blocks_per_cluster; 284 285 /* 286 * Figure out what version number to use in the inodes we create. If 287 * the superblock version has caught up to the one that supports the new 288 * inode format, then use the new inode version. Otherwise use the old 289 * version so that old kernels will continue to be able to use the file 290 * system. 291 * 292 * For v3 inodes, we also need to write the inode number into the inode, 293 * so calculate the first inode number of the chunk here as 294 * XFS_AGB_TO_AGINO() only works within a filesystem block, not 295 * across multiple filesystem blocks (such as a cluster) and so cannot 296 * be used in the cluster buffer loop below. 297 * 298 * Further, because we are writing the inode directly into the buffer 299 * and calculating a CRC on the entire inode, we have ot log the entire 300 * inode so that the entire range the CRC covers is present in the log. 301 * That means for v3 inode we log the entire buffer rather than just the 302 * inode cores. 303 */ 304 if (xfs_has_v3inodes(mp)) { 305 version = 3; 306 ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno)); 307 308 /* 309 * log the initialisation that is about to take place as an 310 * logical operation. This means the transaction does not 311 * need to log the physical changes to the inode buffers as log 312 * recovery will know what initialisation is actually needed. 313 * Hence we only need to log the buffers as "ordered" buffers so 314 * they track in the AIL as if they were physically logged. 315 */ 316 if (tp) 317 xfs_icreate_log(tp, agno, agbno, icount, 318 mp->m_sb.sb_inodesize, length, gen); 319 } else 320 version = 2; 321 322 for (j = 0; j < nbufs; j++) { 323 /* 324 * Get the block. 325 */ 326 d = XFS_AGB_TO_DADDR(mp, agno, agbno + 327 (j * M_IGEO(mp)->blocks_per_cluster)); 328 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, 329 mp->m_bsize * M_IGEO(mp)->blocks_per_cluster, 330 XBF_UNMAPPED, &fbuf); 331 if (error) 332 return error; 333 334 /* Initialize the inode buffers and log them appropriately. */ 335 fbuf->b_ops = &xfs_inode_buf_ops; 336 xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); 337 for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) { 338 int ioffset = i << mp->m_sb.sb_inodelog; 339 340 free = xfs_make_iptr(mp, fbuf, i); 341 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); 342 free->di_version = version; 343 free->di_gen = cpu_to_be32(gen); 344 free->di_next_unlinked = cpu_to_be32(NULLAGINO); 345 346 if (version == 3) { 347 free->di_ino = cpu_to_be64(ino); 348 ino++; 349 uuid_copy(&free->di_uuid, 350 &mp->m_sb.sb_meta_uuid); 351 xfs_dinode_calc_crc(mp, free); 352 } else if (tp) { 353 /* just log the inode core */ 354 xfs_trans_log_buf(tp, fbuf, ioffset, 355 ioffset + XFS_DINODE_SIZE(mp) - 1); 356 } 357 } 358 359 if (tp) { 360 /* 361 * Mark the buffer as an inode allocation buffer so it 362 * sticks in AIL at the point of this allocation 363 * transaction. This ensures the they are on disk before 364 * the tail of the log can be moved past this 365 * transaction (i.e. by preventing relogging from moving 366 * it forward in the log). 367 */ 368 xfs_trans_inode_alloc_buf(tp, fbuf); 369 if (version == 3) { 370 /* 371 * Mark the buffer as ordered so that they are 372 * not physically logged in the transaction but 373 * still tracked in the AIL as part of the 374 * transaction and pin the log appropriately. 375 */ 376 xfs_trans_ordered_buf(tp, fbuf); 377 } 378 } else { 379 fbuf->b_flags |= XBF_DONE; 380 xfs_buf_delwri_queue(fbuf, buffer_list); 381 xfs_buf_relse(fbuf); 382 } 383 } 384 return 0; 385 } 386 387 /* 388 * Align startino and allocmask for a recently allocated sparse chunk such that 389 * they are fit for insertion (or merge) into the on-disk inode btrees. 390 * 391 * Background: 392 * 393 * When enabled, sparse inode support increases the inode alignment from cluster 394 * size to inode chunk size. This means that the minimum range between two 395 * non-adjacent inode records in the inobt is large enough for a full inode 396 * record. This allows for cluster sized, cluster aligned block allocation 397 * without need to worry about whether the resulting inode record overlaps with 398 * another record in the tree. Without this basic rule, we would have to deal 399 * with the consequences of overlap by potentially undoing recent allocations in 400 * the inode allocation codepath. 401 * 402 * Because of this alignment rule (which is enforced on mount), there are two 403 * inobt possibilities for newly allocated sparse chunks. One is that the 404 * aligned inode record for the chunk covers a range of inodes not already 405 * covered in the inobt (i.e., it is safe to insert a new sparse record). The 406 * other is that a record already exists at the aligned startino that considers 407 * the newly allocated range as sparse. In the latter case, record content is 408 * merged in hope that sparse inode chunks fill to full chunks over time. 409 */ 410 STATIC void 411 xfs_align_sparse_ino( 412 struct xfs_mount *mp, 413 xfs_agino_t *startino, 414 uint16_t *allocmask) 415 { 416 xfs_agblock_t agbno; 417 xfs_agblock_t mod; 418 int offset; 419 420 agbno = XFS_AGINO_TO_AGBNO(mp, *startino); 421 mod = agbno % mp->m_sb.sb_inoalignmt; 422 if (!mod) 423 return; 424 425 /* calculate the inode offset and align startino */ 426 offset = XFS_AGB_TO_AGINO(mp, mod); 427 *startino -= offset; 428 429 /* 430 * Since startino has been aligned down, left shift allocmask such that 431 * it continues to represent the same physical inodes relative to the 432 * new startino. 433 */ 434 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT; 435 } 436 437 /* 438 * Determine whether the source inode record can merge into the target. Both 439 * records must be sparse, the inode ranges must match and there must be no 440 * allocation overlap between the records. 441 */ 442 STATIC bool 443 __xfs_inobt_can_merge( 444 struct xfs_inobt_rec_incore *trec, /* tgt record */ 445 struct xfs_inobt_rec_incore *srec) /* src record */ 446 { 447 uint64_t talloc; 448 uint64_t salloc; 449 450 /* records must cover the same inode range */ 451 if (trec->ir_startino != srec->ir_startino) 452 return false; 453 454 /* both records must be sparse */ 455 if (!xfs_inobt_issparse(trec->ir_holemask) || 456 !xfs_inobt_issparse(srec->ir_holemask)) 457 return false; 458 459 /* both records must track some inodes */ 460 if (!trec->ir_count || !srec->ir_count) 461 return false; 462 463 /* can't exceed capacity of a full record */ 464 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK) 465 return false; 466 467 /* verify there is no allocation overlap */ 468 talloc = xfs_inobt_irec_to_allocmask(trec); 469 salloc = xfs_inobt_irec_to_allocmask(srec); 470 if (talloc & salloc) 471 return false; 472 473 return true; 474 } 475 476 /* 477 * Merge the source inode record into the target. The caller must call 478 * __xfs_inobt_can_merge() to ensure the merge is valid. 479 */ 480 STATIC void 481 __xfs_inobt_rec_merge( 482 struct xfs_inobt_rec_incore *trec, /* target */ 483 struct xfs_inobt_rec_incore *srec) /* src */ 484 { 485 ASSERT(trec->ir_startino == srec->ir_startino); 486 487 /* combine the counts */ 488 trec->ir_count += srec->ir_count; 489 trec->ir_freecount += srec->ir_freecount; 490 491 /* 492 * Merge the holemask and free mask. For both fields, 0 bits refer to 493 * allocated inodes. We combine the allocated ranges with bitwise AND. 494 */ 495 trec->ir_holemask &= srec->ir_holemask; 496 trec->ir_free &= srec->ir_free; 497 } 498 499 /* 500 * Insert a new sparse inode chunk into the associated inode btree. The inode 501 * record for the sparse chunk is pre-aligned to a startino that should match 502 * any pre-existing sparse inode record in the tree. This allows sparse chunks 503 * to fill over time. 504 * 505 * This function supports two modes of handling preexisting records depending on 506 * the merge flag. If merge is true, the provided record is merged with the 507 * existing record and updated in place. The merged record is returned in nrec. 508 * If merge is false, an existing record is replaced with the provided record. 509 * If no preexisting record exists, the provided record is always inserted. 510 * 511 * It is considered corruption if a merge is requested and not possible. Given 512 * the sparse inode alignment constraints, this should never happen. 513 */ 514 STATIC int 515 xfs_inobt_insert_sprec( 516 struct xfs_perag *pag, 517 struct xfs_trans *tp, 518 struct xfs_buf *agbp, 519 int btnum, 520 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */ 521 bool merge) /* merge or replace */ 522 { 523 struct xfs_mount *mp = pag->pag_mount; 524 struct xfs_btree_cur *cur; 525 int error; 526 int i; 527 struct xfs_inobt_rec_incore rec; 528 529 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 530 531 /* the new record is pre-aligned so we know where to look */ 532 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i); 533 if (error) 534 goto error; 535 /* if nothing there, insert a new record and return */ 536 if (i == 0) { 537 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask, 538 nrec->ir_count, nrec->ir_freecount, 539 nrec->ir_free, &i); 540 if (error) 541 goto error; 542 if (XFS_IS_CORRUPT(mp, i != 1)) { 543 error = -EFSCORRUPTED; 544 goto error; 545 } 546 547 goto out; 548 } 549 550 /* 551 * A record exists at this startino. Merge or replace the record 552 * depending on what we've been asked to do. 553 */ 554 if (merge) { 555 error = xfs_inobt_get_rec(cur, &rec, &i); 556 if (error) 557 goto error; 558 if (XFS_IS_CORRUPT(mp, i != 1)) { 559 error = -EFSCORRUPTED; 560 goto error; 561 } 562 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) { 563 error = -EFSCORRUPTED; 564 goto error; 565 } 566 567 /* 568 * This should never fail. If we have coexisting records that 569 * cannot merge, something is seriously wrong. 570 */ 571 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) { 572 error = -EFSCORRUPTED; 573 goto error; 574 } 575 576 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino, 577 rec.ir_holemask, nrec->ir_startino, 578 nrec->ir_holemask); 579 580 /* merge to nrec to output the updated record */ 581 __xfs_inobt_rec_merge(nrec, &rec); 582 583 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino, 584 nrec->ir_holemask); 585 586 error = xfs_inobt_rec_check_count(mp, nrec); 587 if (error) 588 goto error; 589 } 590 591 error = xfs_inobt_update(cur, nrec); 592 if (error) 593 goto error; 594 595 out: 596 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 597 return 0; 598 error: 599 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 600 return error; 601 } 602 603 /* 604 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if 605 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so 606 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum 607 * inode count threshold, or the usual negative error code for other errors. 608 */ 609 STATIC int 610 xfs_ialloc_ag_alloc( 611 struct xfs_perag *pag, 612 struct xfs_trans *tp, 613 struct xfs_buf *agbp) 614 { 615 struct xfs_agi *agi; 616 struct xfs_alloc_arg args; 617 int error; 618 xfs_agino_t newino; /* new first inode's number */ 619 xfs_agino_t newlen; /* new number of inodes */ 620 int isaligned = 0; /* inode allocation at stripe */ 621 /* unit boundary */ 622 /* init. to full chunk */ 623 struct xfs_inobt_rec_incore rec; 624 struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp); 625 uint16_t allocmask = (uint16_t) -1; 626 int do_sparse = 0; 627 628 memset(&args, 0, sizeof(args)); 629 args.tp = tp; 630 args.mp = tp->t_mountp; 631 args.fsbno = NULLFSBLOCK; 632 args.oinfo = XFS_RMAP_OINFO_INODES; 633 634 #ifdef DEBUG 635 /* randomly do sparse inode allocations */ 636 if (xfs_has_sparseinodes(tp->t_mountp) && 637 igeo->ialloc_min_blks < igeo->ialloc_blks) 638 do_sparse = get_random_u32_below(2); 639 #endif 640 641 /* 642 * Locking will ensure that we don't have two callers in here 643 * at one time. 644 */ 645 newlen = igeo->ialloc_inos; 646 if (igeo->maxicount && 647 percpu_counter_read_positive(&args.mp->m_icount) + newlen > 648 igeo->maxicount) 649 return -ENOSPC; 650 args.minlen = args.maxlen = igeo->ialloc_blks; 651 /* 652 * First try to allocate inodes contiguous with the last-allocated 653 * chunk of inodes. If the filesystem is striped, this will fill 654 * an entire stripe unit with inodes. 655 */ 656 agi = agbp->b_addr; 657 newino = be32_to_cpu(agi->agi_newino); 658 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + 659 igeo->ialloc_blks; 660 if (do_sparse) 661 goto sparse_alloc; 662 if (likely(newino != NULLAGINO && 663 (args.agbno < be32_to_cpu(agi->agi_length)))) { 664 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 665 args.type = XFS_ALLOCTYPE_THIS_BNO; 666 args.prod = 1; 667 668 /* 669 * We need to take into account alignment here to ensure that 670 * we don't modify the free list if we fail to have an exact 671 * block. If we don't have an exact match, and every oher 672 * attempt allocation attempt fails, we'll end up cancelling 673 * a dirty transaction and shutting down. 674 * 675 * For an exact allocation, alignment must be 1, 676 * however we need to take cluster alignment into account when 677 * fixing up the freelist. Use the minalignslop field to 678 * indicate that extra blocks might be required for alignment, 679 * but not to use them in the actual exact allocation. 680 */ 681 args.alignment = 1; 682 args.minalignslop = igeo->cluster_align - 1; 683 684 /* Allow space for the inode btree to split. */ 685 args.minleft = igeo->inobt_maxlevels; 686 if ((error = xfs_alloc_vextent(&args))) 687 return error; 688 689 /* 690 * This request might have dirtied the transaction if the AG can 691 * satisfy the request, but the exact block was not available. 692 * If the allocation did fail, subsequent requests will relax 693 * the exact agbno requirement and increase the alignment 694 * instead. It is critical that the total size of the request 695 * (len + alignment + slop) does not increase from this point 696 * on, so reset minalignslop to ensure it is not included in 697 * subsequent requests. 698 */ 699 args.minalignslop = 0; 700 } 701 702 if (unlikely(args.fsbno == NULLFSBLOCK)) { 703 /* 704 * Set the alignment for the allocation. 705 * If stripe alignment is turned on then align at stripe unit 706 * boundary. 707 * If the cluster size is smaller than a filesystem block 708 * then we're doing I/O for inodes in filesystem block size 709 * pieces, so don't need alignment anyway. 710 */ 711 isaligned = 0; 712 if (igeo->ialloc_align) { 713 ASSERT(!xfs_has_noalign(args.mp)); 714 args.alignment = args.mp->m_dalign; 715 isaligned = 1; 716 } else 717 args.alignment = igeo->cluster_align; 718 /* 719 * Need to figure out where to allocate the inode blocks. 720 * Ideally they should be spaced out through the a.g. 721 * For now, just allocate blocks up front. 722 */ 723 args.agbno = be32_to_cpu(agi->agi_root); 724 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 725 /* 726 * Allocate a fixed-size extent of inodes. 727 */ 728 args.type = XFS_ALLOCTYPE_NEAR_BNO; 729 args.prod = 1; 730 /* 731 * Allow space for the inode btree to split. 732 */ 733 args.minleft = igeo->inobt_maxlevels; 734 if ((error = xfs_alloc_vextent(&args))) 735 return error; 736 } 737 738 /* 739 * If stripe alignment is turned on, then try again with cluster 740 * alignment. 741 */ 742 if (isaligned && args.fsbno == NULLFSBLOCK) { 743 args.type = XFS_ALLOCTYPE_NEAR_BNO; 744 args.agbno = be32_to_cpu(agi->agi_root); 745 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 746 args.alignment = igeo->cluster_align; 747 if ((error = xfs_alloc_vextent(&args))) 748 return error; 749 } 750 751 /* 752 * Finally, try a sparse allocation if the filesystem supports it and 753 * the sparse allocation length is smaller than a full chunk. 754 */ 755 if (xfs_has_sparseinodes(args.mp) && 756 igeo->ialloc_min_blks < igeo->ialloc_blks && 757 args.fsbno == NULLFSBLOCK) { 758 sparse_alloc: 759 args.type = XFS_ALLOCTYPE_NEAR_BNO; 760 args.agbno = be32_to_cpu(agi->agi_root); 761 args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno); 762 args.alignment = args.mp->m_sb.sb_spino_align; 763 args.prod = 1; 764 765 args.minlen = igeo->ialloc_min_blks; 766 args.maxlen = args.minlen; 767 768 /* 769 * The inode record will be aligned to full chunk size. We must 770 * prevent sparse allocation from AG boundaries that result in 771 * invalid inode records, such as records that start at agbno 0 772 * or extend beyond the AG. 773 * 774 * Set min agbno to the first aligned, non-zero agbno and max to 775 * the last aligned agbno that is at least one full chunk from 776 * the end of the AG. 777 */ 778 args.min_agbno = args.mp->m_sb.sb_inoalignmt; 779 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, 780 args.mp->m_sb.sb_inoalignmt) - 781 igeo->ialloc_blks; 782 783 error = xfs_alloc_vextent(&args); 784 if (error) 785 return error; 786 787 newlen = XFS_AGB_TO_AGINO(args.mp, args.len); 788 ASSERT(newlen <= XFS_INODES_PER_CHUNK); 789 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1; 790 } 791 792 if (args.fsbno == NULLFSBLOCK) 793 return -EAGAIN; 794 795 ASSERT(args.len == args.minlen); 796 797 /* 798 * Stamp and write the inode buffers. 799 * 800 * Seed the new inode cluster with a random generation number. This 801 * prevents short-term reuse of generation numbers if a chunk is 802 * freed and then immediately reallocated. We use random numbers 803 * rather than a linear progression to prevent the next generation 804 * number from being easily guessable. 805 */ 806 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, 807 args.agbno, args.len, get_random_u32()); 808 809 if (error) 810 return error; 811 /* 812 * Convert the results. 813 */ 814 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno); 815 816 if (xfs_inobt_issparse(~allocmask)) { 817 /* 818 * We've allocated a sparse chunk. Align the startino and mask. 819 */ 820 xfs_align_sparse_ino(args.mp, &newino, &allocmask); 821 822 rec.ir_startino = newino; 823 rec.ir_holemask = ~allocmask; 824 rec.ir_count = newlen; 825 rec.ir_freecount = newlen; 826 rec.ir_free = XFS_INOBT_ALL_FREE; 827 828 /* 829 * Insert the sparse record into the inobt and allow for a merge 830 * if necessary. If a merge does occur, rec is updated to the 831 * merged record. 832 */ 833 error = xfs_inobt_insert_sprec(pag, tp, agbp, 834 XFS_BTNUM_INO, &rec, true); 835 if (error == -EFSCORRUPTED) { 836 xfs_alert(args.mp, 837 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u", 838 XFS_AGINO_TO_INO(args.mp, pag->pag_agno, 839 rec.ir_startino), 840 rec.ir_holemask, rec.ir_count); 841 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE); 842 } 843 if (error) 844 return error; 845 846 /* 847 * We can't merge the part we've just allocated as for the inobt 848 * due to finobt semantics. The original record may or may not 849 * exist independent of whether physical inodes exist in this 850 * sparse chunk. 851 * 852 * We must update the finobt record based on the inobt record. 853 * rec contains the fully merged and up to date inobt record 854 * from the previous call. Set merge false to replace any 855 * existing record with this one. 856 */ 857 if (xfs_has_finobt(args.mp)) { 858 error = xfs_inobt_insert_sprec(pag, tp, agbp, 859 XFS_BTNUM_FINO, &rec, false); 860 if (error) 861 return error; 862 } 863 } else { 864 /* full chunk - insert new records to both btrees */ 865 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, 866 XFS_BTNUM_INO); 867 if (error) 868 return error; 869 870 if (xfs_has_finobt(args.mp)) { 871 error = xfs_inobt_insert(pag, tp, agbp, newino, 872 newlen, XFS_BTNUM_FINO); 873 if (error) 874 return error; 875 } 876 } 877 878 /* 879 * Update AGI counts and newino. 880 */ 881 be32_add_cpu(&agi->agi_count, newlen); 882 be32_add_cpu(&agi->agi_freecount, newlen); 883 pag->pagi_freecount += newlen; 884 pag->pagi_count += newlen; 885 agi->agi_newino = cpu_to_be32(newino); 886 887 /* 888 * Log allocation group header fields 889 */ 890 xfs_ialloc_log_agi(tp, agbp, 891 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); 892 /* 893 * Modify/log superblock values for inode count and inode free count. 894 */ 895 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); 896 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); 897 return 0; 898 } 899 900 /* 901 * Try to retrieve the next record to the left/right from the current one. 902 */ 903 STATIC int 904 xfs_ialloc_next_rec( 905 struct xfs_btree_cur *cur, 906 xfs_inobt_rec_incore_t *rec, 907 int *done, 908 int left) 909 { 910 int error; 911 int i; 912 913 if (left) 914 error = xfs_btree_decrement(cur, 0, &i); 915 else 916 error = xfs_btree_increment(cur, 0, &i); 917 918 if (error) 919 return error; 920 *done = !i; 921 if (i) { 922 error = xfs_inobt_get_rec(cur, rec, &i); 923 if (error) 924 return error; 925 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 926 return -EFSCORRUPTED; 927 } 928 929 return 0; 930 } 931 932 STATIC int 933 xfs_ialloc_get_rec( 934 struct xfs_btree_cur *cur, 935 xfs_agino_t agino, 936 xfs_inobt_rec_incore_t *rec, 937 int *done) 938 { 939 int error; 940 int i; 941 942 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); 943 if (error) 944 return error; 945 *done = !i; 946 if (i) { 947 error = xfs_inobt_get_rec(cur, rec, &i); 948 if (error) 949 return error; 950 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 951 return -EFSCORRUPTED; 952 } 953 954 return 0; 955 } 956 957 /* 958 * Return the offset of the first free inode in the record. If the inode chunk 959 * is sparsely allocated, we convert the record holemask to inode granularity 960 * and mask off the unallocated regions from the inode free mask. 961 */ 962 STATIC int 963 xfs_inobt_first_free_inode( 964 struct xfs_inobt_rec_incore *rec) 965 { 966 xfs_inofree_t realfree; 967 968 /* if there are no holes, return the first available offset */ 969 if (!xfs_inobt_issparse(rec->ir_holemask)) 970 return xfs_lowbit64(rec->ir_free); 971 972 realfree = xfs_inobt_irec_to_allocmask(rec); 973 realfree &= rec->ir_free; 974 975 return xfs_lowbit64(realfree); 976 } 977 978 /* 979 * Allocate an inode using the inobt-only algorithm. 980 */ 981 STATIC int 982 xfs_dialloc_ag_inobt( 983 struct xfs_perag *pag, 984 struct xfs_trans *tp, 985 struct xfs_buf *agbp, 986 xfs_ino_t parent, 987 xfs_ino_t *inop) 988 { 989 struct xfs_mount *mp = tp->t_mountp; 990 struct xfs_agi *agi = agbp->b_addr; 991 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 992 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 993 struct xfs_btree_cur *cur, *tcur; 994 struct xfs_inobt_rec_incore rec, trec; 995 xfs_ino_t ino; 996 int error; 997 int offset; 998 int i, j; 999 int searchdistance = 10; 1000 1001 ASSERT(xfs_perag_initialised_agi(pag)); 1002 ASSERT(xfs_perag_allows_inodes(pag)); 1003 ASSERT(pag->pagi_freecount > 0); 1004 1005 restart_pagno: 1006 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1007 /* 1008 * If pagino is 0 (this is the root inode allocation) use newino. 1009 * This must work because we've just allocated some. 1010 */ 1011 if (!pagino) 1012 pagino = be32_to_cpu(agi->agi_newino); 1013 1014 error = xfs_check_agi_freecount(cur); 1015 if (error) 1016 goto error0; 1017 1018 /* 1019 * If in the same AG as the parent, try to get near the parent. 1020 */ 1021 if (pagno == pag->pag_agno) { 1022 int doneleft; /* done, to the left */ 1023 int doneright; /* done, to the right */ 1024 1025 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 1026 if (error) 1027 goto error0; 1028 if (XFS_IS_CORRUPT(mp, i != 1)) { 1029 error = -EFSCORRUPTED; 1030 goto error0; 1031 } 1032 1033 error = xfs_inobt_get_rec(cur, &rec, &j); 1034 if (error) 1035 goto error0; 1036 if (XFS_IS_CORRUPT(mp, j != 1)) { 1037 error = -EFSCORRUPTED; 1038 goto error0; 1039 } 1040 1041 if (rec.ir_freecount > 0) { 1042 /* 1043 * Found a free inode in the same chunk 1044 * as the parent, done. 1045 */ 1046 goto alloc_inode; 1047 } 1048 1049 1050 /* 1051 * In the same AG as parent, but parent's chunk is full. 1052 */ 1053 1054 /* duplicate the cursor, search left & right simultaneously */ 1055 error = xfs_btree_dup_cursor(cur, &tcur); 1056 if (error) 1057 goto error0; 1058 1059 /* 1060 * Skip to last blocks looked up if same parent inode. 1061 */ 1062 if (pagino != NULLAGINO && 1063 pag->pagl_pagino == pagino && 1064 pag->pagl_leftrec != NULLAGINO && 1065 pag->pagl_rightrec != NULLAGINO) { 1066 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, 1067 &trec, &doneleft); 1068 if (error) 1069 goto error1; 1070 1071 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, 1072 &rec, &doneright); 1073 if (error) 1074 goto error1; 1075 } else { 1076 /* search left with tcur, back up 1 record */ 1077 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); 1078 if (error) 1079 goto error1; 1080 1081 /* search right with cur, go forward 1 record. */ 1082 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); 1083 if (error) 1084 goto error1; 1085 } 1086 1087 /* 1088 * Loop until we find an inode chunk with a free inode. 1089 */ 1090 while (--searchdistance > 0 && (!doneleft || !doneright)) { 1091 int useleft; /* using left inode chunk this time */ 1092 1093 /* figure out the closer block if both are valid. */ 1094 if (!doneleft && !doneright) { 1095 useleft = pagino - 1096 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < 1097 rec.ir_startino - pagino; 1098 } else { 1099 useleft = !doneleft; 1100 } 1101 1102 /* free inodes to the left? */ 1103 if (useleft && trec.ir_freecount) { 1104 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1105 cur = tcur; 1106 1107 pag->pagl_leftrec = trec.ir_startino; 1108 pag->pagl_rightrec = rec.ir_startino; 1109 pag->pagl_pagino = pagino; 1110 rec = trec; 1111 goto alloc_inode; 1112 } 1113 1114 /* free inodes to the right? */ 1115 if (!useleft && rec.ir_freecount) { 1116 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1117 1118 pag->pagl_leftrec = trec.ir_startino; 1119 pag->pagl_rightrec = rec.ir_startino; 1120 pag->pagl_pagino = pagino; 1121 goto alloc_inode; 1122 } 1123 1124 /* get next record to check */ 1125 if (useleft) { 1126 error = xfs_ialloc_next_rec(tcur, &trec, 1127 &doneleft, 1); 1128 } else { 1129 error = xfs_ialloc_next_rec(cur, &rec, 1130 &doneright, 0); 1131 } 1132 if (error) 1133 goto error1; 1134 } 1135 1136 if (searchdistance <= 0) { 1137 /* 1138 * Not in range - save last search 1139 * location and allocate a new inode 1140 */ 1141 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1142 pag->pagl_leftrec = trec.ir_startino; 1143 pag->pagl_rightrec = rec.ir_startino; 1144 pag->pagl_pagino = pagino; 1145 1146 } else { 1147 /* 1148 * We've reached the end of the btree. because 1149 * we are only searching a small chunk of the 1150 * btree each search, there is obviously free 1151 * inodes closer to the parent inode than we 1152 * are now. restart the search again. 1153 */ 1154 pag->pagl_pagino = NULLAGINO; 1155 pag->pagl_leftrec = NULLAGINO; 1156 pag->pagl_rightrec = NULLAGINO; 1157 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); 1158 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1159 goto restart_pagno; 1160 } 1161 } 1162 1163 /* 1164 * In a different AG from the parent. 1165 * See if the most recently allocated block has any free. 1166 */ 1167 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1168 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1169 XFS_LOOKUP_EQ, &i); 1170 if (error) 1171 goto error0; 1172 1173 if (i == 1) { 1174 error = xfs_inobt_get_rec(cur, &rec, &j); 1175 if (error) 1176 goto error0; 1177 1178 if (j == 1 && rec.ir_freecount > 0) { 1179 /* 1180 * The last chunk allocated in the group 1181 * still has a free inode. 1182 */ 1183 goto alloc_inode; 1184 } 1185 } 1186 } 1187 1188 /* 1189 * None left in the last group, search the whole AG 1190 */ 1191 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1192 if (error) 1193 goto error0; 1194 if (XFS_IS_CORRUPT(mp, i != 1)) { 1195 error = -EFSCORRUPTED; 1196 goto error0; 1197 } 1198 1199 for (;;) { 1200 error = xfs_inobt_get_rec(cur, &rec, &i); 1201 if (error) 1202 goto error0; 1203 if (XFS_IS_CORRUPT(mp, i != 1)) { 1204 error = -EFSCORRUPTED; 1205 goto error0; 1206 } 1207 if (rec.ir_freecount > 0) 1208 break; 1209 error = xfs_btree_increment(cur, 0, &i); 1210 if (error) 1211 goto error0; 1212 if (XFS_IS_CORRUPT(mp, i != 1)) { 1213 error = -EFSCORRUPTED; 1214 goto error0; 1215 } 1216 } 1217 1218 alloc_inode: 1219 offset = xfs_inobt_first_free_inode(&rec); 1220 ASSERT(offset >= 0); 1221 ASSERT(offset < XFS_INODES_PER_CHUNK); 1222 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1223 XFS_INODES_PER_CHUNK) == 0); 1224 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1225 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1226 rec.ir_freecount--; 1227 error = xfs_inobt_update(cur, &rec); 1228 if (error) 1229 goto error0; 1230 be32_add_cpu(&agi->agi_freecount, -1); 1231 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1232 pag->pagi_freecount--; 1233 1234 error = xfs_check_agi_freecount(cur); 1235 if (error) 1236 goto error0; 1237 1238 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1239 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1240 *inop = ino; 1241 return 0; 1242 error1: 1243 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 1244 error0: 1245 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1246 return error; 1247 } 1248 1249 /* 1250 * Use the free inode btree to allocate an inode based on distance from the 1251 * parent. Note that the provided cursor may be deleted and replaced. 1252 */ 1253 STATIC int 1254 xfs_dialloc_ag_finobt_near( 1255 xfs_agino_t pagino, 1256 struct xfs_btree_cur **ocur, 1257 struct xfs_inobt_rec_incore *rec) 1258 { 1259 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ 1260 struct xfs_btree_cur *rcur; /* right search cursor */ 1261 struct xfs_inobt_rec_incore rrec; 1262 int error; 1263 int i, j; 1264 1265 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); 1266 if (error) 1267 return error; 1268 1269 if (i == 1) { 1270 error = xfs_inobt_get_rec(lcur, rec, &i); 1271 if (error) 1272 return error; 1273 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) 1274 return -EFSCORRUPTED; 1275 1276 /* 1277 * See if we've landed in the parent inode record. The finobt 1278 * only tracks chunks with at least one free inode, so record 1279 * existence is enough. 1280 */ 1281 if (pagino >= rec->ir_startino && 1282 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) 1283 return 0; 1284 } 1285 1286 error = xfs_btree_dup_cursor(lcur, &rcur); 1287 if (error) 1288 return error; 1289 1290 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); 1291 if (error) 1292 goto error_rcur; 1293 if (j == 1) { 1294 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1295 if (error) 1296 goto error_rcur; 1297 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) { 1298 error = -EFSCORRUPTED; 1299 goto error_rcur; 1300 } 1301 } 1302 1303 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) { 1304 error = -EFSCORRUPTED; 1305 goto error_rcur; 1306 } 1307 if (i == 1 && j == 1) { 1308 /* 1309 * Both the left and right records are valid. Choose the closer 1310 * inode chunk to the target. 1311 */ 1312 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > 1313 (rrec.ir_startino - pagino)) { 1314 *rec = rrec; 1315 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1316 *ocur = rcur; 1317 } else { 1318 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1319 } 1320 } else if (j == 1) { 1321 /* only the right record is valid */ 1322 *rec = rrec; 1323 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1324 *ocur = rcur; 1325 } else if (i == 1) { 1326 /* only the left record is valid */ 1327 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1328 } 1329 1330 return 0; 1331 1332 error_rcur: 1333 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); 1334 return error; 1335 } 1336 1337 /* 1338 * Use the free inode btree to find a free inode based on a newino hint. If 1339 * the hint is NULL, find the first free inode in the AG. 1340 */ 1341 STATIC int 1342 xfs_dialloc_ag_finobt_newino( 1343 struct xfs_agi *agi, 1344 struct xfs_btree_cur *cur, 1345 struct xfs_inobt_rec_incore *rec) 1346 { 1347 int error; 1348 int i; 1349 1350 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1351 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1352 XFS_LOOKUP_EQ, &i); 1353 if (error) 1354 return error; 1355 if (i == 1) { 1356 error = xfs_inobt_get_rec(cur, rec, &i); 1357 if (error) 1358 return error; 1359 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1360 return -EFSCORRUPTED; 1361 return 0; 1362 } 1363 } 1364 1365 /* 1366 * Find the first inode available in the AG. 1367 */ 1368 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1369 if (error) 1370 return error; 1371 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1372 return -EFSCORRUPTED; 1373 1374 error = xfs_inobt_get_rec(cur, rec, &i); 1375 if (error) 1376 return error; 1377 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1378 return -EFSCORRUPTED; 1379 1380 return 0; 1381 } 1382 1383 /* 1384 * Update the inobt based on a modification made to the finobt. Also ensure that 1385 * the records from both trees are equivalent post-modification. 1386 */ 1387 STATIC int 1388 xfs_dialloc_ag_update_inobt( 1389 struct xfs_btree_cur *cur, /* inobt cursor */ 1390 struct xfs_inobt_rec_incore *frec, /* finobt record */ 1391 int offset) /* inode offset */ 1392 { 1393 struct xfs_inobt_rec_incore rec; 1394 int error; 1395 int i; 1396 1397 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1398 if (error) 1399 return error; 1400 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1401 return -EFSCORRUPTED; 1402 1403 error = xfs_inobt_get_rec(cur, &rec, &i); 1404 if (error) 1405 return error; 1406 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1407 return -EFSCORRUPTED; 1408 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1409 XFS_INODES_PER_CHUNK) == 0); 1410 1411 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1412 rec.ir_freecount--; 1413 1414 if (XFS_IS_CORRUPT(cur->bc_mp, 1415 rec.ir_free != frec->ir_free || 1416 rec.ir_freecount != frec->ir_freecount)) 1417 return -EFSCORRUPTED; 1418 1419 return xfs_inobt_update(cur, &rec); 1420 } 1421 1422 /* 1423 * Allocate an inode using the free inode btree, if available. Otherwise, fall 1424 * back to the inobt search algorithm. 1425 * 1426 * The caller selected an AG for us, and made sure that free inodes are 1427 * available. 1428 */ 1429 static int 1430 xfs_dialloc_ag( 1431 struct xfs_perag *pag, 1432 struct xfs_trans *tp, 1433 struct xfs_buf *agbp, 1434 xfs_ino_t parent, 1435 xfs_ino_t *inop) 1436 { 1437 struct xfs_mount *mp = tp->t_mountp; 1438 struct xfs_agi *agi = agbp->b_addr; 1439 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1440 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1441 struct xfs_btree_cur *cur; /* finobt cursor */ 1442 struct xfs_btree_cur *icur; /* inobt cursor */ 1443 struct xfs_inobt_rec_incore rec; 1444 xfs_ino_t ino; 1445 int error; 1446 int offset; 1447 int i; 1448 1449 if (!xfs_has_finobt(mp)) 1450 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop); 1451 1452 /* 1453 * If pagino is 0 (this is the root inode allocation) use newino. 1454 * This must work because we've just allocated some. 1455 */ 1456 if (!pagino) 1457 pagino = be32_to_cpu(agi->agi_newino); 1458 1459 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 1460 1461 error = xfs_check_agi_freecount(cur); 1462 if (error) 1463 goto error_cur; 1464 1465 /* 1466 * The search algorithm depends on whether we're in the same AG as the 1467 * parent. If so, find the closest available inode to the parent. If 1468 * not, consider the agi hint or find the first free inode in the AG. 1469 */ 1470 if (pag->pag_agno == pagno) 1471 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); 1472 else 1473 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); 1474 if (error) 1475 goto error_cur; 1476 1477 offset = xfs_inobt_first_free_inode(&rec); 1478 ASSERT(offset >= 0); 1479 ASSERT(offset < XFS_INODES_PER_CHUNK); 1480 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1481 XFS_INODES_PER_CHUNK) == 0); 1482 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1483 1484 /* 1485 * Modify or remove the finobt record. 1486 */ 1487 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1488 rec.ir_freecount--; 1489 if (rec.ir_freecount) 1490 error = xfs_inobt_update(cur, &rec); 1491 else 1492 error = xfs_btree_delete(cur, &i); 1493 if (error) 1494 goto error_cur; 1495 1496 /* 1497 * The finobt has now been updated appropriately. We haven't updated the 1498 * agi and superblock yet, so we can create an inobt cursor and validate 1499 * the original freecount. If all is well, make the equivalent update to 1500 * the inobt using the finobt record and offset information. 1501 */ 1502 icur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1503 1504 error = xfs_check_agi_freecount(icur); 1505 if (error) 1506 goto error_icur; 1507 1508 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset); 1509 if (error) 1510 goto error_icur; 1511 1512 /* 1513 * Both trees have now been updated. We must update the perag and 1514 * superblock before we can check the freecount for each btree. 1515 */ 1516 be32_add_cpu(&agi->agi_freecount, -1); 1517 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1518 pag->pagi_freecount--; 1519 1520 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1521 1522 error = xfs_check_agi_freecount(icur); 1523 if (error) 1524 goto error_icur; 1525 error = xfs_check_agi_freecount(cur); 1526 if (error) 1527 goto error_icur; 1528 1529 xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR); 1530 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1531 *inop = ino; 1532 return 0; 1533 1534 error_icur: 1535 xfs_btree_del_cursor(icur, XFS_BTREE_ERROR); 1536 error_cur: 1537 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1538 return error; 1539 } 1540 1541 static int 1542 xfs_dialloc_roll( 1543 struct xfs_trans **tpp, 1544 struct xfs_buf *agibp) 1545 { 1546 struct xfs_trans *tp = *tpp; 1547 struct xfs_dquot_acct *dqinfo; 1548 int error; 1549 1550 /* 1551 * Hold to on to the agibp across the commit so no other allocation can 1552 * come in and take the free inodes we just allocated for our caller. 1553 */ 1554 xfs_trans_bhold(tp, agibp); 1555 1556 /* 1557 * We want the quota changes to be associated with the next transaction, 1558 * NOT this one. So, detach the dqinfo from this and attach it to the 1559 * next transaction. 1560 */ 1561 dqinfo = tp->t_dqinfo; 1562 tp->t_dqinfo = NULL; 1563 1564 error = xfs_trans_roll(&tp); 1565 1566 /* Re-attach the quota info that we detached from prev trx. */ 1567 tp->t_dqinfo = dqinfo; 1568 1569 /* 1570 * Join the buffer even on commit error so that the buffer is released 1571 * when the caller cancels the transaction and doesn't have to handle 1572 * this error case specially. 1573 */ 1574 xfs_trans_bjoin(tp, agibp); 1575 *tpp = tp; 1576 return error; 1577 } 1578 1579 static bool 1580 xfs_dialloc_good_ag( 1581 struct xfs_perag *pag, 1582 struct xfs_trans *tp, 1583 umode_t mode, 1584 int flags, 1585 bool ok_alloc) 1586 { 1587 struct xfs_mount *mp = tp->t_mountp; 1588 xfs_extlen_t ineed; 1589 xfs_extlen_t longest = 0; 1590 int needspace; 1591 int error; 1592 1593 if (!pag) 1594 return false; 1595 if (!xfs_perag_allows_inodes(pag)) 1596 return false; 1597 1598 if (!xfs_perag_initialised_agi(pag)) { 1599 error = xfs_ialloc_read_agi(pag, tp, NULL); 1600 if (error) 1601 return false; 1602 } 1603 1604 if (pag->pagi_freecount) 1605 return true; 1606 if (!ok_alloc) 1607 return false; 1608 1609 if (!xfs_perag_initialised_agf(pag)) { 1610 error = xfs_alloc_read_agf(pag, tp, flags, NULL); 1611 if (error) 1612 return false; 1613 } 1614 1615 /* 1616 * Check that there is enough free space for the file plus a chunk of 1617 * inodes if we need to allocate some. If this is the first pass across 1618 * the AGs, take into account the potential space needed for alignment 1619 * of inode chunks when checking the longest contiguous free space in 1620 * the AG - this prevents us from getting ENOSPC because we have free 1621 * space larger than ialloc_blks but alignment constraints prevent us 1622 * from using it. 1623 * 1624 * If we can't find an AG with space for full alignment slack to be 1625 * taken into account, we must be near ENOSPC in all AGs. Hence we 1626 * don't include alignment for the second pass and so if we fail 1627 * allocation due to alignment issues then it is most likely a real 1628 * ENOSPC condition. 1629 * 1630 * XXX(dgc): this calculation is now bogus thanks to the per-ag 1631 * reservations that xfs_alloc_fix_freelist() now does via 1632 * xfs_alloc_space_available(). When the AG fills up, pagf_freeblks will 1633 * be more than large enough for the check below to succeed, but 1634 * xfs_alloc_space_available() will fail because of the non-zero 1635 * metadata reservation and hence we won't actually be able to allocate 1636 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC 1637 * because of this. 1638 */ 1639 ineed = M_IGEO(mp)->ialloc_min_blks; 1640 if (flags && ineed > 1) 1641 ineed += M_IGEO(mp)->cluster_align; 1642 longest = pag->pagf_longest; 1643 if (!longest) 1644 longest = pag->pagf_flcount > 0; 1645 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); 1646 1647 if (pag->pagf_freeblks < needspace + ineed || longest < ineed) 1648 return false; 1649 return true; 1650 } 1651 1652 static int 1653 xfs_dialloc_try_ag( 1654 struct xfs_perag *pag, 1655 struct xfs_trans **tpp, 1656 xfs_ino_t parent, 1657 xfs_ino_t *new_ino, 1658 bool ok_alloc) 1659 { 1660 struct xfs_buf *agbp; 1661 xfs_ino_t ino; 1662 int error; 1663 1664 /* 1665 * Then read in the AGI buffer and recheck with the AGI buffer 1666 * lock held. 1667 */ 1668 error = xfs_ialloc_read_agi(pag, *tpp, &agbp); 1669 if (error) 1670 return error; 1671 1672 if (!pag->pagi_freecount) { 1673 if (!ok_alloc) { 1674 error = -EAGAIN; 1675 goto out_release; 1676 } 1677 1678 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp); 1679 if (error < 0) 1680 goto out_release; 1681 1682 /* 1683 * We successfully allocated space for an inode cluster in this 1684 * AG. Roll the transaction so that we can allocate one of the 1685 * new inodes. 1686 */ 1687 ASSERT(pag->pagi_freecount > 0); 1688 error = xfs_dialloc_roll(tpp, agbp); 1689 if (error) 1690 goto out_release; 1691 } 1692 1693 /* Allocate an inode in the found AG */ 1694 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino); 1695 if (!error) 1696 *new_ino = ino; 1697 return error; 1698 1699 out_release: 1700 xfs_trans_brelse(*tpp, agbp); 1701 return error; 1702 } 1703 1704 /* 1705 * Allocate an on-disk inode. 1706 * 1707 * Mode is used to tell whether the new inode is a directory and hence where to 1708 * locate it. The on-disk inode that is allocated will be returned in @new_ino 1709 * on success, otherwise an error will be set to indicate the failure (e.g. 1710 * -ENOSPC). 1711 */ 1712 int 1713 xfs_dialloc( 1714 struct xfs_trans **tpp, 1715 xfs_ino_t parent, 1716 umode_t mode, 1717 xfs_ino_t *new_ino) 1718 { 1719 struct xfs_mount *mp = (*tpp)->t_mountp; 1720 xfs_agnumber_t agno; 1721 int error = 0; 1722 xfs_agnumber_t start_agno; 1723 struct xfs_perag *pag; 1724 struct xfs_ino_geometry *igeo = M_IGEO(mp); 1725 bool ok_alloc = true; 1726 bool low_space = false; 1727 int flags; 1728 xfs_ino_t ino = NULLFSINO; 1729 1730 /* 1731 * Directories, symlinks, and regular files frequently allocate at least 1732 * one block, so factor that potential expansion when we examine whether 1733 * an AG has enough space for file creation. 1734 */ 1735 if (S_ISDIR(mode)) 1736 start_agno = atomic_inc_return(&mp->m_agirotor) % mp->m_maxagi; 1737 else { 1738 start_agno = XFS_INO_TO_AGNO(mp, parent); 1739 if (start_agno >= mp->m_maxagi) 1740 start_agno = 0; 1741 } 1742 1743 /* 1744 * If we have already hit the ceiling of inode blocks then clear 1745 * ok_alloc so we scan all available agi structures for a free 1746 * inode. 1747 * 1748 * Read rough value of mp->m_icount by percpu_counter_read_positive, 1749 * which will sacrifice the preciseness but improve the performance. 1750 */ 1751 if (igeo->maxicount && 1752 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos 1753 > igeo->maxicount) { 1754 ok_alloc = false; 1755 } 1756 1757 /* 1758 * If we are near to ENOSPC, we want to prefer allocation from AGs that 1759 * have free inodes in them rather than use up free space allocating new 1760 * inode chunks. Hence we turn off allocation for the first non-blocking 1761 * pass through the AGs if we are near ENOSPC to consume free inodes 1762 * that we can immediately allocate, but then we allow allocation on the 1763 * second pass if we fail to find an AG with free inodes in it. 1764 */ 1765 if (percpu_counter_read_positive(&mp->m_fdblocks) < 1766 mp->m_low_space[XFS_LOWSP_1_PCNT]) { 1767 ok_alloc = false; 1768 low_space = true; 1769 } 1770 1771 /* 1772 * Loop until we find an allocation group that either has free inodes 1773 * or in which we can allocate some inodes. Iterate through the 1774 * allocation groups upward, wrapping at the end. 1775 */ 1776 flags = XFS_ALLOC_FLAG_TRYLOCK; 1777 retry: 1778 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) { 1779 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) { 1780 error = xfs_dialloc_try_ag(pag, tpp, parent, 1781 &ino, ok_alloc); 1782 if (error != -EAGAIN) 1783 break; 1784 error = 0; 1785 } 1786 1787 if (xfs_is_shutdown(mp)) { 1788 error = -EFSCORRUPTED; 1789 break; 1790 } 1791 } 1792 if (pag) 1793 xfs_perag_rele(pag); 1794 if (error) 1795 return error; 1796 if (ino == NULLFSINO) { 1797 if (flags) { 1798 flags = 0; 1799 if (low_space) 1800 ok_alloc = true; 1801 goto retry; 1802 } 1803 return -ENOSPC; 1804 } 1805 *new_ino = ino; 1806 return 0; 1807 } 1808 1809 /* 1810 * Free the blocks of an inode chunk. We must consider that the inode chunk 1811 * might be sparse and only free the regions that are allocated as part of the 1812 * chunk. 1813 */ 1814 STATIC void 1815 xfs_difree_inode_chunk( 1816 struct xfs_trans *tp, 1817 xfs_agnumber_t agno, 1818 struct xfs_inobt_rec_incore *rec) 1819 { 1820 struct xfs_mount *mp = tp->t_mountp; 1821 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, 1822 rec->ir_startino); 1823 int startidx, endidx; 1824 int nextbit; 1825 xfs_agblock_t agbno; 1826 int contigblk; 1827 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS); 1828 1829 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1830 /* not sparse, calculate extent info directly */ 1831 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), 1832 M_IGEO(mp)->ialloc_blks, 1833 &XFS_RMAP_OINFO_INODES); 1834 return; 1835 } 1836 1837 /* holemask is only 16-bits (fits in an unsigned long) */ 1838 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0])); 1839 holemask[0] = rec->ir_holemask; 1840 1841 /* 1842 * Find contiguous ranges of zeroes (i.e., allocated regions) in the 1843 * holemask and convert the start/end index of each range to an extent. 1844 * We start with the start and end index both pointing at the first 0 in 1845 * the mask. 1846 */ 1847 startidx = endidx = find_first_zero_bit(holemask, 1848 XFS_INOBT_HOLEMASK_BITS); 1849 nextbit = startidx + 1; 1850 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1851 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1852 nextbit); 1853 /* 1854 * If the next zero bit is contiguous, update the end index of 1855 * the current range and continue. 1856 */ 1857 if (nextbit != XFS_INOBT_HOLEMASK_BITS && 1858 nextbit == endidx + 1) { 1859 endidx = nextbit; 1860 goto next; 1861 } 1862 1863 /* 1864 * nextbit is not contiguous with the current end index. Convert 1865 * the current start/end to an extent and add it to the free 1866 * list. 1867 */ 1868 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) / 1869 mp->m_sb.sb_inopblock; 1870 contigblk = ((endidx - startidx + 1) * 1871 XFS_INODES_PER_HOLEMASK_BIT) / 1872 mp->m_sb.sb_inopblock; 1873 1874 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1875 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1876 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno), 1877 contigblk, &XFS_RMAP_OINFO_INODES); 1878 1879 /* reset range to current bit and carry on... */ 1880 startidx = endidx = nextbit; 1881 1882 next: 1883 nextbit++; 1884 } 1885 } 1886 1887 STATIC int 1888 xfs_difree_inobt( 1889 struct xfs_perag *pag, 1890 struct xfs_trans *tp, 1891 struct xfs_buf *agbp, 1892 xfs_agino_t agino, 1893 struct xfs_icluster *xic, 1894 struct xfs_inobt_rec_incore *orec) 1895 { 1896 struct xfs_mount *mp = pag->pag_mount; 1897 struct xfs_agi *agi = agbp->b_addr; 1898 struct xfs_btree_cur *cur; 1899 struct xfs_inobt_rec_incore rec; 1900 int ilen; 1901 int error; 1902 int i; 1903 int off; 1904 1905 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 1906 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); 1907 1908 /* 1909 * Initialize the cursor. 1910 */ 1911 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1912 1913 error = xfs_check_agi_freecount(cur); 1914 if (error) 1915 goto error0; 1916 1917 /* 1918 * Look for the entry describing this inode. 1919 */ 1920 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1921 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", 1922 __func__, error); 1923 goto error0; 1924 } 1925 if (XFS_IS_CORRUPT(mp, i != 1)) { 1926 error = -EFSCORRUPTED; 1927 goto error0; 1928 } 1929 error = xfs_inobt_get_rec(cur, &rec, &i); 1930 if (error) { 1931 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1932 __func__, error); 1933 goto error0; 1934 } 1935 if (XFS_IS_CORRUPT(mp, i != 1)) { 1936 error = -EFSCORRUPTED; 1937 goto error0; 1938 } 1939 /* 1940 * Get the offset in the inode chunk. 1941 */ 1942 off = agino - rec.ir_startino; 1943 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); 1944 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off))); 1945 /* 1946 * Mark the inode free & increment the count. 1947 */ 1948 rec.ir_free |= XFS_INOBT_MASK(off); 1949 rec.ir_freecount++; 1950 1951 /* 1952 * When an inode chunk is free, it becomes eligible for removal. Don't 1953 * remove the chunk if the block size is large enough for multiple inode 1954 * chunks (that might not be free). 1955 */ 1956 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 1957 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1958 struct xfs_perag *pag = agbp->b_pag; 1959 1960 xic->deleted = true; 1961 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, 1962 rec.ir_startino); 1963 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1964 1965 /* 1966 * Remove the inode cluster from the AGI B+Tree, adjust the 1967 * AGI and Superblock inode counts, and mark the disk space 1968 * to be freed when the transaction is committed. 1969 */ 1970 ilen = rec.ir_freecount; 1971 be32_add_cpu(&agi->agi_count, -ilen); 1972 be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); 1973 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); 1974 pag->pagi_freecount -= ilen - 1; 1975 pag->pagi_count -= ilen; 1976 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); 1977 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 1978 1979 if ((error = xfs_btree_delete(cur, &i))) { 1980 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", 1981 __func__, error); 1982 goto error0; 1983 } 1984 1985 xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 1986 } else { 1987 xic->deleted = false; 1988 1989 error = xfs_inobt_update(cur, &rec); 1990 if (error) { 1991 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", 1992 __func__, error); 1993 goto error0; 1994 } 1995 1996 /* 1997 * Change the inode free counts and log the ag/sb changes. 1998 */ 1999 be32_add_cpu(&agi->agi_freecount, 1); 2000 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 2001 pag->pagi_freecount++; 2002 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); 2003 } 2004 2005 error = xfs_check_agi_freecount(cur); 2006 if (error) 2007 goto error0; 2008 2009 *orec = rec; 2010 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2011 return 0; 2012 2013 error0: 2014 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2015 return error; 2016 } 2017 2018 /* 2019 * Free an inode in the free inode btree. 2020 */ 2021 STATIC int 2022 xfs_difree_finobt( 2023 struct xfs_perag *pag, 2024 struct xfs_trans *tp, 2025 struct xfs_buf *agbp, 2026 xfs_agino_t agino, 2027 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ 2028 { 2029 struct xfs_mount *mp = pag->pag_mount; 2030 struct xfs_btree_cur *cur; 2031 struct xfs_inobt_rec_incore rec; 2032 int offset = agino - ibtrec->ir_startino; 2033 int error; 2034 int i; 2035 2036 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 2037 2038 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); 2039 if (error) 2040 goto error; 2041 if (i == 0) { 2042 /* 2043 * If the record does not exist in the finobt, we must have just 2044 * freed an inode in a previously fully allocated chunk. If not, 2045 * something is out of sync. 2046 */ 2047 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) { 2048 error = -EFSCORRUPTED; 2049 goto error; 2050 } 2051 2052 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask, 2053 ibtrec->ir_count, 2054 ibtrec->ir_freecount, 2055 ibtrec->ir_free, &i); 2056 if (error) 2057 goto error; 2058 ASSERT(i == 1); 2059 2060 goto out; 2061 } 2062 2063 /* 2064 * Read and update the existing record. We could just copy the ibtrec 2065 * across here, but that would defeat the purpose of having redundant 2066 * metadata. By making the modifications independently, we can catch 2067 * corruptions that we wouldn't see if we just copied from one record 2068 * to another. 2069 */ 2070 error = xfs_inobt_get_rec(cur, &rec, &i); 2071 if (error) 2072 goto error; 2073 if (XFS_IS_CORRUPT(mp, i != 1)) { 2074 error = -EFSCORRUPTED; 2075 goto error; 2076 } 2077 2078 rec.ir_free |= XFS_INOBT_MASK(offset); 2079 rec.ir_freecount++; 2080 2081 if (XFS_IS_CORRUPT(mp, 2082 rec.ir_free != ibtrec->ir_free || 2083 rec.ir_freecount != ibtrec->ir_freecount)) { 2084 error = -EFSCORRUPTED; 2085 goto error; 2086 } 2087 2088 /* 2089 * The content of inobt records should always match between the inobt 2090 * and finobt. The lifecycle of records in the finobt is different from 2091 * the inobt in that the finobt only tracks records with at least one 2092 * free inode. Hence, if all of the inodes are free and we aren't 2093 * keeping inode chunks permanently on disk, remove the record. 2094 * Otherwise, update the record with the new information. 2095 * 2096 * Note that we currently can't free chunks when the block size is large 2097 * enough for multiple chunks. Leave the finobt record to remain in sync 2098 * with the inobt. 2099 */ 2100 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 2101 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 2102 error = xfs_btree_delete(cur, &i); 2103 if (error) 2104 goto error; 2105 ASSERT(i == 1); 2106 } else { 2107 error = xfs_inobt_update(cur, &rec); 2108 if (error) 2109 goto error; 2110 } 2111 2112 out: 2113 error = xfs_check_agi_freecount(cur); 2114 if (error) 2115 goto error; 2116 2117 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2118 return 0; 2119 2120 error: 2121 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2122 return error; 2123 } 2124 2125 /* 2126 * Free disk inode. Carefully avoids touching the incore inode, all 2127 * manipulations incore are the caller's responsibility. 2128 * The on-disk inode is not changed by this operation, only the 2129 * btree (free inode mask) is changed. 2130 */ 2131 int 2132 xfs_difree( 2133 struct xfs_trans *tp, 2134 struct xfs_perag *pag, 2135 xfs_ino_t inode, 2136 struct xfs_icluster *xic) 2137 { 2138 /* REFERENCED */ 2139 xfs_agblock_t agbno; /* block number containing inode */ 2140 struct xfs_buf *agbp; /* buffer for allocation group header */ 2141 xfs_agino_t agino; /* allocation group inode number */ 2142 int error; /* error return value */ 2143 struct xfs_mount *mp = tp->t_mountp; 2144 struct xfs_inobt_rec_incore rec;/* btree record */ 2145 2146 /* 2147 * Break up inode number into its components. 2148 */ 2149 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) { 2150 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).", 2151 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno); 2152 ASSERT(0); 2153 return -EINVAL; 2154 } 2155 agino = XFS_INO_TO_AGINO(mp, inode); 2156 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2157 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", 2158 __func__, (unsigned long long)inode, 2159 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2160 ASSERT(0); 2161 return -EINVAL; 2162 } 2163 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2164 if (agbno >= mp->m_sb.sb_agblocks) { 2165 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 2166 __func__, agbno, mp->m_sb.sb_agblocks); 2167 ASSERT(0); 2168 return -EINVAL; 2169 } 2170 /* 2171 * Get the allocation group header. 2172 */ 2173 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2174 if (error) { 2175 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", 2176 __func__, error); 2177 return error; 2178 } 2179 2180 /* 2181 * Fix up the inode allocation btree. 2182 */ 2183 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec); 2184 if (error) 2185 goto error0; 2186 2187 /* 2188 * Fix up the free inode btree. 2189 */ 2190 if (xfs_has_finobt(mp)) { 2191 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec); 2192 if (error) 2193 goto error0; 2194 } 2195 2196 return 0; 2197 2198 error0: 2199 return error; 2200 } 2201 2202 STATIC int 2203 xfs_imap_lookup( 2204 struct xfs_perag *pag, 2205 struct xfs_trans *tp, 2206 xfs_agino_t agino, 2207 xfs_agblock_t agbno, 2208 xfs_agblock_t *chunk_agbno, 2209 xfs_agblock_t *offset_agbno, 2210 int flags) 2211 { 2212 struct xfs_mount *mp = pag->pag_mount; 2213 struct xfs_inobt_rec_incore rec; 2214 struct xfs_btree_cur *cur; 2215 struct xfs_buf *agbp; 2216 int error; 2217 int i; 2218 2219 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2220 if (error) { 2221 xfs_alert(mp, 2222 "%s: xfs_ialloc_read_agi() returned error %d, agno %d", 2223 __func__, error, pag->pag_agno); 2224 return error; 2225 } 2226 2227 /* 2228 * Lookup the inode record for the given agino. If the record cannot be 2229 * found, then it's an invalid inode number and we should abort. Once 2230 * we have a record, we need to ensure it contains the inode number 2231 * we are looking up. 2232 */ 2233 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 2234 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); 2235 if (!error) { 2236 if (i) 2237 error = xfs_inobt_get_rec(cur, &rec, &i); 2238 if (!error && i == 0) 2239 error = -EINVAL; 2240 } 2241 2242 xfs_trans_brelse(tp, agbp); 2243 xfs_btree_del_cursor(cur, error); 2244 if (error) 2245 return error; 2246 2247 /* check that the returned record contains the required inode */ 2248 if (rec.ir_startino > agino || 2249 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino) 2250 return -EINVAL; 2251 2252 /* for untrusted inodes check it is allocated first */ 2253 if ((flags & XFS_IGET_UNTRUSTED) && 2254 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 2255 return -EINVAL; 2256 2257 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 2258 *offset_agbno = agbno - *chunk_agbno; 2259 return 0; 2260 } 2261 2262 /* 2263 * Return the location of the inode in imap, for mapping it into a buffer. 2264 */ 2265 int 2266 xfs_imap( 2267 struct xfs_perag *pag, 2268 struct xfs_trans *tp, 2269 xfs_ino_t ino, /* inode to locate */ 2270 struct xfs_imap *imap, /* location map structure */ 2271 uint flags) /* flags for inode btree lookup */ 2272 { 2273 struct xfs_mount *mp = pag->pag_mount; 2274 xfs_agblock_t agbno; /* block number of inode in the alloc group */ 2275 xfs_agino_t agino; /* inode number within alloc group */ 2276 xfs_agblock_t chunk_agbno; /* first block in inode chunk */ 2277 xfs_agblock_t cluster_agbno; /* first block in inode cluster */ 2278 int error; /* error code */ 2279 int offset; /* index of inode in its buffer */ 2280 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */ 2281 2282 ASSERT(ino != NULLFSINO); 2283 2284 /* 2285 * Split up the inode number into its parts. 2286 */ 2287 agino = XFS_INO_TO_AGINO(mp, ino); 2288 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2289 if (agbno >= mp->m_sb.sb_agblocks || 2290 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2291 error = -EINVAL; 2292 #ifdef DEBUG 2293 /* 2294 * Don't output diagnostic information for untrusted inodes 2295 * as they can be invalid without implying corruption. 2296 */ 2297 if (flags & XFS_IGET_UNTRUSTED) 2298 return error; 2299 if (agbno >= mp->m_sb.sb_agblocks) { 2300 xfs_alert(mp, 2301 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", 2302 __func__, (unsigned long long)agbno, 2303 (unsigned long)mp->m_sb.sb_agblocks); 2304 } 2305 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2306 xfs_alert(mp, 2307 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", 2308 __func__, ino, 2309 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2310 } 2311 xfs_stack_trace(); 2312 #endif /* DEBUG */ 2313 return error; 2314 } 2315 2316 /* 2317 * For bulkstat and handle lookups, we have an untrusted inode number 2318 * that we have to verify is valid. We cannot do this just by reading 2319 * the inode buffer as it may have been unlinked and removed leaving 2320 * inodes in stale state on disk. Hence we have to do a btree lookup 2321 * in all cases where an untrusted inode number is passed. 2322 */ 2323 if (flags & XFS_IGET_UNTRUSTED) { 2324 error = xfs_imap_lookup(pag, tp, agino, agbno, 2325 &chunk_agbno, &offset_agbno, flags); 2326 if (error) 2327 return error; 2328 goto out_map; 2329 } 2330 2331 /* 2332 * If the inode cluster size is the same as the blocksize or 2333 * smaller we get to the buffer by simple arithmetics. 2334 */ 2335 if (M_IGEO(mp)->blocks_per_cluster == 1) { 2336 offset = XFS_INO_TO_OFFSET(mp, ino); 2337 ASSERT(offset < mp->m_sb.sb_inopblock); 2338 2339 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno); 2340 imap->im_len = XFS_FSB_TO_BB(mp, 1); 2341 imap->im_boffset = (unsigned short)(offset << 2342 mp->m_sb.sb_inodelog); 2343 return 0; 2344 } 2345 2346 /* 2347 * If the inode chunks are aligned then use simple maths to 2348 * find the location. Otherwise we have to do a btree 2349 * lookup to find the location. 2350 */ 2351 if (M_IGEO(mp)->inoalign_mask) { 2352 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask; 2353 chunk_agbno = agbno - offset_agbno; 2354 } else { 2355 error = xfs_imap_lookup(pag, tp, agino, agbno, 2356 &chunk_agbno, &offset_agbno, flags); 2357 if (error) 2358 return error; 2359 } 2360 2361 out_map: 2362 ASSERT(agbno >= chunk_agbno); 2363 cluster_agbno = chunk_agbno + 2364 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) * 2365 M_IGEO(mp)->blocks_per_cluster); 2366 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + 2367 XFS_INO_TO_OFFSET(mp, ino); 2368 2369 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno); 2370 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); 2371 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog); 2372 2373 /* 2374 * If the inode number maps to a block outside the bounds 2375 * of the file system then return NULL rather than calling 2376 * read_buf and panicing when we get an error from the 2377 * driver. 2378 */ 2379 if ((imap->im_blkno + imap->im_len) > 2380 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 2381 xfs_alert(mp, 2382 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", 2383 __func__, (unsigned long long) imap->im_blkno, 2384 (unsigned long long) imap->im_len, 2385 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 2386 return -EINVAL; 2387 } 2388 return 0; 2389 } 2390 2391 /* 2392 * Log specified fields for the ag hdr (inode section). The growth of the agi 2393 * structure over time requires that we interpret the buffer as two logical 2394 * regions delineated by the end of the unlinked list. This is due to the size 2395 * of the hash table and its location in the middle of the agi. 2396 * 2397 * For example, a request to log a field before agi_unlinked and a field after 2398 * agi_unlinked could cause us to log the entire hash table and use an excessive 2399 * amount of log space. To avoid this behavior, log the region up through 2400 * agi_unlinked in one call and the region after agi_unlinked through the end of 2401 * the structure in another. 2402 */ 2403 void 2404 xfs_ialloc_log_agi( 2405 struct xfs_trans *tp, 2406 struct xfs_buf *bp, 2407 uint32_t fields) 2408 { 2409 int first; /* first byte number */ 2410 int last; /* last byte number */ 2411 static const short offsets[] = { /* field starting offsets */ 2412 /* keep in sync with bit definitions */ 2413 offsetof(xfs_agi_t, agi_magicnum), 2414 offsetof(xfs_agi_t, agi_versionnum), 2415 offsetof(xfs_agi_t, agi_seqno), 2416 offsetof(xfs_agi_t, agi_length), 2417 offsetof(xfs_agi_t, agi_count), 2418 offsetof(xfs_agi_t, agi_root), 2419 offsetof(xfs_agi_t, agi_level), 2420 offsetof(xfs_agi_t, agi_freecount), 2421 offsetof(xfs_agi_t, agi_newino), 2422 offsetof(xfs_agi_t, agi_dirino), 2423 offsetof(xfs_agi_t, agi_unlinked), 2424 offsetof(xfs_agi_t, agi_free_root), 2425 offsetof(xfs_agi_t, agi_free_level), 2426 offsetof(xfs_agi_t, agi_iblocks), 2427 sizeof(xfs_agi_t) 2428 }; 2429 #ifdef DEBUG 2430 struct xfs_agi *agi = bp->b_addr; 2431 2432 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 2433 #endif 2434 2435 /* 2436 * Compute byte offsets for the first and last fields in the first 2437 * region and log the agi buffer. This only logs up through 2438 * agi_unlinked. 2439 */ 2440 if (fields & XFS_AGI_ALL_BITS_R1) { 2441 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1, 2442 &first, &last); 2443 xfs_trans_log_buf(tp, bp, first, last); 2444 } 2445 2446 /* 2447 * Mask off the bits in the first region and calculate the first and 2448 * last field offsets for any bits in the second region. 2449 */ 2450 fields &= ~XFS_AGI_ALL_BITS_R1; 2451 if (fields) { 2452 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2, 2453 &first, &last); 2454 xfs_trans_log_buf(tp, bp, first, last); 2455 } 2456 } 2457 2458 static xfs_failaddr_t 2459 xfs_agi_verify( 2460 struct xfs_buf *bp) 2461 { 2462 struct xfs_mount *mp = bp->b_mount; 2463 struct xfs_agi *agi = bp->b_addr; 2464 int i; 2465 2466 if (xfs_has_crc(mp)) { 2467 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid)) 2468 return __this_address; 2469 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn))) 2470 return __this_address; 2471 } 2472 2473 /* 2474 * Validate the magic number of the agi block. 2475 */ 2476 if (!xfs_verify_magic(bp, agi->agi_magicnum)) 2477 return __this_address; 2478 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) 2479 return __this_address; 2480 2481 if (be32_to_cpu(agi->agi_level) < 1 || 2482 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels) 2483 return __this_address; 2484 2485 if (xfs_has_finobt(mp) && 2486 (be32_to_cpu(agi->agi_free_level) < 1 || 2487 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels)) 2488 return __this_address; 2489 2490 /* 2491 * during growfs operations, the perag is not fully initialised, 2492 * so we can't use it for any useful checking. growfs ensures we can't 2493 * use it by using uncached buffers that don't have the perag attached 2494 * so we can detect and avoid this problem. 2495 */ 2496 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) 2497 return __this_address; 2498 2499 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { 2500 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO)) 2501 continue; 2502 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i]))) 2503 return __this_address; 2504 } 2505 2506 return NULL; 2507 } 2508 2509 static void 2510 xfs_agi_read_verify( 2511 struct xfs_buf *bp) 2512 { 2513 struct xfs_mount *mp = bp->b_mount; 2514 xfs_failaddr_t fa; 2515 2516 if (xfs_has_crc(mp) && 2517 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) 2518 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 2519 else { 2520 fa = xfs_agi_verify(bp); 2521 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI)) 2522 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2523 } 2524 } 2525 2526 static void 2527 xfs_agi_write_verify( 2528 struct xfs_buf *bp) 2529 { 2530 struct xfs_mount *mp = bp->b_mount; 2531 struct xfs_buf_log_item *bip = bp->b_log_item; 2532 struct xfs_agi *agi = bp->b_addr; 2533 xfs_failaddr_t fa; 2534 2535 fa = xfs_agi_verify(bp); 2536 if (fa) { 2537 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2538 return; 2539 } 2540 2541 if (!xfs_has_crc(mp)) 2542 return; 2543 2544 if (bip) 2545 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); 2546 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); 2547 } 2548 2549 const struct xfs_buf_ops xfs_agi_buf_ops = { 2550 .name = "xfs_agi", 2551 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) }, 2552 .verify_read = xfs_agi_read_verify, 2553 .verify_write = xfs_agi_write_verify, 2554 .verify_struct = xfs_agi_verify, 2555 }; 2556 2557 /* 2558 * Read in the allocation group header (inode allocation section) 2559 */ 2560 int 2561 xfs_read_agi( 2562 struct xfs_perag *pag, 2563 struct xfs_trans *tp, 2564 struct xfs_buf **agibpp) 2565 { 2566 struct xfs_mount *mp = pag->pag_mount; 2567 int error; 2568 2569 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno); 2570 2571 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 2572 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)), 2573 XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops); 2574 if (error) 2575 return error; 2576 if (tp) 2577 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF); 2578 2579 xfs_buf_set_ref(*agibpp, XFS_AGI_REF); 2580 return 0; 2581 } 2582 2583 /* 2584 * Read in the agi and initialise the per-ag data. If the caller supplies a 2585 * @agibpp, return the locked AGI buffer to them, otherwise release it. 2586 */ 2587 int 2588 xfs_ialloc_read_agi( 2589 struct xfs_perag *pag, 2590 struct xfs_trans *tp, 2591 struct xfs_buf **agibpp) 2592 { 2593 struct xfs_buf *agibp; 2594 struct xfs_agi *agi; 2595 int error; 2596 2597 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno); 2598 2599 error = xfs_read_agi(pag, tp, &agibp); 2600 if (error) 2601 return error; 2602 2603 agi = agibp->b_addr; 2604 if (!xfs_perag_initialised_agi(pag)) { 2605 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 2606 pag->pagi_count = be32_to_cpu(agi->agi_count); 2607 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate); 2608 } 2609 2610 /* 2611 * It's possible for these to be out of sync if 2612 * we are in the middle of a forced shutdown. 2613 */ 2614 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || 2615 xfs_is_shutdown(pag->pag_mount)); 2616 if (agibpp) 2617 *agibpp = agibp; 2618 else 2619 xfs_trans_brelse(tp, agibp); 2620 return 0; 2621 } 2622 2623 /* Is there an inode record covering a given range of inode numbers? */ 2624 int 2625 xfs_ialloc_has_inode_record( 2626 struct xfs_btree_cur *cur, 2627 xfs_agino_t low, 2628 xfs_agino_t high, 2629 bool *exists) 2630 { 2631 struct xfs_inobt_rec_incore irec; 2632 xfs_agino_t agino; 2633 uint16_t holemask; 2634 int has_record; 2635 int i; 2636 int error; 2637 2638 *exists = false; 2639 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record); 2640 while (error == 0 && has_record) { 2641 error = xfs_inobt_get_rec(cur, &irec, &has_record); 2642 if (error || irec.ir_startino > high) 2643 break; 2644 2645 agino = irec.ir_startino; 2646 holemask = irec.ir_holemask; 2647 for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1, 2648 i++, agino += XFS_INODES_PER_HOLEMASK_BIT) { 2649 if (holemask & 1) 2650 continue; 2651 if (agino + XFS_INODES_PER_HOLEMASK_BIT > low && 2652 agino <= high) { 2653 *exists = true; 2654 return 0; 2655 } 2656 } 2657 2658 error = xfs_btree_increment(cur, 0, &has_record); 2659 } 2660 return error; 2661 } 2662 2663 /* Is there an inode record covering a given extent? */ 2664 int 2665 xfs_ialloc_has_inodes_at_extent( 2666 struct xfs_btree_cur *cur, 2667 xfs_agblock_t bno, 2668 xfs_extlen_t len, 2669 bool *exists) 2670 { 2671 xfs_agino_t low; 2672 xfs_agino_t high; 2673 2674 low = XFS_AGB_TO_AGINO(cur->bc_mp, bno); 2675 high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1; 2676 2677 return xfs_ialloc_has_inode_record(cur, low, high, exists); 2678 } 2679 2680 struct xfs_ialloc_count_inodes { 2681 xfs_agino_t count; 2682 xfs_agino_t freecount; 2683 }; 2684 2685 /* Record inode counts across all inobt records. */ 2686 STATIC int 2687 xfs_ialloc_count_inodes_rec( 2688 struct xfs_btree_cur *cur, 2689 const union xfs_btree_rec *rec, 2690 void *priv) 2691 { 2692 struct xfs_inobt_rec_incore irec; 2693 struct xfs_ialloc_count_inodes *ci = priv; 2694 2695 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec); 2696 ci->count += irec.ir_count; 2697 ci->freecount += irec.ir_freecount; 2698 2699 return 0; 2700 } 2701 2702 /* Count allocated and free inodes under an inobt. */ 2703 int 2704 xfs_ialloc_count_inodes( 2705 struct xfs_btree_cur *cur, 2706 xfs_agino_t *count, 2707 xfs_agino_t *freecount) 2708 { 2709 struct xfs_ialloc_count_inodes ci = {0}; 2710 int error; 2711 2712 ASSERT(cur->bc_btnum == XFS_BTNUM_INO); 2713 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci); 2714 if (error) 2715 return error; 2716 2717 *count = ci.count; 2718 *freecount = ci.freecount; 2719 return 0; 2720 } 2721 2722 /* 2723 * Initialize inode-related geometry information. 2724 * 2725 * Compute the inode btree min and max levels and set maxicount. 2726 * 2727 * Set the inode cluster size. This may still be overridden by the file 2728 * system block size if it is larger than the chosen cluster size. 2729 * 2730 * For v5 filesystems, scale the cluster size with the inode size to keep a 2731 * constant ratio of inode per cluster buffer, but only if mkfs has set the 2732 * inode alignment value appropriately for larger cluster sizes. 2733 * 2734 * Then compute the inode cluster alignment information. 2735 */ 2736 void 2737 xfs_ialloc_setup_geometry( 2738 struct xfs_mount *mp) 2739 { 2740 struct xfs_sb *sbp = &mp->m_sb; 2741 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2742 uint64_t icount; 2743 uint inodes; 2744 2745 igeo->new_diflags2 = 0; 2746 if (xfs_has_bigtime(mp)) 2747 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME; 2748 if (xfs_has_large_extent_counts(mp)) 2749 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64; 2750 2751 /* Compute inode btree geometry. */ 2752 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 2753 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); 2754 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); 2755 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2; 2756 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2; 2757 2758 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK, 2759 sbp->sb_inopblock); 2760 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog; 2761 2762 if (sbp->sb_spino_align) 2763 igeo->ialloc_min_blks = sbp->sb_spino_align; 2764 else 2765 igeo->ialloc_min_blks = igeo->ialloc_blks; 2766 2767 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */ 2768 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG; 2769 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr, 2770 inodes); 2771 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk()); 2772 2773 /* 2774 * Set the maximum inode count for this filesystem, being careful not 2775 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular 2776 * users should never get here due to failing sb verification, but 2777 * certain users (xfs_db) need to be usable even with corrupt metadata. 2778 */ 2779 if (sbp->sb_imax_pct && igeo->ialloc_blks) { 2780 /* 2781 * Make sure the maximum inode count is a multiple 2782 * of the units we allocate inodes in. 2783 */ 2784 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 2785 do_div(icount, 100); 2786 do_div(icount, igeo->ialloc_blks); 2787 igeo->maxicount = XFS_FSB_TO_INO(mp, 2788 icount * igeo->ialloc_blks); 2789 } else { 2790 igeo->maxicount = 0; 2791 } 2792 2793 /* 2794 * Compute the desired size of an inode cluster buffer size, which 2795 * starts at 8K and (on v5 filesystems) scales up with larger inode 2796 * sizes. 2797 * 2798 * Preserve the desired inode cluster size because the sparse inodes 2799 * feature uses that desired size (not the actual size) to compute the 2800 * sparse inode alignment. The mount code validates this value, so we 2801 * cannot change the behavior. 2802 */ 2803 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; 2804 if (xfs_has_v3inodes(mp)) { 2805 int new_size = igeo->inode_cluster_size_raw; 2806 2807 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 2808 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 2809 igeo->inode_cluster_size_raw = new_size; 2810 } 2811 2812 /* Calculate inode cluster ratios. */ 2813 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize) 2814 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp, 2815 igeo->inode_cluster_size_raw); 2816 else 2817 igeo->blocks_per_cluster = 1; 2818 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster); 2819 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster); 2820 2821 /* Calculate inode cluster alignment. */ 2822 if (xfs_has_align(mp) && 2823 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster) 2824 igeo->cluster_align = mp->m_sb.sb_inoalignmt; 2825 else 2826 igeo->cluster_align = 1; 2827 igeo->inoalign_mask = igeo->cluster_align - 1; 2828 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align); 2829 2830 /* 2831 * If we are using stripe alignment, check whether 2832 * the stripe unit is a multiple of the inode alignment 2833 */ 2834 if (mp->m_dalign && igeo->inoalign_mask && 2835 !(mp->m_dalign & igeo->inoalign_mask)) 2836 igeo->ialloc_align = mp->m_dalign; 2837 else 2838 igeo->ialloc_align = 0; 2839 } 2840 2841 /* Compute the location of the root directory inode that is laid out by mkfs. */ 2842 xfs_ino_t 2843 xfs_ialloc_calc_rootino( 2844 struct xfs_mount *mp, 2845 int sunit) 2846 { 2847 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2848 xfs_agblock_t first_bno; 2849 2850 /* 2851 * Pre-calculate the geometry of AG 0. We know what it looks like 2852 * because libxfs knows how to create allocation groups now. 2853 * 2854 * first_bno is the first block in which mkfs could possibly have 2855 * allocated the root directory inode, once we factor in the metadata 2856 * that mkfs formats before it. Namely, the four AG headers... 2857 */ 2858 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize); 2859 2860 /* ...the two free space btree roots... */ 2861 first_bno += 2; 2862 2863 /* ...the inode btree root... */ 2864 first_bno += 1; 2865 2866 /* ...the initial AGFL... */ 2867 first_bno += xfs_alloc_min_freelist(mp, NULL); 2868 2869 /* ...the free inode btree root... */ 2870 if (xfs_has_finobt(mp)) 2871 first_bno++; 2872 2873 /* ...the reverse mapping btree root... */ 2874 if (xfs_has_rmapbt(mp)) 2875 first_bno++; 2876 2877 /* ...the reference count btree... */ 2878 if (xfs_has_reflink(mp)) 2879 first_bno++; 2880 2881 /* 2882 * ...and the log, if it is allocated in the first allocation group. 2883 * 2884 * This can happen with filesystems that only have a single 2885 * allocation group, or very odd geometries created by old mkfs 2886 * versions on very small filesystems. 2887 */ 2888 if (xfs_ag_contains_log(mp, 0)) 2889 first_bno += mp->m_sb.sb_logblocks; 2890 2891 /* 2892 * Now round first_bno up to whatever allocation alignment is given 2893 * by the filesystem or was passed in. 2894 */ 2895 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0) 2896 first_bno = roundup(first_bno, sunit); 2897 else if (xfs_has_align(mp) && 2898 mp->m_sb.sb_inoalignmt > 1) 2899 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); 2900 2901 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); 2902 } 2903 2904 /* 2905 * Ensure there are not sparse inode clusters that cross the new EOAG. 2906 * 2907 * This is a no-op for non-spinode filesystems since clusters are always fully 2908 * allocated and checking the bnobt suffices. However, a spinode filesystem 2909 * could have a record where the upper inodes are free blocks. If those blocks 2910 * were removed from the filesystem, the inode record would extend beyond EOAG, 2911 * which will be flagged as corruption. 2912 */ 2913 int 2914 xfs_ialloc_check_shrink( 2915 struct xfs_perag *pag, 2916 struct xfs_trans *tp, 2917 struct xfs_buf *agibp, 2918 xfs_agblock_t new_length) 2919 { 2920 struct xfs_inobt_rec_incore rec; 2921 struct xfs_btree_cur *cur; 2922 xfs_agino_t agino; 2923 int has; 2924 int error; 2925 2926 if (!xfs_has_sparseinodes(pag->pag_mount)) 2927 return 0; 2928 2929 cur = xfs_inobt_init_cursor(pag, tp, agibp, XFS_BTNUM_INO); 2930 2931 /* Look up the inobt record that would correspond to the new EOFS. */ 2932 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length); 2933 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); 2934 if (error || !has) 2935 goto out; 2936 2937 error = xfs_inobt_get_rec(cur, &rec, &has); 2938 if (error) 2939 goto out; 2940 2941 if (!has) { 2942 error = -EFSCORRUPTED; 2943 goto out; 2944 } 2945 2946 /* If the record covers inodes that would be beyond EOFS, bail out. */ 2947 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { 2948 error = -ENOSPC; 2949 goto out; 2950 } 2951 out: 2952 xfs_btree_del_cursor(cur, error); 2953 return error; 2954 } 2955