1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_bit.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_ialloc.h" 17 #include "xfs_ialloc_btree.h" 18 #include "xfs_alloc.h" 19 #include "xfs_errortag.h" 20 #include "xfs_error.h" 21 #include "xfs_bmap.h" 22 #include "xfs_trans.h" 23 #include "xfs_buf_item.h" 24 #include "xfs_icreate_item.h" 25 #include "xfs_icache.h" 26 #include "xfs_trace.h" 27 #include "xfs_log.h" 28 #include "xfs_rmap.h" 29 #include "xfs_ag.h" 30 31 /* 32 * Lookup a record by ino in the btree given by cur. 33 */ 34 int /* error */ 35 xfs_inobt_lookup( 36 struct xfs_btree_cur *cur, /* btree cursor */ 37 xfs_agino_t ino, /* starting inode of chunk */ 38 xfs_lookup_t dir, /* <=, >=, == */ 39 int *stat) /* success/failure */ 40 { 41 cur->bc_rec.i.ir_startino = ino; 42 cur->bc_rec.i.ir_holemask = 0; 43 cur->bc_rec.i.ir_count = 0; 44 cur->bc_rec.i.ir_freecount = 0; 45 cur->bc_rec.i.ir_free = 0; 46 return xfs_btree_lookup(cur, dir, stat); 47 } 48 49 /* 50 * Update the record referred to by cur to the value given. 51 * This either works (return 0) or gets an EFSCORRUPTED error. 52 */ 53 STATIC int /* error */ 54 xfs_inobt_update( 55 struct xfs_btree_cur *cur, /* btree cursor */ 56 xfs_inobt_rec_incore_t *irec) /* btree record */ 57 { 58 union xfs_btree_rec rec; 59 60 rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); 61 if (xfs_has_sparseinodes(cur->bc_mp)) { 62 rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask); 63 rec.inobt.ir_u.sp.ir_count = irec->ir_count; 64 rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount; 65 } else { 66 /* ir_holemask/ir_count not supported on-disk */ 67 rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount); 68 } 69 rec.inobt.ir_free = cpu_to_be64(irec->ir_free); 70 return xfs_btree_update(cur, &rec); 71 } 72 73 /* Convert on-disk btree record to incore inobt record. */ 74 void 75 xfs_inobt_btrec_to_irec( 76 struct xfs_mount *mp, 77 const union xfs_btree_rec *rec, 78 struct xfs_inobt_rec_incore *irec) 79 { 80 irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); 81 if (xfs_has_sparseinodes(mp)) { 82 irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask); 83 irec->ir_count = rec->inobt.ir_u.sp.ir_count; 84 irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount; 85 } else { 86 /* 87 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded 88 * values for full inode chunks. 89 */ 90 irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL; 91 irec->ir_count = XFS_INODES_PER_CHUNK; 92 irec->ir_freecount = 93 be32_to_cpu(rec->inobt.ir_u.f.ir_freecount); 94 } 95 irec->ir_free = be64_to_cpu(rec->inobt.ir_free); 96 } 97 98 /* Simple checks for inode records. 
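 *
 * The caller gets __this_address back if any of the following sanity checks
 * fail: ir_startino must be a valid inode number for this AG, ir_count must
 * lie between XFS_INODES_PER_HOLEMASK_BIT and XFS_INODES_PER_CHUNK, the free
 * count cannot exceed XFS_INODES_PER_CHUNK, and the population count of the
 * free mask (restricted to the allocated regions for sparse records) must
 * equal ir_freecount.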
*/ 99 xfs_failaddr_t 100 xfs_inobt_check_irec( 101 struct xfs_btree_cur *cur, 102 const struct xfs_inobt_rec_incore *irec) 103 { 104 uint64_t realfree; 105 106 if (!xfs_verify_agino(cur->bc_ag.pag, irec->ir_startino)) 107 return __this_address; 108 if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT || 109 irec->ir_count > XFS_INODES_PER_CHUNK) 110 return __this_address; 111 if (irec->ir_freecount > XFS_INODES_PER_CHUNK) 112 return __this_address; 113 114 /* if there are no holes, return the first available offset */ 115 if (!xfs_inobt_issparse(irec->ir_holemask)) 116 realfree = irec->ir_free; 117 else 118 realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec); 119 if (hweight64(realfree) != irec->ir_freecount) 120 return __this_address; 121 122 return NULL; 123 } 124 125 static inline int 126 xfs_inobt_complain_bad_rec( 127 struct xfs_btree_cur *cur, 128 xfs_failaddr_t fa, 129 const struct xfs_inobt_rec_incore *irec) 130 { 131 struct xfs_mount *mp = cur->bc_mp; 132 133 xfs_warn(mp, 134 "%s Inode BTree record corruption in AG %d detected at %pS!", 135 cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", 136 cur->bc_ag.pag->pag_agno, fa); 137 xfs_warn(mp, 138 "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x", 139 irec->ir_startino, irec->ir_count, irec->ir_freecount, 140 irec->ir_free, irec->ir_holemask); 141 return -EFSCORRUPTED; 142 } 143 144 /* 145 * Get the data from the pointed-to record. 146 */ 147 int 148 xfs_inobt_get_rec( 149 struct xfs_btree_cur *cur, 150 struct xfs_inobt_rec_incore *irec, 151 int *stat) 152 { 153 struct xfs_mount *mp = cur->bc_mp; 154 union xfs_btree_rec *rec; 155 xfs_failaddr_t fa; 156 int error; 157 158 error = xfs_btree_get_rec(cur, &rec, stat); 159 if (error || *stat == 0) 160 return error; 161 162 xfs_inobt_btrec_to_irec(mp, rec, irec); 163 fa = xfs_inobt_check_irec(cur, irec); 164 if (fa) 165 return xfs_inobt_complain_bad_rec(cur, fa, irec); 166 167 return 0; 168 } 169 170 /* 171 * Insert a single inobt record. Cursor must already point to desired location. 172 */ 173 int 174 xfs_inobt_insert_rec( 175 struct xfs_btree_cur *cur, 176 uint16_t holemask, 177 uint8_t count, 178 int32_t freecount, 179 xfs_inofree_t free, 180 int *stat) 181 { 182 cur->bc_rec.i.ir_holemask = holemask; 183 cur->bc_rec.i.ir_count = count; 184 cur->bc_rec.i.ir_freecount = freecount; 185 cur->bc_rec.i.ir_free = free; 186 return xfs_btree_insert(cur, stat); 187 } 188 189 /* 190 * Insert records describing a newly allocated inode chunk into the inobt. 191 */ 192 STATIC int 193 xfs_inobt_insert( 194 struct xfs_perag *pag, 195 struct xfs_trans *tp, 196 struct xfs_buf *agbp, 197 xfs_agino_t newino, 198 xfs_agino_t newlen, 199 xfs_btnum_t btnum) 200 { 201 struct xfs_btree_cur *cur; 202 xfs_agino_t thisino; 203 int i; 204 int error; 205 206 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 207 208 for (thisino = newino; 209 thisino < newino + newlen; 210 thisino += XFS_INODES_PER_CHUNK) { 211 error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i); 212 if (error) { 213 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 214 return error; 215 } 216 ASSERT(i == 0); 217 218 error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL, 219 XFS_INODES_PER_CHUNK, 220 XFS_INODES_PER_CHUNK, 221 XFS_INOBT_ALL_FREE, &i); 222 if (error) { 223 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 224 return error; 225 } 226 ASSERT(i == 1); 227 } 228 229 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 230 231 return 0; 232 } 233 234 /* 235 * Verify that the number of free inodes in the AGI is correct. 
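 *
 * (DEBUG-only sanity check: when the btree is a single level, walk every
 * record in the AG, sum the per-record free counts, and assert that the
 * total matches pag->pagi_freecount unless the filesystem has already shut
 * down.  In non-DEBUG builds this compiles away to 0.)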
 */
#ifdef DEBUG
static int
xfs_check_agi_freecount(
	struct xfs_btree_cur	*cur)
{
	if (cur->bc_nlevels == 1) {
		xfs_inobt_rec_incore_t	rec;
		int			freecount = 0;
		int			error;
		int			i;

		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
		if (error)
			return error;

		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				return error;

			if (i) {
				freecount += rec.ir_freecount;
				error = xfs_btree_increment(cur, 0, &i);
				if (error)
					return error;
			}
		} while (i == 1);

		if (!xfs_is_shutdown(cur->bc_mp))
			ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);
	}
	return 0;
}
#else
#define xfs_check_agi_freecount(cur)	0
#endif

/*
 * Initialise a new set of inodes. When called without a transaction context
 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 * than logging them (which in a transaction context puts them into the AIL
 * for writeback rather than the xfsbufd queue).
 */
int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct list_head	*buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			nbufs;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;
	int			error;

	/*
	 * Loop over the new block(s), filling in the inodes. For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
	nbufs = length / M_IGEO(mp)->blocks_per_cluster;

	/*
	 * Figure out what version number to use in the inodes we create. If
	 * the superblock version has caught up to the one that supports the new
	 * inode format, then use the new inode version. Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
	 * For v3 inodes, we also need to write the inode number into the inode,
	 * so calculate the first inode number of the chunk here as
	 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inodes we log the entire buffer rather than just the
	 * inode cores.
	 */
	if (xfs_has_v3inodes(mp)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

		/*
		 * Log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
		 * need to log the physical changes to the inode buffers as log
		 * recovery will know what initialisation is actually needed.
		 * Hence we only need to log the buffers as "ordered" buffers so
		 * they track in the AIL as if they were physically logged.
		 */
		if (tp)
			xfs_icreate_log(tp, agno, agbno, icount,
					mp->m_sb.sb_inodesize, length, gen);
	} else
		version = 2;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
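		 *
		 * Each iteration grabs one cluster-sized, unmapped buffer: the
		 * disk address is agbno plus j clusters, and the buffer spans
		 * blocks_per_cluster filesystem blocks so a whole inode
		 * cluster can be initialised in one go.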
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
				(j * M_IGEO(mp)->blocks_per_cluster));
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
				mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
				XBF_UNMAPPED, &fbuf);
		if (error)
			return error;

		/* Initialize the inode buffers and log them appropriately. */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid,
					  &mp->m_sb.sb_meta_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else if (tp) {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
					  ioffset + XFS_DINODE_SIZE(mp) - 1);
			}
		}

		if (tp) {
			/*
			 * Mark the buffer as an inode allocation buffer so it
			 * sticks in the AIL at the point of this allocation
			 * transaction. This ensures they are on disk before
			 * the tail of the log can be moved past this
			 * transaction (i.e. by preventing relogging from moving
			 * it forward in the log).
			 */
			xfs_trans_inode_alloc_buf(tp, fbuf);
			if (version == 3) {
				/*
				 * Mark the buffer as ordered so that it is not
				 * physically logged in the transaction but is
				 * still tracked in the AIL as part of the
				 * transaction and pins the log appropriately.
				 */
				xfs_trans_ordered_buf(tp, fbuf);
			}
		} else {
			fbuf->b_flags |= XBF_DONE;
			xfs_buf_delwri_queue(fbuf, buffer_list);
			xfs_buf_relse(fbuf);
		}
	}
	return 0;
}

/*
 * Align startino and allocmask for a recently allocated sparse chunk such that
 * they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * Background:
 *
 * When enabled, sparse inode support increases the inode alignment from cluster
 * size to inode chunk size. This means that the minimum range between two
 * non-adjacent inode records in the inobt is large enough for a full inode
 * record. This allows for cluster sized, cluster aligned block allocation
 * without needing to worry about whether the resulting inode record overlaps
 * with another record in the tree. Without this basic rule, we would have to
 * deal with the consequences of overlap by potentially undoing recent
 * allocations in the inode allocation codepath.
 *
 * Because of this alignment rule (which is enforced on mount), there are two
 * inobt possibilities for newly allocated sparse chunks. One is that the
 * aligned inode record for the chunk covers a range of inodes not already
 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 * other is that a record already exists at the aligned startino that considers
 * the newly allocated range as sparse. In the latter case, record content is
 * merged in the hope that sparse inode chunks fill to full chunks over time.
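 *
 * As an illustrative example (geometry chosen for simplicity, not taken from
 * any particular filesystem): with 8 inodes per block and an inode alignment
 * of 8 blocks, a 2-block (16 inode) sparse allocation at agbno 100 is 4
 * blocks past the chunk boundary at agbno 96.  xfs_align_sparse_ino() pulls
 * startino back by 4 blocks worth of inodes (32), and shifts the allocmask
 * left by 32 / XFS_INODES_PER_HOLEMASK_BIT = 8 bits (0x000f -> 0x0f00) so
 * the mask still describes the same physical inodes relative to the new,
 * chunk-aligned startino.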
431 */ 432 STATIC void 433 xfs_align_sparse_ino( 434 struct xfs_mount *mp, 435 xfs_agino_t *startino, 436 uint16_t *allocmask) 437 { 438 xfs_agblock_t agbno; 439 xfs_agblock_t mod; 440 int offset; 441 442 agbno = XFS_AGINO_TO_AGBNO(mp, *startino); 443 mod = agbno % mp->m_sb.sb_inoalignmt; 444 if (!mod) 445 return; 446 447 /* calculate the inode offset and align startino */ 448 offset = XFS_AGB_TO_AGINO(mp, mod); 449 *startino -= offset; 450 451 /* 452 * Since startino has been aligned down, left shift allocmask such that 453 * it continues to represent the same physical inodes relative to the 454 * new startino. 455 */ 456 *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT; 457 } 458 459 /* 460 * Determine whether the source inode record can merge into the target. Both 461 * records must be sparse, the inode ranges must match and there must be no 462 * allocation overlap between the records. 463 */ 464 STATIC bool 465 __xfs_inobt_can_merge( 466 struct xfs_inobt_rec_incore *trec, /* tgt record */ 467 struct xfs_inobt_rec_incore *srec) /* src record */ 468 { 469 uint64_t talloc; 470 uint64_t salloc; 471 472 /* records must cover the same inode range */ 473 if (trec->ir_startino != srec->ir_startino) 474 return false; 475 476 /* both records must be sparse */ 477 if (!xfs_inobt_issparse(trec->ir_holemask) || 478 !xfs_inobt_issparse(srec->ir_holemask)) 479 return false; 480 481 /* both records must track some inodes */ 482 if (!trec->ir_count || !srec->ir_count) 483 return false; 484 485 /* can't exceed capacity of a full record */ 486 if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK) 487 return false; 488 489 /* verify there is no allocation overlap */ 490 talloc = xfs_inobt_irec_to_allocmask(trec); 491 salloc = xfs_inobt_irec_to_allocmask(srec); 492 if (talloc & salloc) 493 return false; 494 495 return true; 496 } 497 498 /* 499 * Merge the source inode record into the target. The caller must call 500 * __xfs_inobt_can_merge() to ensure the merge is valid. 501 */ 502 STATIC void 503 __xfs_inobt_rec_merge( 504 struct xfs_inobt_rec_incore *trec, /* target */ 505 struct xfs_inobt_rec_incore *srec) /* src */ 506 { 507 ASSERT(trec->ir_startino == srec->ir_startino); 508 509 /* combine the counts */ 510 trec->ir_count += srec->ir_count; 511 trec->ir_freecount += srec->ir_freecount; 512 513 /* 514 * Merge the holemask and free mask. For both fields, 0 bits refer to 515 * allocated inodes. We combine the allocated ranges with bitwise AND. 516 */ 517 trec->ir_holemask &= srec->ir_holemask; 518 trec->ir_free &= srec->ir_free; 519 } 520 521 /* 522 * Insert a new sparse inode chunk into the associated inode btree. The inode 523 * record for the sparse chunk is pre-aligned to a startino that should match 524 * any pre-existing sparse inode record in the tree. This allows sparse chunks 525 * to fill over time. 526 * 527 * This function supports two modes of handling preexisting records depending on 528 * the merge flag. If merge is true, the provided record is merged with the 529 * existing record and updated in place. The merged record is returned in nrec. 530 * If merge is false, an existing record is replaced with the provided record. 531 * If no preexisting record exists, the provided record is always inserted. 532 * 533 * It is considered corruption if a merge is requested and not possible. Given 534 * the sparse inode alignment constraints, this should never happen. 
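 *
 * The caller in xfs_ialloc_ag_alloc() uses both modes: the inobt insert is
 * done with merge == true so a newly allocated sparse chunk can be folded
 * into an existing sparse record, while the finobt insert that follows uses
 * merge == false and simply overwrites any existing record with the already
 * merged result.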
535 */ 536 STATIC int 537 xfs_inobt_insert_sprec( 538 struct xfs_perag *pag, 539 struct xfs_trans *tp, 540 struct xfs_buf *agbp, 541 int btnum, 542 struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */ 543 bool merge) /* merge or replace */ 544 { 545 struct xfs_mount *mp = pag->pag_mount; 546 struct xfs_btree_cur *cur; 547 int error; 548 int i; 549 struct xfs_inobt_rec_incore rec; 550 551 cur = xfs_inobt_init_cursor(pag, tp, agbp, btnum); 552 553 /* the new record is pre-aligned so we know where to look */ 554 error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i); 555 if (error) 556 goto error; 557 /* if nothing there, insert a new record and return */ 558 if (i == 0) { 559 error = xfs_inobt_insert_rec(cur, nrec->ir_holemask, 560 nrec->ir_count, nrec->ir_freecount, 561 nrec->ir_free, &i); 562 if (error) 563 goto error; 564 if (XFS_IS_CORRUPT(mp, i != 1)) { 565 error = -EFSCORRUPTED; 566 goto error; 567 } 568 569 goto out; 570 } 571 572 /* 573 * A record exists at this startino. Merge or replace the record 574 * depending on what we've been asked to do. 575 */ 576 if (merge) { 577 error = xfs_inobt_get_rec(cur, &rec, &i); 578 if (error) 579 goto error; 580 if (XFS_IS_CORRUPT(mp, i != 1)) { 581 error = -EFSCORRUPTED; 582 goto error; 583 } 584 if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) { 585 error = -EFSCORRUPTED; 586 goto error; 587 } 588 589 /* 590 * This should never fail. If we have coexisting records that 591 * cannot merge, something is seriously wrong. 592 */ 593 if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) { 594 error = -EFSCORRUPTED; 595 goto error; 596 } 597 598 trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino, 599 rec.ir_holemask, nrec->ir_startino, 600 nrec->ir_holemask); 601 602 /* merge to nrec to output the updated record */ 603 __xfs_inobt_rec_merge(nrec, &rec); 604 605 trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino, 606 nrec->ir_holemask); 607 608 error = xfs_inobt_rec_check_count(mp, nrec); 609 if (error) 610 goto error; 611 } 612 613 error = xfs_inobt_update(cur, nrec); 614 if (error) 615 goto error; 616 617 out: 618 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 619 return 0; 620 error: 621 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 622 return error; 623 } 624 625 /* 626 * Allocate new inodes in the allocation group specified by agbp. Returns 0 if 627 * inodes were allocated in this AG; -EAGAIN if there was no space in this AG so 628 * the caller knows it can try another AG, a hard -ENOSPC when over the maximum 629 * inode count threshold, or the usual negative error code for other errors. 630 */ 631 STATIC int 632 xfs_ialloc_ag_alloc( 633 struct xfs_perag *pag, 634 struct xfs_trans *tp, 635 struct xfs_buf *agbp) 636 { 637 struct xfs_agi *agi; 638 struct xfs_alloc_arg args; 639 int error; 640 xfs_agino_t newino; /* new first inode's number */ 641 xfs_agino_t newlen; /* new number of inodes */ 642 int isaligned = 0; /* inode allocation at stripe */ 643 /* unit boundary */ 644 /* init. 
to full chunk */
	struct xfs_inobt_rec_incore rec;
	struct xfs_ino_geometry	*igeo = M_IGEO(tp->t_mountp);
	uint16_t		allocmask = (uint16_t) -1;
	int			do_sparse = 0;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = NULLFSBLOCK;
	args.oinfo = XFS_RMAP_OINFO_INODES;
	args.pag = pag;

#ifdef DEBUG
	/* randomly do sparse inode allocations */
	if (xfs_has_sparseinodes(tp->t_mountp) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks)
		do_sparse = get_random_u32_below(2);
#endif

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = igeo->ialloc_inos;
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
							igeo->maxicount)
		return -ENOSPC;
	args.minlen = args.maxlen = igeo->ialloc_blks;
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = agbp->b_addr;
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
		     igeo->ialloc_blks;
	if (do_sparse)
		goto sparse_alloc;
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1; however, we
		 * need to take cluster alignment into account when fixing up
		 * the freelist. Use the minalignslop field to indicate that
		 * extra blocks might be required for alignment, but not to use
		 * them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = igeo->cluster_align - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = igeo->inobt_maxlevels;
		error = xfs_alloc_vextent_exact_bno(&args,
				XFS_AGB_TO_FSB(args.mp, pag->pag_agno,
						args.agbno));
		if (error)
			return error;

		/*
		 * This request might have dirtied the transaction if the AG can
		 * satisfy the request, but the exact block was not available.
		 * If the allocation did fail, subsequent requests will relax
		 * the exact agbno requirement and increase the alignment
		 * instead. It is critical that the total size of the request
		 * (len + alignment + slop) does not increase from this point
		 * on, so reset minalignslop to ensure it is not included in
		 * subsequent requests.
		 */
		args.minalignslop = 0;
	}

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so we don't need alignment anyway.
		 */
		isaligned = 0;
		if (igeo->ialloc_align) {
			ASSERT(!xfs_has_noalign(args.mp));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = igeo->cluster_align;
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
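		 *
		 * (minleft = inobt_maxlevels keeps enough free blocks in the
		 * AG after this allocation that a subsequent inobt insert can
		 * split one block per level of the tree without running out
		 * of space.)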
748 */ 749 args.minleft = igeo->inobt_maxlevels; 750 error = xfs_alloc_vextent_near_bno(&args, 751 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 752 be32_to_cpu(agi->agi_root))); 753 if (error) 754 return error; 755 } 756 757 /* 758 * If stripe alignment is turned on, then try again with cluster 759 * alignment. 760 */ 761 if (isaligned && args.fsbno == NULLFSBLOCK) { 762 args.alignment = igeo->cluster_align; 763 error = xfs_alloc_vextent_near_bno(&args, 764 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 765 be32_to_cpu(agi->agi_root))); 766 if (error) 767 return error; 768 } 769 770 /* 771 * Finally, try a sparse allocation if the filesystem supports it and 772 * the sparse allocation length is smaller than a full chunk. 773 */ 774 if (xfs_has_sparseinodes(args.mp) && 775 igeo->ialloc_min_blks < igeo->ialloc_blks && 776 args.fsbno == NULLFSBLOCK) { 777 sparse_alloc: 778 args.alignment = args.mp->m_sb.sb_spino_align; 779 args.prod = 1; 780 781 args.minlen = igeo->ialloc_min_blks; 782 args.maxlen = args.minlen; 783 784 /* 785 * The inode record will be aligned to full chunk size. We must 786 * prevent sparse allocation from AG boundaries that result in 787 * invalid inode records, such as records that start at agbno 0 788 * or extend beyond the AG. 789 * 790 * Set min agbno to the first aligned, non-zero agbno and max to 791 * the last aligned agbno that is at least one full chunk from 792 * the end of the AG. 793 */ 794 args.min_agbno = args.mp->m_sb.sb_inoalignmt; 795 args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, 796 args.mp->m_sb.sb_inoalignmt) - 797 igeo->ialloc_blks; 798 799 error = xfs_alloc_vextent_near_bno(&args, 800 XFS_AGB_TO_FSB(args.mp, pag->pag_agno, 801 be32_to_cpu(agi->agi_root))); 802 if (error) 803 return error; 804 805 newlen = XFS_AGB_TO_AGINO(args.mp, args.len); 806 ASSERT(newlen <= XFS_INODES_PER_CHUNK); 807 allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1; 808 } 809 810 if (args.fsbno == NULLFSBLOCK) 811 return -EAGAIN; 812 813 ASSERT(args.len == args.minlen); 814 815 /* 816 * Stamp and write the inode buffers. 817 * 818 * Seed the new inode cluster with a random generation number. This 819 * prevents short-term reuse of generation numbers if a chunk is 820 * freed and then immediately reallocated. We use random numbers 821 * rather than a linear progression to prevent the next generation 822 * number from being easily guessable. 823 */ 824 error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno, 825 args.agbno, args.len, get_random_u32()); 826 827 if (error) 828 return error; 829 /* 830 * Convert the results. 831 */ 832 newino = XFS_AGB_TO_AGINO(args.mp, args.agbno); 833 834 if (xfs_inobt_issparse(~allocmask)) { 835 /* 836 * We've allocated a sparse chunk. Align the startino and mask. 837 */ 838 xfs_align_sparse_ino(args.mp, &newino, &allocmask); 839 840 rec.ir_startino = newino; 841 rec.ir_holemask = ~allocmask; 842 rec.ir_count = newlen; 843 rec.ir_freecount = newlen; 844 rec.ir_free = XFS_INOBT_ALL_FREE; 845 846 /* 847 * Insert the sparse record into the inobt and allow for a merge 848 * if necessary. If a merge does occur, rec is updated to the 849 * merged record. 
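		 *
		 * For example (illustrative masks): if the existing sparse
		 * record describes inodes 0-15 (holemask 0xfff0) and the new
		 * chunk provides inodes 32-47 (holemask 0xf0ff), the merged
		 * record has holemask 0xfff0 & 0xf0ff = 0xf0f0, ir_count of
		 * 32, the free counts summed, and the free masks ANDed, so
		 * only inodes 16-31 and 48-63 remain holes.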
850 */ 851 error = xfs_inobt_insert_sprec(pag, tp, agbp, 852 XFS_BTNUM_INO, &rec, true); 853 if (error == -EFSCORRUPTED) { 854 xfs_alert(args.mp, 855 "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u", 856 XFS_AGINO_TO_INO(args.mp, pag->pag_agno, 857 rec.ir_startino), 858 rec.ir_holemask, rec.ir_count); 859 xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE); 860 } 861 if (error) 862 return error; 863 864 /* 865 * We can't merge the part we've just allocated as for the inobt 866 * due to finobt semantics. The original record may or may not 867 * exist independent of whether physical inodes exist in this 868 * sparse chunk. 869 * 870 * We must update the finobt record based on the inobt record. 871 * rec contains the fully merged and up to date inobt record 872 * from the previous call. Set merge false to replace any 873 * existing record with this one. 874 */ 875 if (xfs_has_finobt(args.mp)) { 876 error = xfs_inobt_insert_sprec(pag, tp, agbp, 877 XFS_BTNUM_FINO, &rec, false); 878 if (error) 879 return error; 880 } 881 } else { 882 /* full chunk - insert new records to both btrees */ 883 error = xfs_inobt_insert(pag, tp, agbp, newino, newlen, 884 XFS_BTNUM_INO); 885 if (error) 886 return error; 887 888 if (xfs_has_finobt(args.mp)) { 889 error = xfs_inobt_insert(pag, tp, agbp, newino, 890 newlen, XFS_BTNUM_FINO); 891 if (error) 892 return error; 893 } 894 } 895 896 /* 897 * Update AGI counts and newino. 898 */ 899 be32_add_cpu(&agi->agi_count, newlen); 900 be32_add_cpu(&agi->agi_freecount, newlen); 901 pag->pagi_freecount += newlen; 902 pag->pagi_count += newlen; 903 agi->agi_newino = cpu_to_be32(newino); 904 905 /* 906 * Log allocation group header fields 907 */ 908 xfs_ialloc_log_agi(tp, agbp, 909 XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); 910 /* 911 * Modify/log superblock values for inode count and inode free count. 912 */ 913 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); 914 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); 915 return 0; 916 } 917 918 /* 919 * Try to retrieve the next record to the left/right from the current one. 920 */ 921 STATIC int 922 xfs_ialloc_next_rec( 923 struct xfs_btree_cur *cur, 924 xfs_inobt_rec_incore_t *rec, 925 int *done, 926 int left) 927 { 928 int error; 929 int i; 930 931 if (left) 932 error = xfs_btree_decrement(cur, 0, &i); 933 else 934 error = xfs_btree_increment(cur, 0, &i); 935 936 if (error) 937 return error; 938 *done = !i; 939 if (i) { 940 error = xfs_inobt_get_rec(cur, rec, &i); 941 if (error) 942 return error; 943 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 944 return -EFSCORRUPTED; 945 } 946 947 return 0; 948 } 949 950 STATIC int 951 xfs_ialloc_get_rec( 952 struct xfs_btree_cur *cur, 953 xfs_agino_t agino, 954 xfs_inobt_rec_incore_t *rec, 955 int *done) 956 { 957 int error; 958 int i; 959 960 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); 961 if (error) 962 return error; 963 *done = !i; 964 if (i) { 965 error = xfs_inobt_get_rec(cur, rec, &i); 966 if (error) 967 return error; 968 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 969 return -EFSCORRUPTED; 970 } 971 972 return 0; 973 } 974 975 /* 976 * Return the offset of the first free inode in the record. If the inode chunk 977 * is sparsely allocated, we convert the record holemask to inode granularity 978 * and mask off the unallocated regions from the inode free mask. 
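 *
 * For example (illustrative values): a sparse record with holemask 0x0003
 * has no physical inodes backing offsets 0-7, so even if stray bits 0-7 are
 * set in ir_free they are masked away by the allocation bitmap and the
 * first offset returned comes from the allocated region at offset 8 or
 * beyond.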
979 */ 980 STATIC int 981 xfs_inobt_first_free_inode( 982 struct xfs_inobt_rec_incore *rec) 983 { 984 xfs_inofree_t realfree; 985 986 /* if there are no holes, return the first available offset */ 987 if (!xfs_inobt_issparse(rec->ir_holemask)) 988 return xfs_lowbit64(rec->ir_free); 989 990 realfree = xfs_inobt_irec_to_allocmask(rec); 991 realfree &= rec->ir_free; 992 993 return xfs_lowbit64(realfree); 994 } 995 996 /* 997 * Allocate an inode using the inobt-only algorithm. 998 */ 999 STATIC int 1000 xfs_dialloc_ag_inobt( 1001 struct xfs_perag *pag, 1002 struct xfs_trans *tp, 1003 struct xfs_buf *agbp, 1004 xfs_ino_t parent, 1005 xfs_ino_t *inop) 1006 { 1007 struct xfs_mount *mp = tp->t_mountp; 1008 struct xfs_agi *agi = agbp->b_addr; 1009 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1010 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1011 struct xfs_btree_cur *cur, *tcur; 1012 struct xfs_inobt_rec_incore rec, trec; 1013 xfs_ino_t ino; 1014 int error; 1015 int offset; 1016 int i, j; 1017 int searchdistance = 10; 1018 1019 ASSERT(xfs_perag_initialised_agi(pag)); 1020 ASSERT(xfs_perag_allows_inodes(pag)); 1021 ASSERT(pag->pagi_freecount > 0); 1022 1023 restart_pagno: 1024 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1025 /* 1026 * If pagino is 0 (this is the root inode allocation) use newino. 1027 * This must work because we've just allocated some. 1028 */ 1029 if (!pagino) 1030 pagino = be32_to_cpu(agi->agi_newino); 1031 1032 error = xfs_check_agi_freecount(cur); 1033 if (error) 1034 goto error0; 1035 1036 /* 1037 * If in the same AG as the parent, try to get near the parent. 1038 */ 1039 if (pagno == pag->pag_agno) { 1040 int doneleft; /* done, to the left */ 1041 int doneright; /* done, to the right */ 1042 1043 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 1044 if (error) 1045 goto error0; 1046 if (XFS_IS_CORRUPT(mp, i != 1)) { 1047 error = -EFSCORRUPTED; 1048 goto error0; 1049 } 1050 1051 error = xfs_inobt_get_rec(cur, &rec, &j); 1052 if (error) 1053 goto error0; 1054 if (XFS_IS_CORRUPT(mp, j != 1)) { 1055 error = -EFSCORRUPTED; 1056 goto error0; 1057 } 1058 1059 if (rec.ir_freecount > 0) { 1060 /* 1061 * Found a free inode in the same chunk 1062 * as the parent, done. 1063 */ 1064 goto alloc_inode; 1065 } 1066 1067 1068 /* 1069 * In the same AG as parent, but parent's chunk is full. 1070 */ 1071 1072 /* duplicate the cursor, search left & right simultaneously */ 1073 error = xfs_btree_dup_cursor(cur, &tcur); 1074 if (error) 1075 goto error0; 1076 1077 /* 1078 * Skip to last blocks looked up if same parent inode. 1079 */ 1080 if (pagino != NULLAGINO && 1081 pag->pagl_pagino == pagino && 1082 pag->pagl_leftrec != NULLAGINO && 1083 pag->pagl_rightrec != NULLAGINO) { 1084 error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, 1085 &trec, &doneleft); 1086 if (error) 1087 goto error1; 1088 1089 error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, 1090 &rec, &doneright); 1091 if (error) 1092 goto error1; 1093 } else { 1094 /* search left with tcur, back up 1 record */ 1095 error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); 1096 if (error) 1097 goto error1; 1098 1099 /* search right with cur, go forward 1 record. */ 1100 error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); 1101 if (error) 1102 goto error1; 1103 } 1104 1105 /* 1106 * Loop until we find an inode chunk with a free inode. 
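		 *
		 * Each pass of the loop below considers the nearer of the
		 * left (tcur) and right (cur) candidate records and takes the
		 * first one with a nonzero ir_freecount; searchdistance bounds
		 * how many records we examine before giving up and falling
		 * back to the whole-AG scan further down.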
		 */
		while (--searchdistance > 0 && (!doneleft || !doneright)) {
			int	useleft;  /* using left inode chunk this time */

			/* figure out the closer block if both are valid. */
			if (!doneleft && !doneright) {
				useleft = pagino -
				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
				  rec.ir_startino - pagino;
			} else {
				useleft = !doneleft;
			}

			/* free inodes to the left? */
			if (useleft && trec.ir_freecount) {
				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
				cur = tcur;

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				rec = trec;
				goto alloc_inode;
			}

			/* free inodes to the right? */
			if (!useleft && rec.ir_freecount) {
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* get next record to check */
			if (useleft) {
				error = xfs_ialloc_next_rec(tcur, &trec,
							    &doneleft, 1);
			} else {
				error = xfs_ialloc_next_rec(cur, &rec,
							    &doneright, 0);
			}
			if (error)
				goto error1;
		}

		if (searchdistance <= 0) {
			/*
			 * Not in range - save last search
			 * location and allocate a new inode
			 */
			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
			pag->pagl_leftrec = trec.ir_startino;
			pag->pagl_rightrec = rec.ir_startino;
			pag->pagl_pagino = pagino;

		} else {
			/*
			 * We've reached the end of the btree. Because we are
			 * only searching a small chunk of the btree each
			 * search, there are obviously free inodes closer to
			 * the parent inode than we are now. Restart the
			 * search again.
			 */
			pag->pagl_pagino = NULLAGINO;
			pag->pagl_leftrec = NULLAGINO;
			pag->pagl_rightrec = NULLAGINO;
			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			goto restart_pagno;
		}
	}

	/*
	 * In a different AG from the parent.
	 * See if the most recently allocated block has any free.
	 */
	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			goto error0;

		if (i == 1) {
			error = xfs_inobt_get_rec(cur, &rec, &j);
			if (error)
				goto error0;

			if (j == 1 && rec.ir_freecount > 0) {
				/*
				 * The last chunk allocated in the group
				 * still has a free inode.
1200 */ 1201 goto alloc_inode; 1202 } 1203 } 1204 } 1205 1206 /* 1207 * None left in the last group, search the whole AG 1208 */ 1209 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1210 if (error) 1211 goto error0; 1212 if (XFS_IS_CORRUPT(mp, i != 1)) { 1213 error = -EFSCORRUPTED; 1214 goto error0; 1215 } 1216 1217 for (;;) { 1218 error = xfs_inobt_get_rec(cur, &rec, &i); 1219 if (error) 1220 goto error0; 1221 if (XFS_IS_CORRUPT(mp, i != 1)) { 1222 error = -EFSCORRUPTED; 1223 goto error0; 1224 } 1225 if (rec.ir_freecount > 0) 1226 break; 1227 error = xfs_btree_increment(cur, 0, &i); 1228 if (error) 1229 goto error0; 1230 if (XFS_IS_CORRUPT(mp, i != 1)) { 1231 error = -EFSCORRUPTED; 1232 goto error0; 1233 } 1234 } 1235 1236 alloc_inode: 1237 offset = xfs_inobt_first_free_inode(&rec); 1238 ASSERT(offset >= 0); 1239 ASSERT(offset < XFS_INODES_PER_CHUNK); 1240 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1241 XFS_INODES_PER_CHUNK) == 0); 1242 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1243 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1244 rec.ir_freecount--; 1245 error = xfs_inobt_update(cur, &rec); 1246 if (error) 1247 goto error0; 1248 be32_add_cpu(&agi->agi_freecount, -1); 1249 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 1250 pag->pagi_freecount--; 1251 1252 error = xfs_check_agi_freecount(cur); 1253 if (error) 1254 goto error0; 1255 1256 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1257 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); 1258 *inop = ino; 1259 return 0; 1260 error1: 1261 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 1262 error0: 1263 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 1264 return error; 1265 } 1266 1267 /* 1268 * Use the free inode btree to allocate an inode based on distance from the 1269 * parent. Note that the provided cursor may be deleted and replaced. 1270 */ 1271 STATIC int 1272 xfs_dialloc_ag_finobt_near( 1273 xfs_agino_t pagino, 1274 struct xfs_btree_cur **ocur, 1275 struct xfs_inobt_rec_incore *rec) 1276 { 1277 struct xfs_btree_cur *lcur = *ocur; /* left search cursor */ 1278 struct xfs_btree_cur *rcur; /* right search cursor */ 1279 struct xfs_inobt_rec_incore rrec; 1280 int error; 1281 int i, j; 1282 1283 error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i); 1284 if (error) 1285 return error; 1286 1287 if (i == 1) { 1288 error = xfs_inobt_get_rec(lcur, rec, &i); 1289 if (error) 1290 return error; 1291 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1)) 1292 return -EFSCORRUPTED; 1293 1294 /* 1295 * See if we've landed in the parent inode record. The finobt 1296 * only tracks chunks with at least one free inode, so record 1297 * existence is enough. 1298 */ 1299 if (pagino >= rec->ir_startino && 1300 pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK)) 1301 return 0; 1302 } 1303 1304 error = xfs_btree_dup_cursor(lcur, &rcur); 1305 if (error) 1306 return error; 1307 1308 error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j); 1309 if (error) 1310 goto error_rcur; 1311 if (j == 1) { 1312 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1313 if (error) 1314 goto error_rcur; 1315 if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) { 1316 error = -EFSCORRUPTED; 1317 goto error_rcur; 1318 } 1319 } 1320 1321 if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) { 1322 error = -EFSCORRUPTED; 1323 goto error_rcur; 1324 } 1325 if (i == 1 && j == 1) { 1326 /* 1327 * Both the left and right records are valid. Choose the closer 1328 * inode chunk to the target. 
1329 */ 1330 if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) > 1331 (rrec.ir_startino - pagino)) { 1332 *rec = rrec; 1333 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1334 *ocur = rcur; 1335 } else { 1336 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1337 } 1338 } else if (j == 1) { 1339 /* only the right record is valid */ 1340 *rec = rrec; 1341 xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR); 1342 *ocur = rcur; 1343 } else if (i == 1) { 1344 /* only the left record is valid */ 1345 xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR); 1346 } 1347 1348 return 0; 1349 1350 error_rcur: 1351 xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR); 1352 return error; 1353 } 1354 1355 /* 1356 * Use the free inode btree to find a free inode based on a newino hint. If 1357 * the hint is NULL, find the first free inode in the AG. 1358 */ 1359 STATIC int 1360 xfs_dialloc_ag_finobt_newino( 1361 struct xfs_agi *agi, 1362 struct xfs_btree_cur *cur, 1363 struct xfs_inobt_rec_incore *rec) 1364 { 1365 int error; 1366 int i; 1367 1368 if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { 1369 error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), 1370 XFS_LOOKUP_EQ, &i); 1371 if (error) 1372 return error; 1373 if (i == 1) { 1374 error = xfs_inobt_get_rec(cur, rec, &i); 1375 if (error) 1376 return error; 1377 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1378 return -EFSCORRUPTED; 1379 return 0; 1380 } 1381 } 1382 1383 /* 1384 * Find the first inode available in the AG. 1385 */ 1386 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1387 if (error) 1388 return error; 1389 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1390 return -EFSCORRUPTED; 1391 1392 error = xfs_inobt_get_rec(cur, rec, &i); 1393 if (error) 1394 return error; 1395 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1396 return -EFSCORRUPTED; 1397 1398 return 0; 1399 } 1400 1401 /* 1402 * Update the inobt based on a modification made to the finobt. Also ensure that 1403 * the records from both trees are equivalent post-modification. 1404 */ 1405 STATIC int 1406 xfs_dialloc_ag_update_inobt( 1407 struct xfs_btree_cur *cur, /* inobt cursor */ 1408 struct xfs_inobt_rec_incore *frec, /* finobt record */ 1409 int offset) /* inode offset */ 1410 { 1411 struct xfs_inobt_rec_incore rec; 1412 int error; 1413 int i; 1414 1415 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1416 if (error) 1417 return error; 1418 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1419 return -EFSCORRUPTED; 1420 1421 error = xfs_inobt_get_rec(cur, &rec, &i); 1422 if (error) 1423 return error; 1424 if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) 1425 return -EFSCORRUPTED; 1426 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1427 XFS_INODES_PER_CHUNK) == 0); 1428 1429 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1430 rec.ir_freecount--; 1431 1432 if (XFS_IS_CORRUPT(cur->bc_mp, 1433 rec.ir_free != frec->ir_free || 1434 rec.ir_freecount != frec->ir_freecount)) 1435 return -EFSCORRUPTED; 1436 1437 return xfs_inobt_update(cur, &rec); 1438 } 1439 1440 /* 1441 * Allocate an inode using the free inode btree, if available. Otherwise, fall 1442 * back to the inobt search algorithm. 1443 * 1444 * The caller selected an AG for us, and made sure that free inodes are 1445 * available. 
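 *
 * The finobt path below works in three steps: pick a record from the finobt
 * (near the parent if it lives in this AG, otherwise via the agi_newino
 * hint), update or delete that finobt record, and then mirror the same
 * change into the inobt so the two trees stay consistent, cross-checking
 * the result against the AGI free count in DEBUG builds.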
1446 */ 1447 static int 1448 xfs_dialloc_ag( 1449 struct xfs_perag *pag, 1450 struct xfs_trans *tp, 1451 struct xfs_buf *agbp, 1452 xfs_ino_t parent, 1453 xfs_ino_t *inop) 1454 { 1455 struct xfs_mount *mp = tp->t_mountp; 1456 struct xfs_agi *agi = agbp->b_addr; 1457 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); 1458 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); 1459 struct xfs_btree_cur *cur; /* finobt cursor */ 1460 struct xfs_btree_cur *icur; /* inobt cursor */ 1461 struct xfs_inobt_rec_incore rec; 1462 xfs_ino_t ino; 1463 int error; 1464 int offset; 1465 int i; 1466 1467 if (!xfs_has_finobt(mp)) 1468 return xfs_dialloc_ag_inobt(pag, tp, agbp, parent, inop); 1469 1470 /* 1471 * If pagino is 0 (this is the root inode allocation) use newino. 1472 * This must work because we've just allocated some. 1473 */ 1474 if (!pagino) 1475 pagino = be32_to_cpu(agi->agi_newino); 1476 1477 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 1478 1479 error = xfs_check_agi_freecount(cur); 1480 if (error) 1481 goto error_cur; 1482 1483 /* 1484 * The search algorithm depends on whether we're in the same AG as the 1485 * parent. If so, find the closest available inode to the parent. If 1486 * not, consider the agi hint or find the first free inode in the AG. 1487 */ 1488 if (pag->pag_agno == pagno) 1489 error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec); 1490 else 1491 error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec); 1492 if (error) 1493 goto error_cur; 1494 1495 offset = xfs_inobt_first_free_inode(&rec); 1496 ASSERT(offset >= 0); 1497 ASSERT(offset < XFS_INODES_PER_CHUNK); 1498 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 1499 XFS_INODES_PER_CHUNK) == 0); 1500 ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset); 1501 1502 /* 1503 * Modify or remove the finobt record. 1504 */ 1505 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1506 rec.ir_freecount--; 1507 if (rec.ir_freecount) 1508 error = xfs_inobt_update(cur, &rec); 1509 else 1510 error = xfs_btree_delete(cur, &i); 1511 if (error) 1512 goto error_cur; 1513 1514 /* 1515 * The finobt has now been updated appropriately. We haven't updated the 1516 * agi and superblock yet, so we can create an inobt cursor and validate 1517 * the original freecount. If all is well, make the equivalent update to 1518 * the inobt using the finobt record and offset information. 1519 */ 1520 icur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1521 1522 error = xfs_check_agi_freecount(icur); 1523 if (error) 1524 goto error_icur; 1525 1526 error = xfs_dialloc_ag_update_inobt(icur, &rec, offset); 1527 if (error) 1528 goto error_icur; 1529 1530 /* 1531 * Both trees have now been updated. We must update the perag and 1532 * superblock before we can check the freecount for each btree. 
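	 *
	 * (The DEBUG freecount check sums the per-record free counts and
	 * compares them against pag->pagi_freecount, so the perag counter has
	 * to be decremented first or the assert would trip on a healthy
	 * filesystem.)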
	 */
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);

	error = xfs_check_agi_freecount(icur);
	if (error)
		goto error_icur;
	error = xfs_check_agi_freecount(cur);
	if (error)
		goto error_icur;

	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	*inop = ino;
	return 0;

error_icur:
	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
error_cur:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

static int
xfs_dialloc_roll(
	struct xfs_trans	**tpp,
	struct xfs_buf		*agibp)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_dquot_acct	*dqinfo;
	int			error;

	/*
	 * Hold on to the agibp across the commit so no other allocation can
	 * come in and take the free inodes we just allocated for our caller.
	 */
	xfs_trans_bhold(tp, agibp);

	/*
	 * We want the quota changes to be associated with the next transaction,
	 * NOT this one. So, detach the dqinfo from this and attach it to the
	 * next transaction.
	 */
	dqinfo = tp->t_dqinfo;
	tp->t_dqinfo = NULL;

	error = xfs_trans_roll(&tp);

	/* Re-attach the quota info that we detached from the previous transaction. */
	tp->t_dqinfo = dqinfo;

	/*
	 * Join the buffer even on commit error so that the buffer is released
	 * when the caller cancels the transaction and doesn't have to handle
	 * this error case specially.
	 */
	xfs_trans_bjoin(tp, agibp);
	*tpp = tp;
	return error;
}

static bool
xfs_dialloc_good_ag(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	umode_t			mode,
	int			flags,
	bool			ok_alloc)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_extlen_t		ineed;
	xfs_extlen_t		longest = 0;
	int			needspace;
	int			error;

	if (!pag)
		return false;
	if (!xfs_perag_allows_inodes(pag))
		return false;

	if (!xfs_perag_initialised_agi(pag)) {
		error = xfs_ialloc_read_agi(pag, tp, NULL);
		if (error)
			return false;
	}

	if (pag->pagi_freecount)
		return true;
	if (!ok_alloc)
		return false;

	if (!xfs_perag_initialised_agf(pag)) {
		error = xfs_alloc_read_agf(pag, tp, flags, NULL);
		if (error)
			return false;
	}

	/*
	 * Check that there is enough free space for the file plus a chunk of
	 * inodes if we need to allocate some. If this is the first pass across
	 * the AGs, take into account the potential space needed for alignment
	 * of inode chunks when checking the longest contiguous free space in
	 * the AG - this prevents us from getting ENOSPC because we have free
	 * space larger than ialloc_blks but alignment constraints prevent us
	 * from using it.
	 *
	 * If we can't find an AG with space for full alignment slack to be
	 * taken into account, we must be near ENOSPC in all AGs. Hence we
	 * don't include alignment for the second pass and so if we fail
	 * allocation due to alignment issues then it is most likely a real
	 * ENOSPC condition.
	 *
	 * XXX(dgc): this calculation is now bogus thanks to the per-ag
	 * reservations that xfs_alloc_fix_freelist() now does via
	 * xfs_alloc_space_available().
When the AG fills up, pagf_freeblks will 1651 * be more than large enough for the check below to succeed, but 1652 * xfs_alloc_space_available() will fail because of the non-zero 1653 * metadata reservation and hence we won't actually be able to allocate 1654 * more inodes in this AG. We do soooo much unnecessary work near ENOSPC 1655 * because of this. 1656 */ 1657 ineed = M_IGEO(mp)->ialloc_min_blks; 1658 if (flags && ineed > 1) 1659 ineed += M_IGEO(mp)->cluster_align; 1660 longest = pag->pagf_longest; 1661 if (!longest) 1662 longest = pag->pagf_flcount > 0; 1663 needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); 1664 1665 if (pag->pagf_freeblks < needspace + ineed || longest < ineed) 1666 return false; 1667 return true; 1668 } 1669 1670 static int 1671 xfs_dialloc_try_ag( 1672 struct xfs_perag *pag, 1673 struct xfs_trans **tpp, 1674 xfs_ino_t parent, 1675 xfs_ino_t *new_ino, 1676 bool ok_alloc) 1677 { 1678 struct xfs_buf *agbp; 1679 xfs_ino_t ino; 1680 int error; 1681 1682 /* 1683 * Then read in the AGI buffer and recheck with the AGI buffer 1684 * lock held. 1685 */ 1686 error = xfs_ialloc_read_agi(pag, *tpp, &agbp); 1687 if (error) 1688 return error; 1689 1690 if (!pag->pagi_freecount) { 1691 if (!ok_alloc) { 1692 error = -EAGAIN; 1693 goto out_release; 1694 } 1695 1696 error = xfs_ialloc_ag_alloc(pag, *tpp, agbp); 1697 if (error < 0) 1698 goto out_release; 1699 1700 /* 1701 * We successfully allocated space for an inode cluster in this 1702 * AG. Roll the transaction so that we can allocate one of the 1703 * new inodes. 1704 */ 1705 ASSERT(pag->pagi_freecount > 0); 1706 error = xfs_dialloc_roll(tpp, agbp); 1707 if (error) 1708 goto out_release; 1709 } 1710 1711 /* Allocate an inode in the found AG */ 1712 error = xfs_dialloc_ag(pag, *tpp, agbp, parent, &ino); 1713 if (!error) 1714 *new_ino = ino; 1715 return error; 1716 1717 out_release: 1718 xfs_trans_brelse(*tpp, agbp); 1719 return error; 1720 } 1721 1722 /* 1723 * Allocate an on-disk inode. 1724 * 1725 * Mode is used to tell whether the new inode is a directory and hence where to 1726 * locate it. The on-disk inode that is allocated will be returned in @new_ino 1727 * on success, otherwise an error will be set to indicate the failure (e.g. 1728 * -ENOSPC). 1729 */ 1730 int 1731 xfs_dialloc( 1732 struct xfs_trans **tpp, 1733 xfs_ino_t parent, 1734 umode_t mode, 1735 xfs_ino_t *new_ino) 1736 { 1737 struct xfs_mount *mp = (*tpp)->t_mountp; 1738 xfs_agnumber_t agno; 1739 int error = 0; 1740 xfs_agnumber_t start_agno; 1741 struct xfs_perag *pag; 1742 struct xfs_ino_geometry *igeo = M_IGEO(mp); 1743 bool ok_alloc = true; 1744 bool low_space = false; 1745 int flags; 1746 xfs_ino_t ino = NULLFSINO; 1747 1748 /* 1749 * Directories, symlinks, and regular files frequently allocate at least 1750 * one block, so factor that potential expansion when we examine whether 1751 * an AG has enough space for file creation. 1752 */ 1753 if (S_ISDIR(mode)) 1754 start_agno = (atomic_inc_return(&mp->m_agirotor) - 1) % 1755 mp->m_maxagi; 1756 else { 1757 start_agno = XFS_INO_TO_AGNO(mp, parent); 1758 if (start_agno >= mp->m_maxagi) 1759 start_agno = 0; 1760 } 1761 1762 /* 1763 * If we have already hit the ceiling of inode blocks then clear 1764 * ok_alloc so we scan all available agi structures for a free 1765 * inode. 1766 * 1767 * Read rough value of mp->m_icount by percpu_counter_read_positive, 1768 * which will sacrifice the preciseness but improve the performance. 
1769 */ 1770 if (igeo->maxicount && 1771 percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos 1772 > igeo->maxicount) { 1773 ok_alloc = false; 1774 } 1775 1776 /* 1777 * If we are near to ENOSPC, we want to prefer allocation from AGs that 1778 * have free inodes in them rather than use up free space allocating new 1779 * inode chunks. Hence we turn off allocation for the first non-blocking 1780 * pass through the AGs if we are near ENOSPC to consume free inodes 1781 * that we can immediately allocate, but then we allow allocation on the 1782 * second pass if we fail to find an AG with free inodes in it. 1783 */ 1784 if (percpu_counter_read_positive(&mp->m_fdblocks) < 1785 mp->m_low_space[XFS_LOWSP_1_PCNT]) { 1786 ok_alloc = false; 1787 low_space = true; 1788 } 1789 1790 /* 1791 * Loop until we find an allocation group that either has free inodes 1792 * or in which we can allocate some inodes. Iterate through the 1793 * allocation groups upward, wrapping at the end. 1794 */ 1795 flags = XFS_ALLOC_FLAG_TRYLOCK; 1796 retry: 1797 for_each_perag_wrap_at(mp, start_agno, mp->m_maxagi, agno, pag) { 1798 if (xfs_dialloc_good_ag(pag, *tpp, mode, flags, ok_alloc)) { 1799 error = xfs_dialloc_try_ag(pag, tpp, parent, 1800 &ino, ok_alloc); 1801 if (error != -EAGAIN) 1802 break; 1803 error = 0; 1804 } 1805 1806 if (xfs_is_shutdown(mp)) { 1807 error = -EFSCORRUPTED; 1808 break; 1809 } 1810 } 1811 if (pag) 1812 xfs_perag_rele(pag); 1813 if (error) 1814 return error; 1815 if (ino == NULLFSINO) { 1816 if (flags) { 1817 flags = 0; 1818 if (low_space) 1819 ok_alloc = true; 1820 goto retry; 1821 } 1822 return -ENOSPC; 1823 } 1824 *new_ino = ino; 1825 return 0; 1826 } 1827 1828 /* 1829 * Free the blocks of an inode chunk. We must consider that the inode chunk 1830 * might be sparse and only free the regions that are allocated as part of the 1831 * chunk. 1832 */ 1833 STATIC void 1834 xfs_difree_inode_chunk( 1835 struct xfs_trans *tp, 1836 xfs_agnumber_t agno, 1837 struct xfs_inobt_rec_incore *rec) 1838 { 1839 struct xfs_mount *mp = tp->t_mountp; 1840 xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, 1841 rec->ir_startino); 1842 int startidx, endidx; 1843 int nextbit; 1844 xfs_agblock_t agbno; 1845 int contigblk; 1846 DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS); 1847 1848 if (!xfs_inobt_issparse(rec->ir_holemask)) { 1849 /* not sparse, calculate extent info directly */ 1850 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), 1851 M_IGEO(mp)->ialloc_blks, 1852 &XFS_RMAP_OINFO_INODES); 1853 return; 1854 } 1855 1856 /* holemask is only 16-bits (fits in an unsigned long) */ 1857 ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0])); 1858 holemask[0] = rec->ir_holemask; 1859 1860 /* 1861 * Find contiguous ranges of zeroes (i.e., allocated regions) in the 1862 * holemask and convert the start/end index of each range to an extent. 1863 * We start with the start and end index both pointing at the first 0 in 1864 * the mask. 1865 */ 1866 startidx = endidx = find_first_zero_bit(holemask, 1867 XFS_INOBT_HOLEMASK_BITS); 1868 nextbit = startidx + 1; 1869 while (startidx < XFS_INOBT_HOLEMASK_BITS) { 1870 nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, 1871 nextbit); 1872 /* 1873 * If the next zero bit is contiguous, update the end index of 1874 * the current range and continue. 1875 */ 1876 if (nextbit != XFS_INOBT_HOLEMASK_BITS && 1877 nextbit == endidx + 1) { 1878 endidx = nextbit; 1879 goto next; 1880 } 1881 1882 /* 1883 * nextbit is not contiguous with the current end index. 
Convert 1884 * the current start/end to an extent and add it to the free 1885 * list. 1886 */ 1887 agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) / 1888 mp->m_sb.sb_inopblock; 1889 contigblk = ((endidx - startidx + 1) * 1890 XFS_INODES_PER_HOLEMASK_BIT) / 1891 mp->m_sb.sb_inopblock; 1892 1893 ASSERT(agbno % mp->m_sb.sb_spino_align == 0); 1894 ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); 1895 xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno), 1896 contigblk, &XFS_RMAP_OINFO_INODES); 1897 1898 /* reset range to current bit and carry on... */ 1899 startidx = endidx = nextbit; 1900 1901 next: 1902 nextbit++; 1903 } 1904 } 1905 1906 STATIC int 1907 xfs_difree_inobt( 1908 struct xfs_perag *pag, 1909 struct xfs_trans *tp, 1910 struct xfs_buf *agbp, 1911 xfs_agino_t agino, 1912 struct xfs_icluster *xic, 1913 struct xfs_inobt_rec_incore *orec) 1914 { 1915 struct xfs_mount *mp = pag->pag_mount; 1916 struct xfs_agi *agi = agbp->b_addr; 1917 struct xfs_btree_cur *cur; 1918 struct xfs_inobt_rec_incore rec; 1919 int ilen; 1920 int error; 1921 int i; 1922 int off; 1923 1924 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 1925 ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length)); 1926 1927 /* 1928 * Initialize the cursor. 1929 */ 1930 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 1931 1932 error = xfs_check_agi_freecount(cur); 1933 if (error) 1934 goto error0; 1935 1936 /* 1937 * Look for the entry describing this inode. 1938 */ 1939 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1940 xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", 1941 __func__, error); 1942 goto error0; 1943 } 1944 if (XFS_IS_CORRUPT(mp, i != 1)) { 1945 error = -EFSCORRUPTED; 1946 goto error0; 1947 } 1948 error = xfs_inobt_get_rec(cur, &rec, &i); 1949 if (error) { 1950 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1951 __func__, error); 1952 goto error0; 1953 } 1954 if (XFS_IS_CORRUPT(mp, i != 1)) { 1955 error = -EFSCORRUPTED; 1956 goto error0; 1957 } 1958 /* 1959 * Get the offset in the inode chunk. 1960 */ 1961 off = agino - rec.ir_startino; 1962 ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK); 1963 ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off))); 1964 /* 1965 * Mark the inode free & increment the count. 1966 */ 1967 rec.ir_free |= XFS_INOBT_MASK(off); 1968 rec.ir_freecount++; 1969 1970 /* 1971 * When an inode chunk is free, it becomes eligible for removal. Don't 1972 * remove the chunk if the block size is large enough for multiple inode 1973 * chunks (that might not be free). 1974 */ 1975 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 1976 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1977 struct xfs_perag *pag = agbp->b_pag; 1978 1979 xic->deleted = true; 1980 xic->first_ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, 1981 rec.ir_startino); 1982 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1983 1984 /* 1985 * Remove the inode cluster from the AGI B+Tree, adjust the 1986 * AGI and Superblock inode counts, and mark the disk space 1987 * to be freed when the transaction is committed. 
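		 *
		 * Note the asymmetric accounting that follows: agi_count drops
		 * by the full chunk (ilen), but agi_freecount and the perag
		 * free count only drop by ilen - 1 because the inode being
		 * freed right now was never counted as free in the first
		 * place.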
1988 */ 1989 ilen = rec.ir_freecount; 1990 be32_add_cpu(&agi->agi_count, -ilen); 1991 be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); 1992 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); 1993 pag->pagi_freecount -= ilen - 1; 1994 pag->pagi_count -= ilen; 1995 xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); 1996 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 1997 1998 if ((error = xfs_btree_delete(cur, &i))) { 1999 xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", 2000 __func__, error); 2001 goto error0; 2002 } 2003 2004 xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); 2005 } else { 2006 xic->deleted = false; 2007 2008 error = xfs_inobt_update(cur, &rec); 2009 if (error) { 2010 xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", 2011 __func__, error); 2012 goto error0; 2013 } 2014 2015 /* 2016 * Change the inode free counts and log the ag/sb changes. 2017 */ 2018 be32_add_cpu(&agi->agi_freecount, 1); 2019 xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); 2020 pag->pagi_freecount++; 2021 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); 2022 } 2023 2024 error = xfs_check_agi_freecount(cur); 2025 if (error) 2026 goto error0; 2027 2028 *orec = rec; 2029 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2030 return 0; 2031 2032 error0: 2033 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2034 return error; 2035 } 2036 2037 /* 2038 * Free an inode in the free inode btree. 2039 */ 2040 STATIC int 2041 xfs_difree_finobt( 2042 struct xfs_perag *pag, 2043 struct xfs_trans *tp, 2044 struct xfs_buf *agbp, 2045 xfs_agino_t agino, 2046 struct xfs_inobt_rec_incore *ibtrec) /* inobt record */ 2047 { 2048 struct xfs_mount *mp = pag->pag_mount; 2049 struct xfs_btree_cur *cur; 2050 struct xfs_inobt_rec_incore rec; 2051 int offset = agino - ibtrec->ir_startino; 2052 int error; 2053 int i; 2054 2055 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_FINO); 2056 2057 error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i); 2058 if (error) 2059 goto error; 2060 if (i == 0) { 2061 /* 2062 * If the record does not exist in the finobt, we must have just 2063 * freed an inode in a previously fully allocated chunk. If not, 2064 * something is out of sync. 2065 */ 2066 if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) { 2067 error = -EFSCORRUPTED; 2068 goto error; 2069 } 2070 2071 error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask, 2072 ibtrec->ir_count, 2073 ibtrec->ir_freecount, 2074 ibtrec->ir_free, &i); 2075 if (error) 2076 goto error; 2077 ASSERT(i == 1); 2078 2079 goto out; 2080 } 2081 2082 /* 2083 * Read and update the existing record. We could just copy the ibtrec 2084 * across here, but that would defeat the purpose of having redundant 2085 * metadata. By making the modifications independently, we can catch 2086 * corruptions that we wouldn't see if we just copied from one record 2087 * to another. 2088 */ 2089 error = xfs_inobt_get_rec(cur, &rec, &i); 2090 if (error) 2091 goto error; 2092 if (XFS_IS_CORRUPT(mp, i != 1)) { 2093 error = -EFSCORRUPTED; 2094 goto error; 2095 } 2096 2097 rec.ir_free |= XFS_INOBT_MASK(offset); 2098 rec.ir_freecount++; 2099 2100 if (XFS_IS_CORRUPT(mp, 2101 rec.ir_free != ibtrec->ir_free || 2102 rec.ir_freecount != ibtrec->ir_freecount)) { 2103 error = -EFSCORRUPTED; 2104 goto error; 2105 } 2106 2107 /* 2108 * The content of inobt records should always match between the inobt 2109 * and finobt. 
The lifecycle of records in the finobt is different from 2110 * the inobt in that the finobt only tracks records with at least one 2111 * free inode. Hence, if all of the inodes are free and we aren't 2112 * keeping inode chunks permanently on disk, remove the record. 2113 * Otherwise, update the record with the new information. 2114 * 2115 * Note that we currently can't free chunks when the block size is large 2116 * enough for multiple chunks. Leave the finobt record to remain in sync 2117 * with the inobt. 2118 */ 2119 if (!xfs_has_ikeep(mp) && rec.ir_free == XFS_INOBT_ALL_FREE && 2120 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 2121 error = xfs_btree_delete(cur, &i); 2122 if (error) 2123 goto error; 2124 ASSERT(i == 1); 2125 } else { 2126 error = xfs_inobt_update(cur, &rec); 2127 if (error) 2128 goto error; 2129 } 2130 2131 out: 2132 error = xfs_check_agi_freecount(cur); 2133 if (error) 2134 goto error; 2135 2136 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 2137 return 0; 2138 2139 error: 2140 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 2141 return error; 2142 } 2143 2144 /* 2145 * Free disk inode. Carefully avoids touching the incore inode, all 2146 * manipulations incore are the caller's responsibility. 2147 * The on-disk inode is not changed by this operation, only the 2148 * btree (free inode mask) is changed. 2149 */ 2150 int 2151 xfs_difree( 2152 struct xfs_trans *tp, 2153 struct xfs_perag *pag, 2154 xfs_ino_t inode, 2155 struct xfs_icluster *xic) 2156 { 2157 /* REFERENCED */ 2158 xfs_agblock_t agbno; /* block number containing inode */ 2159 struct xfs_buf *agbp; /* buffer for allocation group header */ 2160 xfs_agino_t agino; /* allocation group inode number */ 2161 int error; /* error return value */ 2162 struct xfs_mount *mp = tp->t_mountp; 2163 struct xfs_inobt_rec_incore rec;/* btree record */ 2164 2165 /* 2166 * Break up inode number into its components. 2167 */ 2168 if (pag->pag_agno != XFS_INO_TO_AGNO(mp, inode)) { 2169 xfs_warn(mp, "%s: agno != pag->pag_agno (%d != %d).", 2170 __func__, XFS_INO_TO_AGNO(mp, inode), pag->pag_agno); 2171 ASSERT(0); 2172 return -EINVAL; 2173 } 2174 agino = XFS_INO_TO_AGINO(mp, inode); 2175 if (inode != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2176 xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", 2177 __func__, (unsigned long long)inode, 2178 (unsigned long long)XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2179 ASSERT(0); 2180 return -EINVAL; 2181 } 2182 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2183 if (agbno >= mp->m_sb.sb_agblocks) { 2184 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 2185 __func__, agbno, mp->m_sb.sb_agblocks); 2186 ASSERT(0); 2187 return -EINVAL; 2188 } 2189 /* 2190 * Get the allocation group header. 2191 */ 2192 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2193 if (error) { 2194 xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", 2195 __func__, error); 2196 return error; 2197 } 2198 2199 /* 2200 * Fix up the inode allocation btree. 2201 */ 2202 error = xfs_difree_inobt(pag, tp, agbp, agino, xic, &rec); 2203 if (error) 2204 goto error0; 2205 2206 /* 2207 * Fix up the free inode btree. 
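 * The finobt only tracks records with free inodes, so the record for
 * this chunk must be updated here, in the same transaction as the
 * inobt change above, to keep the two trees consistent.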
2208 */ 2209 if (xfs_has_finobt(mp)) { 2210 error = xfs_difree_finobt(pag, tp, agbp, agino, &rec); 2211 if (error) 2212 goto error0; 2213 } 2214 2215 return 0; 2216 2217 error0: 2218 return error; 2219 } 2220 2221 STATIC int 2222 xfs_imap_lookup( 2223 struct xfs_perag *pag, 2224 struct xfs_trans *tp, 2225 xfs_agino_t agino, 2226 xfs_agblock_t agbno, 2227 xfs_agblock_t *chunk_agbno, 2228 xfs_agblock_t *offset_agbno, 2229 int flags) 2230 { 2231 struct xfs_mount *mp = pag->pag_mount; 2232 struct xfs_inobt_rec_incore rec; 2233 struct xfs_btree_cur *cur; 2234 struct xfs_buf *agbp; 2235 int error; 2236 int i; 2237 2238 error = xfs_ialloc_read_agi(pag, tp, &agbp); 2239 if (error) { 2240 xfs_alert(mp, 2241 "%s: xfs_ialloc_read_agi() returned error %d, agno %d", 2242 __func__, error, pag->pag_agno); 2243 return error; 2244 } 2245 2246 /* 2247 * Lookup the inode record for the given agino. If the record cannot be 2248 * found, then it's an invalid inode number and we should abort. Once 2249 * we have a record, we need to ensure it contains the inode number 2250 * we are looking up. 2251 */ 2252 cur = xfs_inobt_init_cursor(pag, tp, agbp, XFS_BTNUM_INO); 2253 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); 2254 if (!error) { 2255 if (i) 2256 error = xfs_inobt_get_rec(cur, &rec, &i); 2257 if (!error && i == 0) 2258 error = -EINVAL; 2259 } 2260 2261 xfs_trans_brelse(tp, agbp); 2262 xfs_btree_del_cursor(cur, error); 2263 if (error) 2264 return error; 2265 2266 /* check that the returned record contains the required inode */ 2267 if (rec.ir_startino > agino || 2268 rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino) 2269 return -EINVAL; 2270 2271 /* for untrusted inodes check it is allocated first */ 2272 if ((flags & XFS_IGET_UNTRUSTED) && 2273 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 2274 return -EINVAL; 2275 2276 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 2277 *offset_agbno = agbno - *chunk_agbno; 2278 return 0; 2279 } 2280 2281 /* 2282 * Return the location of the inode in imap, for mapping it into a buffer. 2283 */ 2284 int 2285 xfs_imap( 2286 struct xfs_perag *pag, 2287 struct xfs_trans *tp, 2288 xfs_ino_t ino, /* inode to locate */ 2289 struct xfs_imap *imap, /* location map structure */ 2290 uint flags) /* flags for inode btree lookup */ 2291 { 2292 struct xfs_mount *mp = pag->pag_mount; 2293 xfs_agblock_t agbno; /* block number of inode in the alloc group */ 2294 xfs_agino_t agino; /* inode number within alloc group */ 2295 xfs_agblock_t chunk_agbno; /* first block in inode chunk */ 2296 xfs_agblock_t cluster_agbno; /* first block in inode cluster */ 2297 int error; /* error code */ 2298 int offset; /* index of inode in its buffer */ 2299 xfs_agblock_t offset_agbno; /* blks from chunk start to inode */ 2300 2301 ASSERT(ino != NULLFSINO); 2302 2303 /* 2304 * Split up the inode number into its parts. 2305 */ 2306 agino = XFS_INO_TO_AGINO(mp, ino); 2307 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 2308 if (agbno >= mp->m_sb.sb_agblocks || 2309 ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2310 error = -EINVAL; 2311 #ifdef DEBUG 2312 /* 2313 * Don't output diagnostic information for untrusted inodes 2314 * as they can be invalid without implying corruption. 
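 * A stale file handle, for instance, can legitimately reference an
 * inode number whose chunk has since been freed.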
2315 */ 2316 if (flags & XFS_IGET_UNTRUSTED) 2317 return error; 2318 if (agbno >= mp->m_sb.sb_agblocks) { 2319 xfs_alert(mp, 2320 "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", 2321 __func__, (unsigned long long)agbno, 2322 (unsigned long)mp->m_sb.sb_agblocks); 2323 } 2324 if (ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { 2325 xfs_alert(mp, 2326 "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", 2327 __func__, ino, 2328 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); 2329 } 2330 xfs_stack_trace(); 2331 #endif /* DEBUG */ 2332 return error; 2333 } 2334 2335 /* 2336 * For bulkstat and handle lookups, we have an untrusted inode number 2337 * that we have to verify is valid. We cannot do this just by reading 2338 * the inode buffer as it may have been unlinked and removed leaving 2339 * inodes in stale state on disk. Hence we have to do a btree lookup 2340 * in all cases where an untrusted inode number is passed. 2341 */ 2342 if (flags & XFS_IGET_UNTRUSTED) { 2343 error = xfs_imap_lookup(pag, tp, agino, agbno, 2344 &chunk_agbno, &offset_agbno, flags); 2345 if (error) 2346 return error; 2347 goto out_map; 2348 } 2349 2350 /* 2351 * If the inode cluster size is the same as the blocksize or 2352 * smaller we get to the buffer by simple arithmetic. 2353 */ 2354 if (M_IGEO(mp)->blocks_per_cluster == 1) { 2355 offset = XFS_INO_TO_OFFSET(mp, ino); 2356 ASSERT(offset < mp->m_sb.sb_inopblock); 2357 2358 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, agbno); 2359 imap->im_len = XFS_FSB_TO_BB(mp, 1); 2360 imap->im_boffset = (unsigned short)(offset << 2361 mp->m_sb.sb_inodelog); 2362 return 0; 2363 } 2364 2365 /* 2366 * If the inode chunks are aligned then use simple maths to 2367 * find the location. Otherwise we have to do a btree 2368 * lookup to find the location. 2369 */ 2370 if (M_IGEO(mp)->inoalign_mask) { 2371 offset_agbno = agbno & M_IGEO(mp)->inoalign_mask; 2372 chunk_agbno = agbno - offset_agbno; 2373 } else { 2374 error = xfs_imap_lookup(pag, tp, agino, agbno, 2375 &chunk_agbno, &offset_agbno, flags); 2376 if (error) 2377 return error; 2378 } 2379 2380 out_map: 2381 ASSERT(agbno >= chunk_agbno); 2382 cluster_agbno = chunk_agbno + 2383 ((offset_agbno / M_IGEO(mp)->blocks_per_cluster) * 2384 M_IGEO(mp)->blocks_per_cluster); 2385 offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + 2386 XFS_INO_TO_OFFSET(mp, ino); 2387 2388 imap->im_blkno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, cluster_agbno); 2389 imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); 2390 imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog); 2391 2392 /* 2393 * If the inode number maps to a block outside the bounds 2394 * of the file system then return an error rather than calling 2395 * read_buf and panicking when we get an error from the 2396 * driver. 2397 */ 2398 if ((imap->im_blkno + imap->im_len) > 2399 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 2400 xfs_alert(mp, 2401 "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", 2402 __func__, (unsigned long long) imap->im_blkno, 2403 (unsigned long long) imap->im_len, 2404 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 2405 return -EINVAL; 2406 } 2407 return 0; 2408 } 2409 2410 /* 2411 * Log specified fields for the ag hdr (inode section). The growth of the agi 2412 * structure over time requires that we interpret the buffer as two logical 2413 * regions delineated by the end of the unlinked list. This is due to the size 2414 * of the hash table and its location in the middle of the agi.
2415 * 2416 * For example, a request to log a field before agi_unlinked and a field after 2417 * agi_unlinked could cause us to log the entire hash table and use an excessive 2418 * amount of log space. To avoid this behavior, log the region up through 2419 * agi_unlinked in one call and the region after agi_unlinked through the end of 2420 * the structure in another. 2421 */ 2422 void 2423 xfs_ialloc_log_agi( 2424 struct xfs_trans *tp, 2425 struct xfs_buf *bp, 2426 uint32_t fields) 2427 { 2428 int first; /* first byte number */ 2429 int last; /* last byte number */ 2430 static const short offsets[] = { /* field starting offsets */ 2431 /* keep in sync with bit definitions */ 2432 offsetof(xfs_agi_t, agi_magicnum), 2433 offsetof(xfs_agi_t, agi_versionnum), 2434 offsetof(xfs_agi_t, agi_seqno), 2435 offsetof(xfs_agi_t, agi_length), 2436 offsetof(xfs_agi_t, agi_count), 2437 offsetof(xfs_agi_t, agi_root), 2438 offsetof(xfs_agi_t, agi_level), 2439 offsetof(xfs_agi_t, agi_freecount), 2440 offsetof(xfs_agi_t, agi_newino), 2441 offsetof(xfs_agi_t, agi_dirino), 2442 offsetof(xfs_agi_t, agi_unlinked), 2443 offsetof(xfs_agi_t, agi_free_root), 2444 offsetof(xfs_agi_t, agi_free_level), 2445 offsetof(xfs_agi_t, agi_iblocks), 2446 sizeof(xfs_agi_t) 2447 }; 2448 #ifdef DEBUG 2449 struct xfs_agi *agi = bp->b_addr; 2450 2451 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); 2452 #endif 2453 2454 /* 2455 * Compute byte offsets for the first and last fields in the first 2456 * region and log the agi buffer. This only logs up through 2457 * agi_unlinked. 2458 */ 2459 if (fields & XFS_AGI_ALL_BITS_R1) { 2460 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1, 2461 &first, &last); 2462 xfs_trans_log_buf(tp, bp, first, last); 2463 } 2464 2465 /* 2466 * Mask off the bits in the first region and calculate the first and 2467 * last field offsets for any bits in the second region. 2468 */ 2469 fields &= ~XFS_AGI_ALL_BITS_R1; 2470 if (fields) { 2471 xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2, 2472 &first, &last); 2473 xfs_trans_log_buf(tp, bp, first, last); 2474 } 2475 } 2476 2477 static xfs_failaddr_t 2478 xfs_agi_verify( 2479 struct xfs_buf *bp) 2480 { 2481 struct xfs_mount *mp = bp->b_mount; 2482 struct xfs_agi *agi = bp->b_addr; 2483 int i; 2484 2485 if (xfs_has_crc(mp)) { 2486 if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid)) 2487 return __this_address; 2488 if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn))) 2489 return __this_address; 2490 } 2491 2492 /* 2493 * Validate the magic number of the agi block. 2494 */ 2495 if (!xfs_verify_magic(bp, agi->agi_magicnum)) 2496 return __this_address; 2497 if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) 2498 return __this_address; 2499 2500 if (be32_to_cpu(agi->agi_level) < 1 || 2501 be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels) 2502 return __this_address; 2503 2504 if (xfs_has_finobt(mp) && 2505 (be32_to_cpu(agi->agi_free_level) < 1 || 2506 be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels)) 2507 return __this_address; 2508 2509 /* 2510 * during growfs operations, the perag is not fully initialised, 2511 * so we can't use it for any useful checking. growfs ensures we can't 2512 * use it by using uncached buffers that don't have the perag attached 2513 * so we can detect and avoid this problem. 
2514 */ 2515 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) 2516 return __this_address; 2517 2518 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { 2519 if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO)) 2520 continue; 2521 if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i]))) 2522 return __this_address; 2523 } 2524 2525 return NULL; 2526 } 2527 2528 static void 2529 xfs_agi_read_verify( 2530 struct xfs_buf *bp) 2531 { 2532 struct xfs_mount *mp = bp->b_mount; 2533 xfs_failaddr_t fa; 2534 2535 if (xfs_has_crc(mp) && 2536 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) 2537 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 2538 else { 2539 fa = xfs_agi_verify(bp); 2540 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI)) 2541 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2542 } 2543 } 2544 2545 static void 2546 xfs_agi_write_verify( 2547 struct xfs_buf *bp) 2548 { 2549 struct xfs_mount *mp = bp->b_mount; 2550 struct xfs_buf_log_item *bip = bp->b_log_item; 2551 struct xfs_agi *agi = bp->b_addr; 2552 xfs_failaddr_t fa; 2553 2554 fa = xfs_agi_verify(bp); 2555 if (fa) { 2556 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2557 return; 2558 } 2559 2560 if (!xfs_has_crc(mp)) 2561 return; 2562 2563 if (bip) 2564 agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); 2565 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); 2566 } 2567 2568 const struct xfs_buf_ops xfs_agi_buf_ops = { 2569 .name = "xfs_agi", 2570 .magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) }, 2571 .verify_read = xfs_agi_read_verify, 2572 .verify_write = xfs_agi_write_verify, 2573 .verify_struct = xfs_agi_verify, 2574 }; 2575 2576 /* 2577 * Read in the allocation group header (inode allocation section) 2578 */ 2579 int 2580 xfs_read_agi( 2581 struct xfs_perag *pag, 2582 struct xfs_trans *tp, 2583 struct xfs_buf **agibpp) 2584 { 2585 struct xfs_mount *mp = pag->pag_mount; 2586 int error; 2587 2588 trace_xfs_read_agi(pag->pag_mount, pag->pag_agno); 2589 2590 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 2591 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)), 2592 XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops); 2593 if (error) 2594 return error; 2595 if (tp) 2596 xfs_trans_buf_set_type(tp, *agibpp, XFS_BLFT_AGI_BUF); 2597 2598 xfs_buf_set_ref(*agibpp, XFS_AGI_REF); 2599 return 0; 2600 } 2601 2602 /* 2603 * Read in the agi and initialise the per-ag data. If the caller supplies a 2604 * @agibpp, return the locked AGI buffer to them, otherwise release it. 2605 */ 2606 int 2607 xfs_ialloc_read_agi( 2608 struct xfs_perag *pag, 2609 struct xfs_trans *tp, 2610 struct xfs_buf **agibpp) 2611 { 2612 struct xfs_buf *agibp; 2613 struct xfs_agi *agi; 2614 int error; 2615 2616 trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno); 2617 2618 error = xfs_read_agi(pag, tp, &agibp); 2619 if (error) 2620 return error; 2621 2622 agi = agibp->b_addr; 2623 if (!xfs_perag_initialised_agi(pag)) { 2624 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 2625 pag->pagi_count = be32_to_cpu(agi->agi_count); 2626 set_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate); 2627 } 2628 2629 /* 2630 * It's possible for these to be out of sync if 2631 * we are in the middle of a forced shutdown. 2632 */ 2633 ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || 2634 xfs_is_shutdown(pag->pag_mount)); 2635 if (agibpp) 2636 *agibpp = agibp; 2637 else 2638 xfs_trans_brelse(tp, agibp); 2639 return 0; 2640 } 2641 2642 /* Is there an inode record covering a given range of inode numbers? 
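 *
 * Each holemask bit covers XFS_INODES_PER_HOLEMASK_BIT (four) inodes
 * and a set bit marks a hole. Illustrative example: a record with
 * ir_startino 128 and ir_holemask 0x00ff backs no inodes for aginos
 * 128-159, so only queries that overlap aginos 160-191 will report
 * that inodes exist there.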
*/ 2643 int 2644 xfs_ialloc_has_inode_record( 2645 struct xfs_btree_cur *cur, 2646 xfs_agino_t low, 2647 xfs_agino_t high, 2648 bool *exists) 2649 { 2650 struct xfs_inobt_rec_incore irec; 2651 xfs_agino_t agino; 2652 uint16_t holemask; 2653 int has_record; 2654 int i; 2655 int error; 2656 2657 *exists = false; 2658 error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record); 2659 while (error == 0 && has_record) { 2660 error = xfs_inobt_get_rec(cur, &irec, &has_record); 2661 if (error || irec.ir_startino > high) 2662 break; 2663 2664 agino = irec.ir_startino; 2665 holemask = irec.ir_holemask; 2666 for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1, 2667 i++, agino += XFS_INODES_PER_HOLEMASK_BIT) { 2668 if (holemask & 1) 2669 continue; 2670 if (agino + XFS_INODES_PER_HOLEMASK_BIT > low && 2671 agino <= high) { 2672 *exists = true; 2673 return 0; 2674 } 2675 } 2676 2677 error = xfs_btree_increment(cur, 0, &has_record); 2678 } 2679 return error; 2680 } 2681 2682 /* Is there an inode record covering a given extent? */ 2683 int 2684 xfs_ialloc_has_inodes_at_extent( 2685 struct xfs_btree_cur *cur, 2686 xfs_agblock_t bno, 2687 xfs_extlen_t len, 2688 bool *exists) 2689 { 2690 xfs_agino_t low; 2691 xfs_agino_t high; 2692 2693 low = XFS_AGB_TO_AGINO(cur->bc_mp, bno); 2694 high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1; 2695 2696 return xfs_ialloc_has_inode_record(cur, low, high, exists); 2697 } 2698 2699 struct xfs_ialloc_count_inodes { 2700 xfs_agino_t count; 2701 xfs_agino_t freecount; 2702 }; 2703 2704 /* Record inode counts across all inobt records. */ 2705 STATIC int 2706 xfs_ialloc_count_inodes_rec( 2707 struct xfs_btree_cur *cur, 2708 const union xfs_btree_rec *rec, 2709 void *priv) 2710 { 2711 struct xfs_inobt_rec_incore irec; 2712 struct xfs_ialloc_count_inodes *ci = priv; 2713 xfs_failaddr_t fa; 2714 2715 xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec); 2716 fa = xfs_inobt_check_irec(cur, &irec); 2717 if (fa) 2718 return xfs_inobt_complain_bad_rec(cur, fa, &irec); 2719 2720 ci->count += irec.ir_count; 2721 ci->freecount += irec.ir_freecount; 2722 2723 return 0; 2724 } 2725 2726 /* Count allocated and free inodes under an inobt. */ 2727 int 2728 xfs_ialloc_count_inodes( 2729 struct xfs_btree_cur *cur, 2730 xfs_agino_t *count, 2731 xfs_agino_t *freecount) 2732 { 2733 struct xfs_ialloc_count_inodes ci = {0}; 2734 int error; 2735 2736 ASSERT(cur->bc_btnum == XFS_BTNUM_INO); 2737 error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci); 2738 if (error) 2739 return error; 2740 2741 *count = ci.count; 2742 *freecount = ci.freecount; 2743 return 0; 2744 } 2745 2746 /* 2747 * Initialize inode-related geometry information. 2748 * 2749 * Compute the inode btree min and max levels and set maxicount. 2750 * 2751 * Set the inode cluster size. This may still be overridden by the file 2752 * system block size if it is larger than the chosen cluster size. 2753 * 2754 * For v5 filesystems, scale the cluster size with the inode size to keep a 2755 * constant ratio of inode per cluster buffer, but only if mkfs has set the 2756 * inode alignment value appropriately for larger cluster sizes. 2757 * 2758 * Then compute the inode cluster alignment information. 
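 *
 * Worked example (illustrative): with 4k blocks and 512 byte inodes,
 * sb_inopblock is 8, so ialloc_inos = max(XFS_INODES_PER_CHUNK, 8) =
 * 64 and ialloc_blks = 64 >> sb_inopblog = 8 blocks per inode chunk.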
2759 */ 2760 void 2761 xfs_ialloc_setup_geometry( 2762 struct xfs_mount *mp) 2763 { 2764 struct xfs_sb *sbp = &mp->m_sb; 2765 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2766 uint64_t icount; 2767 uint inodes; 2768 2769 igeo->new_diflags2 = 0; 2770 if (xfs_has_bigtime(mp)) 2771 igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME; 2772 if (xfs_has_large_extent_counts(mp)) 2773 igeo->new_diflags2 |= XFS_DIFLAG2_NREXT64; 2774 2775 /* Compute inode btree geometry. */ 2776 igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog; 2777 igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); 2778 igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); 2779 igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2; 2780 igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2; 2781 2782 igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK, 2783 sbp->sb_inopblock); 2784 igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog; 2785 2786 if (sbp->sb_spino_align) 2787 igeo->ialloc_min_blks = sbp->sb_spino_align; 2788 else 2789 igeo->ialloc_min_blks = igeo->ialloc_blks; 2790 2791 /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */ 2792 inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG; 2793 igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr, 2794 inodes); 2795 ASSERT(igeo->inobt_maxlevels <= xfs_iallocbt_maxlevels_ondisk()); 2796 2797 /* 2798 * Set the maximum inode count for this filesystem, being careful not 2799 * to use obviously garbage sb_inopblog/sb_inopblock values. Regular 2800 * users should never get here due to failing sb verification, but 2801 * certain users (xfs_db) need to be usable even with corrupt metadata. 2802 */ 2803 if (sbp->sb_imax_pct && igeo->ialloc_blks) { 2804 /* 2805 * Make sure the maximum inode count is a multiple 2806 * of the units we allocate inodes in. 2807 */ 2808 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 2809 do_div(icount, 100); 2810 do_div(icount, igeo->ialloc_blks); 2811 igeo->maxicount = XFS_FSB_TO_INO(mp, 2812 icount * igeo->ialloc_blks); 2813 } else { 2814 igeo->maxicount = 0; 2815 } 2816 2817 /* 2818 * Compute the desired inode cluster buffer size, which 2819 * starts at 8K and (on v5 filesystems) scales up with larger inode 2820 * sizes. 2821 * 2822 * Preserve the desired inode cluster size because the sparse inodes 2823 * feature uses that desired size (not the actual size) to compute the 2824 * sparse inode alignment. The mount code validates this value, so we 2825 * cannot change the behavior. 2826 */ 2827 igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; 2828 if (xfs_has_v3inodes(mp)) { 2829 int new_size = igeo->inode_cluster_size_raw; 2830 2831 new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; 2832 if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size)) 2833 igeo->inode_cluster_size_raw = new_size; 2834 } 2835 2836 /* Calculate inode cluster ratios. */ 2837 if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize) 2838 igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp, 2839 igeo->inode_cluster_size_raw); 2840 else 2841 igeo->blocks_per_cluster = 1; 2842 igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster); 2843 igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster); 2844 2845 /* Calculate inode cluster alignment.
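 * Illustrative example: with the align feature enabled and an
 * sb_inoalignmt of 8 blocks (>= blocks_per_cluster), cluster_align is
 * 8 and inoalign_mask is 7; otherwise cluster_align falls back to 1
 * and no alignment is assumed.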
*/ 2846 if (xfs_has_align(mp) && 2847 mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster) 2848 igeo->cluster_align = mp->m_sb.sb_inoalignmt; 2849 else 2850 igeo->cluster_align = 1; 2851 igeo->inoalign_mask = igeo->cluster_align - 1; 2852 igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align); 2853 2854 /* 2855 * If we are using stripe alignment, check whether 2856 * the stripe unit is a multiple of the inode alignment 2857 */ 2858 if (mp->m_dalign && igeo->inoalign_mask && 2859 !(mp->m_dalign & igeo->inoalign_mask)) 2860 igeo->ialloc_align = mp->m_dalign; 2861 else 2862 igeo->ialloc_align = 0; 2863 } 2864 2865 /* Compute the location of the root directory inode that is laid out by mkfs. */ 2866 xfs_ino_t 2867 xfs_ialloc_calc_rootino( 2868 struct xfs_mount *mp, 2869 int sunit) 2870 { 2871 struct xfs_ino_geometry *igeo = M_IGEO(mp); 2872 xfs_agblock_t first_bno; 2873 2874 /* 2875 * Pre-calculate the geometry of AG 0. We know what it looks like 2876 * because libxfs knows how to create allocation groups now. 2877 * 2878 * first_bno is the first block in which mkfs could possibly have 2879 * allocated the root directory inode, once we factor in the metadata 2880 * that mkfs formats before it. Namely, the four AG headers... 2881 */ 2882 first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize); 2883 2884 /* ...the two free space btree roots... */ 2885 first_bno += 2; 2886 2887 /* ...the inode btree root... */ 2888 first_bno += 1; 2889 2890 /* ...the initial AGFL... */ 2891 first_bno += xfs_alloc_min_freelist(mp, NULL); 2892 2893 /* ...the free inode btree root... */ 2894 if (xfs_has_finobt(mp)) 2895 first_bno++; 2896 2897 /* ...the reverse mapping btree root... */ 2898 if (xfs_has_rmapbt(mp)) 2899 first_bno++; 2900 2901 /* ...the reference count btree... */ 2902 if (xfs_has_reflink(mp)) 2903 first_bno++; 2904 2905 /* 2906 * ...and the log, if it is allocated in the first allocation group. 2907 * 2908 * This can happen with filesystems that only have a single 2909 * allocation group, or very odd geometries created by old mkfs 2910 * versions on very small filesystems. 2911 */ 2912 if (xfs_ag_contains_log(mp, 0)) 2913 first_bno += mp->m_sb.sb_logblocks; 2914 2915 /* 2916 * Now round first_bno up to whatever allocation alignment is given 2917 * by the filesystem or was passed in. 2918 */ 2919 if (xfs_has_dalign(mp) && igeo->ialloc_align > 0) 2920 first_bno = roundup(first_bno, sunit); 2921 else if (xfs_has_align(mp) && 2922 mp->m_sb.sb_inoalignmt > 1) 2923 first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt); 2924 2925 return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno)); 2926 } 2927 2928 /* 2929 * Ensure there are not sparse inode clusters that cross the new EOAG. 2930 * 2931 * This is a no-op for non-spinode filesystems since clusters are always fully 2932 * allocated and checking the bnobt suffices. However, a spinode filesystem 2933 * could have a record where the upper inodes are free blocks. If those blocks 2934 * were removed from the filesystem, the inode record would extend beyond EOAG, 2935 * which will be flagged as corruption. 
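 *
 * Illustrative example: if the new AG length maps to agino 192 but a
 * sparse record starts at agino 160, that record still spans aginos
 * 160-223 (one full 64-inode chunk), so the shrink is refused with
 * -ENOSPC even though the blocks above the new boundary are free
 * space.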
2936 */ 2937 int 2938 xfs_ialloc_check_shrink( 2939 struct xfs_perag *pag, 2940 struct xfs_trans *tp, 2941 struct xfs_buf *agibp, 2942 xfs_agblock_t new_length) 2943 { 2944 struct xfs_inobt_rec_incore rec; 2945 struct xfs_btree_cur *cur; 2946 xfs_agino_t agino; 2947 int has; 2948 int error; 2949 2950 if (!xfs_has_sparseinodes(pag->pag_mount)) 2951 return 0; 2952 2953 cur = xfs_inobt_init_cursor(pag, tp, agibp, XFS_BTNUM_INO); 2954 2955 /* Look up the inobt record that would correspond to the new EOAG. */ 2956 agino = XFS_AGB_TO_AGINO(pag->pag_mount, new_length); 2957 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has); 2958 if (error || !has) 2959 goto out; 2960 2961 error = xfs_inobt_get_rec(cur, &rec, &has); 2962 if (error) 2963 goto out; 2964 2965 if (!has) { 2966 error = -EFSCORRUPTED; 2967 goto out; 2968 } 2969 2970 /* If the record covers inodes that would be beyond EOAG, bail out. */ 2971 if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) { 2972 error = -ENOSPC; 2973 goto out; 2974 } 2975 out: 2976 xfs_btree_del_cursor(cur, error); 2977 return error; 2978 } 2979