// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap_btree.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_trans_space.h"

#define _ALLOC	true
#define _FREE	false

/*
 * A buffer has a format structure overhead in the log in addition
 * to the data, so we need to take this into account when reserving
 * space in a transaction for a buffer.  Round the space required up
 * to a multiple of 128 bytes so that we don't change the historical
 * reservation that has been used for this overhead.
 */
STATIC uint
xfs_buf_log_overhead(void)
{
	return round_up(sizeof(struct xlog_op_header) +
			sizeof(struct xfs_buf_log_format), 128);
}

/*
 * Calculate the transaction log reservation per item in bytes.
 *
 * The nbufs argument is used to indicate the number of items that
 * will be changed in a transaction.  size is used to tell how many
 * bytes should be reserved per item.
 */
STATIC uint
xfs_calc_buf_res(
	uint		nbufs,
	uint		size)
{
	return nbufs * (size + xfs_buf_log_overhead());
}

/*
 * Per-extent log reservation for the btree changes involved in freeing or
 * allocating an extent.  In classic XFS there are two trees that will be
 * modified (bnobt + cntbt).  With rmap enabled, there are three trees
 * (rmapbt).  With reflink, there are four trees (refcountbt).  The number of
 * blocks reserved is based on the formula:
 *
 * num trees * ((2 blocks/level * max depth) - 1)
 *
 * Keep in mind that max depth is calculated separately for each type of tree.
 */
uint
xfs_allocfree_log_count(
	struct xfs_mount	*mp,
	uint			num_ops)
{
	uint			blocks;

	blocks = num_ops * 2 * (2 * mp->m_ag_maxlevels - 1);
	if (xfs_has_rmapbt(mp))
		blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
	if (xfs_has_reflink(mp))
		blocks += num_ops * (2 * mp->m_refc_maxlevels - 1);

	return blocks;
}
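
/*
 * Illustrative example (hypothetical geometry, not derived from any real
 * mount): with bnobt/cntbt at a max depth of 3 and neither rmapbt nor
 * reflink enabled, freeing or allocating num_ops = 2 extents reserves
 * 2 * 2 * (2 * 3 - 1) = 20 blocks.  Enabling rmapbt with a max depth of 4
 * would add a further 2 * (2 * 4 - 1) = 14 blocks.
 */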

/*
 * Logging inodes is really tricksy. They are logged in memory format,
 * which means that what we write into the log doesn't directly translate into
 * the amount of space they use on disk.
 *
 * Case in point - btree format forks in memory format use more space than the
 * on-disk format. In memory, the buffer contains a normal btree block header so
 * the btree code can treat it as though it is just another generic buffer.
 * However, when we write it to the inode fork, we don't write all of this
 * header as it isn't needed. e.g. the root is only ever in the inode, so
 * there's no need for sibling pointers which would waste 16 bytes of space.
 *
 * Hence when we have an inode with a maximally sized btree format fork, the
 * amount of information we actually log is greater than the size of the inode
 * on disk. Hence we need an inode reservation function that calculates all this
 * correctly. So, we log:
 *
 * - 4 log op headers for object
 *	- for the ilf, the inode core and 2 forks
 * - inode log format object
 * - the inode core
 * - two inode forks containing bmap btree root blocks.
 *	- the btree data contained by both forks will fit into the inode size,
 *	  hence when combined with the inode core above, we have a total of the
 *	  actual inode size.
 *	- the BMBT headers need to be accounted separately, as they are
 *	  additional to the records and pointers that fit inside the inode
 *	  forks.
 */
STATIC uint
xfs_calc_inode_res(
	struct xfs_mount	*mp,
	uint			ninodes)
{
	return ninodes *
		(4 * sizeof(struct xlog_op_header) +
		 sizeof(struct xfs_inode_log_format) +
		 mp->m_sb.sb_inodesize +
		 2 * XFS_BMBT_BLOCK_LEN(mp));
}

/*
 * Inode btree record insertion/removal modifies the inode btree and free space
 * btrees (since the inobt does not use the agfl). This requires the following
 * reservation:
 *
 * the inode btree: max depth * blocksize
 * the allocation btrees: 2 trees * (max depth - 1) * block size
 *
 * The caller must account for SB and AG header modifications, etc.
 */
STATIC uint
xfs_calc_inobt_res(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels,
				XFS_FSB_TO_B(mp, 1)) +
	       xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
				XFS_FSB_TO_B(mp, 1));
}

/*
 * The free inode btree is a conditional feature. The behavior differs slightly
 * from that of the traditional inode btree in that the finobt tracks records
 * for inode chunks with at least one free inode. A record can be removed from
 * the tree during individual inode allocation. Therefore the finobt
 * reservation is unconditional for both the inode chunk allocation and
 * individual inode allocation (modify) cases.
 *
 * Behavior aside, the reservation for finobt modification is equivalent to the
 * traditional inobt: cover a full finobt shape change plus block allocation.
 */
STATIC uint
xfs_calc_finobt_res(
	struct xfs_mount	*mp)
{
	if (!xfs_has_finobt(mp))
		return 0;

	return xfs_calc_inobt_res(mp);
}

/*
 * Calculate the reservation required to allocate or free an inode chunk. This
 * includes:
 *
 * the allocation btrees: 2 trees * (max depth - 1) * block size
 * the inode chunk: m_ino_geo.ialloc_blks * N
 *
 * The size N of the inode chunk reservation depends on whether it is for
 * allocation or free and which type of create transaction is in use. An inode
 * chunk free always invalidates the buffers and only requires reservation for
 * headers (N == 0). An inode chunk allocation requires a chunk sized
 * reservation on v4 and older superblocks to initialize the chunk. No chunk
 * reservation is required for allocation on v5 supers, which use ordered
 * buffers to initialize.
 */
STATIC uint
xfs_calc_inode_chunk_res(
	struct xfs_mount	*mp,
	bool			alloc)
{
	uint			res, size = 0;

	res = xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
			       XFS_FSB_TO_B(mp, 1));
	if (alloc) {
		/* icreate tx uses ordered buffers */
		if (xfs_has_v3inodes(mp))
			return res;
		size = XFS_FSB_TO_B(mp, 1);
	}

	res += xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
	return res;
}
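
/*
 * Worked example for xfs_calc_inode_res() (the sizes here are illustrative
 * assumptions, not values taken from this file): with a 512-byte on-disk
 * inode, ~12-byte log op headers, a ~56-byte inode log format header and
 * 72-byte long-format btree block headers (v5), a single inode accounts for
 * roughly 4 * 12 + 56 + 512 + 2 * 72 = 760 bytes of log space.
 */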

/*
 * Per-extent log reservation for the btree changes involved in freeing or
 * allocating a realtime extent.  We have to be able to log as many rtbitmap
 * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents,
 * as well as the realtime summary block.
 */
static unsigned int
xfs_rtalloc_log_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
	unsigned int		rtbmp_bytes;

	rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
	return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
}

/*
 * Various log reservation values.
 *
 * These are based on the size of the file system block because that is what
 * most transactions manipulate.  Each adds in an additional 128 bytes per
 * item logged to try to account for the overhead of the transaction mechanism.
 *
 * Note:  Most of the reservations underestimate the number of allocation
 * groups into which they could free extents in the xfs_defer_finish() call.
 * This is because the number in the worst case is quite high and quite
 * unusual.  In order to fix this we need to change xfs_defer_finish() to free
 * extents in only a single AG at a time.  This will require changes to the
 * EFI code as well, however, so that the EFI for the extents not freed is
 * logged again in each transaction.  See SGI PV #261917.
 *
 * Reservation functions here avoid a huge stack in xfs_trans_init due to
 * register overflow from temporaries in the calculations.
 */

/*
 * In a write transaction we can allocate a maximum of 2
 * extents.  This gives (t1):
 *    the inode getting the new extents: inode size
 *    the inode's bmap btree: max depth * block size
 *    the agfs of the ags from which the extents are allocated: 2 * sector
 *    the superblock free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 * Or, if we're writing to a realtime file (t2):
 *    the inode getting the new extents: inode size
 *    the inode's bmap btree: max depth * block size
 *    the agfs of the ags from which the extents are allocated: 2 * sector
 *    the superblock free block counter: sector size
 *    the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
 *    the realtime summary: 1 block
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 * And the bmap_finish transaction can free bmap blocks in a join (t3):
 *    the agfs of the ags containing the blocks: 2 * sector size
 *    the agfls of the ags containing the blocks: 2 * sector size
 *    the super block free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_write_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		t1, t2, t3;
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);

	t1 = xfs_calc_inode_res(mp, 1) +
	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
	     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);

	if (xfs_has_realtime(mp)) {
		t2 = xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
				      blksz) +
		     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
	} else {
		t2 = 0;
	}

	t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);

	return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
}
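
/*
 * Rough illustration of the t1 leg above, under assumed geometry (4096-byte
 * blocks, 512-byte sectors, a data-fork bmbt depth of 5, and
 * xfs_allocfree_log_count(mp, 2) == 20 as in the earlier example):
 *
 *	t1 ~= inode res + 5 * (4096 + 128) + 3 * (512 + 128) + 20 * (4096 + 128)
 *	   ~= inode res + 107520 bytes
 *
 * The reservation actually used is the largest of the three legs, plus the
 * dquot reservation.
 */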

/*
 * In truncating a file we free up to two extents at once.  We can modify (t1):
 *    the inode being truncated: inode size
 *    the inode's bmap btree: (max depth + 1) * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *		4 exts * 2 trees * (2 * max depth - 1) * block size
 * Or, if it's a realtime file (t3):
 *    the agf for each of the ags: 2 * sector size
 *    the agfl for each of the ags: 2 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
 *    the realtime summary: 2 exts * 1 block
 *    worst case split in allocation btrees per extent assuming 2 extents:
 *		2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_itruncate_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		t1, t2, t3;
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);

	t1 = xfs_calc_inode_res(mp, 1) +
	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);

	t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);

	if (xfs_has_realtime(mp)) {
		t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) +
		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
	} else {
		t3 = 0;
	}

	return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
}

/*
 * In renaming a file we can modify:
 *    the four inodes involved: 4 * inode size
 *    the two directory btrees: 2 * (max depth + v2) * dir block size
 *    the two directory bmap btrees: 2 * max depth * block size
 * And the bmap_finish transaction can free dir and bmap blocks (two sets
 * of bmap blocks) giving:
 *    the agf for the ags in which the blocks live: 3 * sector size
 *    the agfl for the ags in which the blocks live: 3 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_rename_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		max((xfs_calc_inode_res(mp, 4) +
		     xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
				      XFS_FSB_TO_B(mp, 1))),
		    (xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 3),
				      XFS_FSB_TO_B(mp, 1))));
}

/*
 * For removing an inode from the unlinked list we can modify:
 *    the agi hash list and counters: sector size
 *    the on disk inode before ours in the agi hash list: inode cluster size
 *    the on disk inode in the agi hash list: inode cluster size
 */
STATIC uint
xfs_calc_iunlink_remove_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       2 * M_IGEO(mp)->inode_cluster_size;
}
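
/*
 * Illustration (assumed geometry): with a 512-byte sector and an 8192-byte
 * inode cluster, the unlinked-list removal above reserves
 * (512 + 128) + 2 * 8192 = 17024 bytes.  Note that the cluster buffers are
 * added at face value, without the per-buffer log overhead.
 */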

/*
 * For creating a link to an inode:
 *    the parent directory inode: inode size
 *    the linked inode: inode size
 *    the directory btree could split: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free some bmap blocks giving:
 *    the agf for the ag in which the blocks live: sector size
 *    the agfl for the ag in which the blocks live: sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_link_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		xfs_calc_iunlink_remove_reservation(mp) +
		max((xfs_calc_inode_res(mp, 2) +
		     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
				      XFS_FSB_TO_B(mp, 1))),
		    (xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
				      XFS_FSB_TO_B(mp, 1))));
}

/*
 * For adding an inode to the unlinked list we can modify:
 *    the agi hash list: sector size
 *    the on disk inode: inode cluster size
 */
STATIC uint
xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       M_IGEO(mp)->inode_cluster_size;
}

/*
 * For removing a directory entry we can modify:
 *    the parent directory inode: inode size
 *    the removed inode: inode size
 *    the directory btree could join: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free the dir and bmap blocks giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_remove_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		xfs_calc_iunlink_add_reservation(mp) +
		max((xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
				      XFS_FSB_TO_B(mp, 1))),
		    (xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
				      XFS_FSB_TO_B(mp, 1))));
}
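
/*
 * Illustration (assumed geometry): with a 512-byte sector and an 8192-byte
 * inode cluster, the iunlink-add term folded into the remove reservation
 * above is (512 + 128) + 8192 = 8832 bytes; the rest of the reservation is
 * whichever is larger of the directory-modification leg and the
 * extent-freeing leg.
 */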

/*
 * For create, break it into the two cases that the transaction
 * covers.  We start with the modify case - allocation done by modifying
 * the state of existing inodes - and then the allocation case.
 */

/*
 * For create we can modify:
 *    the parent directory inode: inode size
 *    the new inode: inode size
 *    the inode btree entry: block size
 *    the superblock for the nlink flag: sector size
 *    the directory btree: (max depth + v2) * dir block size
 *    the directory inode's bmap btree: (max depth + v2) * block size
 *    the finobt (record modification and allocation btrees)
 */
STATIC uint
xfs_calc_create_resv_modify(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 2) +
		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		(uint)XFS_FSB_TO_B(mp, 1) +
		xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
		xfs_calc_finobt_res(mp);
}

/*
 * For icreate we can allocate some inodes giving:
 *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
 *    the superblock for the nlink flag: sector size
 *    the inode chunk (allocation, optional init)
 *    the inobt (record insertion)
 *    the finobt (optional, record insertion)
 */
STATIC uint
xfs_calc_icreate_resv_alloc(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
		mp->m_sb.sb_sectsize +
		xfs_calc_inode_chunk_res(mp, _ALLOC) +
		xfs_calc_inobt_res(mp) +
		xfs_calc_finobt_res(mp);
}

STATIC uint
xfs_calc_icreate_reservation(xfs_mount_t *mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		max(xfs_calc_icreate_resv_alloc(mp),
		    xfs_calc_create_resv_modify(mp));
}

STATIC uint
xfs_calc_create_tmpfile_reservation(
	struct xfs_mount	*mp)
{
	uint	res = XFS_DQUOT_LOGRES(mp);

	res += xfs_calc_icreate_resv_alloc(mp);
	return res + xfs_calc_iunlink_add_reservation(mp);
}

/*
 * Making a new directory is the same as creating a new file.
 */
STATIC uint
xfs_calc_mkdir_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_icreate_reservation(mp);
}

/*
 * Making a new symlink is the same as creating a new file, but
 * with the added blocks for remote symlink data which can be up to 1kB in
 * length (XFS_SYMLINK_MAXLEN).
 */
STATIC uint
xfs_calc_symlink_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_icreate_reservation(mp) +
	       xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
}
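
/*
 * Illustration: since remote symlink data is capped at the 1kB
 * XFS_SYMLINK_MAXLEN mentioned above, the symlink reservation is simply the
 * icreate reservation plus one (1024 + 128)-byte buffer reservation, i.e.
 * an extra 1152 bytes (assuming the log overhead rounds to 128 bytes).
 */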

/*
 * In freeing an inode we can modify:
 *    the inode being freed: inode size
 *    the super block free inode counter, AGF and AGFL: sector size
 *    the on disk inode (agi unlinked list removal)
 *    the inode chunk (invalidated, headers only)
 *    the inode btree
 *    the finobt (record insertion, removal or modification)
 *
 * Note that the inode chunk res. includes an allocfree res. for freeing of the
 * inode chunk. This is technically extraneous because the inode chunk free is
 * deferred (it occurs after a transaction roll). Include the extra reservation
 * anyway since we've had reports of ifree transaction overruns due to too many
 * agfl fixups during inode chunk frees.
 */
STATIC uint
xfs_calc_ifree_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		xfs_calc_iunlink_remove_reservation(mp) +
		xfs_calc_inode_chunk_res(mp, _FREE) +
		xfs_calc_inobt_res(mp) +
		xfs_calc_finobt_res(mp);
}

/*
 * When only changing the inode we log the inode and possibly the superblock.
 * We also add a bit of slop for the transaction stuff.
 */
STATIC uint
xfs_calc_ichange_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Growing the data section of the filesystem.
 *	superblock
 *	agi and agf
 *	allocation btrees
 */
STATIC uint
xfs_calc_growdata_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the first set of transactions (ALLOC) we allocate space to the
 * bitmap or summary files.
 *	superblock: sector size
 *	agf of the ag from which the extent is allocated: sector size
 *	bmap btree for bitmap/summary inode: max depth * blocksize
 *	bitmap/summary inode: inode size
 *	allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
 */
STATIC uint
xfs_calc_growrtalloc_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
				 XFS_FSB_TO_B(mp, 1)) +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the second set of transactions (ZERO) we zero the new metadata blocks.
 *	one bitmap/summary block: blocksize
 */
STATIC uint
xfs_calc_growrtzero_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
}

/*
 * Growing the rt section of the filesystem.
 * In the third set of transactions (FREE) we update metadata without
 * allocating any new blocks.
 *	superblock: sector size
 *	bitmap inode: inode size
 *	summary inode: inode size
 *	one bitmap block: blocksize
 *	summary blocks: new summary size
 */
STATIC uint
xfs_calc_growrtfree_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		xfs_calc_inode_res(mp, 2) +
		xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
		xfs_calc_buf_res(1, mp->m_rsumsize);
}

/*
 * Logging the inode modification timestamp on a synchronous write.
 *	inode
 */
STATIC uint
xfs_calc_swrite_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 1);
}

/*
 * Logging the inode mode bits when writing a setuid/setgid file
 *	inode
 */
STATIC uint
xfs_calc_writeid_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 1);
}
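
/*
 * Illustration for xfs_calc_growdata_reservation() (assumed geometry:
 * 512-byte sectors, 4096-byte blocks, and xfs_allocfree_log_count(mp, 1)
 * == 10 as in the earlier depth-3 example):
 * 3 * (512 + 128) + 10 * (4096 + 128) = 44160 bytes.
 */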

/*
 * Converting the inode from non-attributed to attributed.
 *	the inode being converted: inode size
 *	agf block and superblock (for block allocation)
 *	the new block (directory sized)
 *	bmap blocks for the new directory block
 *	allocation btrees
 */
STATIC uint
xfs_calc_addafork_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
		xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
				 XFS_FSB_TO_B(mp, 1)) +
		xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Removing the attribute fork of a file
 *    the inode being truncated: inode size
 *    the inode's bmap btree: max depth * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks:
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *		4 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrinval_reservation(
	struct xfs_mount	*mp)
{
	return max((xfs_calc_inode_res(mp, 1) +
		    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
				     XFS_FSB_TO_B(mp, 1))),
		   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
		    xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
				     XFS_FSB_TO_B(mp, 1))));
}

/*
 * Setting an attribute at mount time (the part of the reservation that can
 * be computed up front):
 *	the inode getting the attribute
 *	the superblock for allocations
 *	the agfs extents are allocated from
 *	the attribute btree * max depth
 *	the inode allocation btree
 * Since attribute transaction space is dependent on the size of the attribute,
 * the calculation is done partially at mount time and partially at runtime (see
 * below).
 */
STATIC uint
xfs_calc_attrsetm_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
}

/*
 * Setting an attribute at runtime, transaction space unit per block.
 *	the superblock for allocations: sector size
 *	the inode bmap btree could join or split: max depth * block size
 * Since the runtime attribute transaction space is dependent on the total
 * blocks needed for the 1st bmap, here we calculate out the space unit for
 * one block so that the caller could figure out the total space according
 * to the attribute extent length in blocks by:
 *	ext * M_RES(mp)->tr_attrsetrt.tr_logres
 */
STATIC uint
xfs_calc_attrsetrt_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
				 XFS_FSB_TO_B(mp, 1));
}
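
/*
 * Illustration of the runtime scaling described above: a caller storing an
 * attribute whose value occupies, say, 3 blocks would add
 * 3 * M_RES(mp)->tr_attrsetrt.tr_logres on top of the mount-time
 * tr_attrsetm reservation (the block count here is purely illustrative).
 */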

/*
 * Removing an attribute.
 *    the inode: inode size
 *    the attribute btree could join: max depth * block size
 *    the inode bmap btree could join or split: max depth * block size
 * And the bmap_finish transaction can free the attr blocks freed giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrrm_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		max((xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
				      XFS_FSB_TO_B(mp, 1)) +
		     (uint)XFS_FSB_TO_B(mp,
					XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
		    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
				      XFS_FSB_TO_B(mp, 1))));
}

/*
 * Clearing a bad agino number in an agi hash bucket.
 */
STATIC uint
xfs_calc_clear_agi_bucket_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Adjusting quota limits.
 *    the disk quota buffer: sizeof(struct xfs_disk_dquot)
 */
STATIC uint
xfs_calc_qm_setqlim_reservation(void)
{
	return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
}

/*
 * Allocating quota on disk if needed.
 *	the write transaction log space for quota file extent allocation
 *	the unit of quota allocation: one system block size
 */
STATIC uint
xfs_calc_qm_dqalloc_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_write_reservation(mp) +
		xfs_calc_buf_res(1,
			XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
}
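
/*
 * Illustration (assuming a 4096-byte block and a dquot cluster of
 * XFS_DQUOT_CLUSTER_SIZE_FSB == 3 blocks, an assumption for the example):
 * the quota allocation term above adds (3 * 4096 - 1) + 128 = 12415 bytes
 * on top of the full write reservation.
 */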

/*
 * Syncing the incore super block changes to disk.
 *	the super block to reflect the changes: sector size
 */
STATIC uint
xfs_calc_sb_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

void
xfs_trans_resv_calc(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	/*
	 * The following transactions are logged in physical format and
	 * require a permanent reservation on space.
	 */
	resp->tr_write.tr_logres = xfs_calc_write_reservation(mp);
	if (xfs_has_reflink(mp))
		resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT_REFLINK;
	else
		resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
	resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp);
	if (xfs_has_reflink(mp))
		resp->tr_itruncate.tr_logcount =
				XFS_ITRUNCATE_LOG_COUNT_REFLINK;
	else
		resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
	resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
	resp->tr_rename.tr_logcount = XFS_RENAME_LOG_COUNT;
	resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
	resp->tr_link.tr_logcount = XFS_LINK_LOG_COUNT;
	resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
	resp->tr_remove.tr_logcount = XFS_REMOVE_LOG_COUNT;
	resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
	resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
	resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
	resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
	resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_create_tmpfile.tr_logres =
			xfs_calc_create_tmpfile_reservation(mp);
	resp->tr_create_tmpfile.tr_logcount = XFS_CREATE_TMPFILE_LOG_COUNT;
	resp->tr_create_tmpfile.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
	resp->tr_mkdir.tr_logcount = XFS_MKDIR_LOG_COUNT;
	resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
	resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
	resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
	resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
	resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
	resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
	resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
	resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
	resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
	resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
	resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp);
	if (xfs_has_reflink(mp))
		resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT_REFLINK;
	else
		resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
	resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
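
	/*
	 * For the permanent reservations above, tr_logcount is the number of
	 * reservation units taken up front, i.e. roughly how many times the
	 * transaction chain can roll before fresh log space must be granted.
	 * The reflink variants of the write, itruncate and dqalloc counts
	 * allow extra rolls, presumably for the deferred refcount/CoW work
	 * those paths can queue (an interpretation, not stated in this file).
	 */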

	/*
	 * The following transactions are logged in logical format with
	 * a default log count.
	 */
	resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
	resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;

	resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
	resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;

	/* growdata requires permanent res; it can free space to the last AG */
	resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
	resp->tr_growdata.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
	resp->tr_growdata.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	/* The following transactions are logged in logical format */
	resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
	resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
	resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
	resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);
}
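
/*
 * Usage sketch (illustrative only, not part of this file): callers pick one
 * of the precomputed reservations and pass it to xfs_trans_alloc(), e.g.
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *
 * where M_RES(mp) points at the struct xfs_trans_resv filled in by
 * xfs_trans_resv_calc() above.
 */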