// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
                                   ext4_fsblk_t block)
{
        ext4_group_t group;

        if (test_opt2(sb, STD_GROUP_SIZE))
                group = (block -
                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
                        (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
        else
                ext4_get_group_no_and_offset(sb, block, &group, NULL);
        return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                                  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
                EXT4_SB(sb)->s_cluster_bits;
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}
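/*
 * Worked example for the two helpers above (illustrative only): on a
 * filesystem with 4KiB blocks, no bigalloc (cluster ratio 1) and
 * s_first_data_block == 0, EXT4_BLOCKS_PER_GROUP() is 8 * 4096 = 32768,
 * so block 100000 lands in group 100000 / 32768 = 3 at offset
 * 100000 % 32768 = 1696.  With STD_GROUP_SIZE set,
 * ext4_get_group_number() computes the same group with a single shift
 * by (12 + 0 + 3) = 15 bits.
 */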
/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
                                      ext4_fsblk_t block,
                                      ext4_group_t block_group)
{
        ext4_group_t actual_group;

        actual_group = ext4_get_group_number(sb, block);
        return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                           ext4_group_t block_group,
                                           struct ext4_group_desc *gdp)
{
        unsigned num_clusters;
        int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
        ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
        ext4_fsblk_t itbl_blk;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /* This is the number of clusters used by the superblock,
         * block group descriptors, and reserved block group
         * descriptor blocks */
        num_clusters = ext4_num_base_meta_clusters(sb, block_group);

        /*
         * For the allocation bitmaps and inode table, we first need
         * to check to see if the block is in the block group.  If it
         * is, then check to see if the cluster is already accounted
         * for in the clusters used for the base metadata cluster, or
         * if we can increment the base metadata cluster to include
         * that block.  Otherwise, we will have to track the cluster
         * used for the allocation bitmap or inode table explicitly.
         * Normally all of these blocks are contiguous, so the special
         * case handling shouldn't be necessary except for *very*
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
                block_cluster = EXT4_B2C(sbi,
                                         ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
                        num_clusters++;
                        block_cluster = -1;
                }
        }

        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
                                         ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
                        num_clusters++;
                        inode_cluster = -1;
                }
        }

        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
                        c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
                        if (c == num_clusters) {
                                num_clusters++;
                                continue;
                        }
                        num_clusters++;
                        itbl_cluster = c;
                }
        }

        if (block_cluster != -1)
                num_clusters++;
        if (inode_cluster != -1)
                num_clusters++;

        return num_clusters;
}
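/*
 * Worked example for ext4_num_overhead_clusters() (illustrative
 * only): in the common layout, where the bitmaps and inode table
 * immediately follow the base metadata, none of the special cases
 * above trigger - each bitmap or inode table cluster either already
 * falls below num_clusters or extends it by exactly one, giving
 * base meta clusters + 1 (block bitmap) + 1 (inode bitmap) + the
 * inode table blocks, converted to clusters.
 */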
static unsigned int num_clusters_in_group(struct super_block *sb,
                                          ext4_group_t block_group)
{
        unsigned int blocks;

        if (block_group == ext4_get_groups_count(sb) - 1) {
                /*
                 * Even though mke2fs always initializes the first and
                 * last group, just in case some other tool was used,
                 * we need to make sure we calculate the right free
                 * blocks.
                 */
                blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
                        ext4_group_first_block_no(sb, block_group);
        } else
                blocks = EXT4_BLOCKS_PER_GROUP(sb);
        return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
                                  struct buffer_head *bh,
                                  ext4_group_t block_group,
                                  struct ext4_group_desc *gdp)
{
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
        int flex_bg = 0;
        struct ext4_group_info *grp;

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If the checksum is bad, mark all blocks used to prevent
         * allocation, essentially implementing a per-group read-only
         * flag. */
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                grp = ext4_get_group_info(sb, block_group);
                if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        percpu_counter_sub(&sbi->s_freeclusters_counter,
                                           grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
                        int count;
                        count = ext4_free_inodes_count(sb, gdp);
                        percpu_counter_sub(&sbi->s_freeinodes_counter,
                                           count);
                }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return -EFSBADCRC;
        }
        memset(bh->b_data, 0, sb->s_blocksize);

        bit_max = ext4_num_base_meta_clusters(sb, block_group);
        if ((bit_max >> 3) >= bh->b_size)
                return -EFSCORRUPTED;

        for (bit = 0; bit < bit_max; bit++)
                ext4_set_bit(bit, bh->b_data);

        start = ext4_group_first_block_no(sb, block_group);

        if (ext4_has_feature_flex_bg(sb))
                flex_bg = 1;

        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_bitmap(sb, gdp);
        if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
                if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }

        /*
         * If the number of clusters in the group is less than
         * blocksize * 8 (the number of bits in the bitmap), set the
         * rest of the block bitmap to 1.
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
        ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        return 0;
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        return num_clusters_in_group(sb, block_group) -
                ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains one block bitmap, one inode bitmap,
 * N blocks for the inode table, and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the group's bitmap
 * block and the free block count of the group.  The descriptors are loaded
 * in memory when a file system is mounted (see ext4_fill_super).
 */
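/*
 * Typical (non-flex_bg) block group layout, illustrative only:
 *
 *   +-------+-------------+--------+--------+-------------+--------+
 *   | super | group descs | block  | inode  | inode table | data   |
 *   | block | + resvd GDT | bitmap | bitmap | (N blocks)  | blocks |
 *   +-------+-------------+--------+--------+-------------+--------+
 *
 * The superblock and descriptor copies are only present in groups
 * that hold a primary or backup copy (see ext4_bg_has_super()).
 */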
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);

                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}
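/*
 * Worked example for the descriptor lookup above (illustrative only):
 * with 4KiB blocks and 64-byte descriptors (the 64bit layout),
 * EXT4_DESC_PER_BLOCK() is 4096 / 64 = 64, so block group 200 is
 * found in descriptor block 200 >> 6 = 3 at index 200 & 63 = 8.
 */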
335 */ 336 return 0; 337 } 338 group_first_block = ext4_group_first_block_no(sb, block_group); 339 340 /* check whether block bitmap block number is set */ 341 blk = ext4_block_bitmap(sb, desc); 342 offset = blk - group_first_block; 343 if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 344 /* bad block bitmap */ 345 return blk; 346 347 /* check whether the inode bitmap block number is set */ 348 blk = ext4_inode_bitmap(sb, desc); 349 offset = blk - group_first_block; 350 if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 351 /* bad block bitmap */ 352 return blk; 353 354 /* check whether the inode table block number is set */ 355 blk = ext4_inode_table(sb, desc); 356 offset = blk - group_first_block; 357 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 358 EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group), 359 EXT4_B2C(sbi, offset)); 360 if (next_zero_bit < 361 EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group)) 362 /* bad bitmap for inode tables */ 363 return blk; 364 return 0; 365 } 366 367 static int ext4_validate_block_bitmap(struct super_block *sb, 368 struct ext4_group_desc *desc, 369 ext4_group_t block_group, 370 struct buffer_head *bh) 371 { 372 ext4_fsblk_t blk; 373 struct ext4_group_info *grp = ext4_get_group_info(sb, block_group); 374 struct ext4_sb_info *sbi = EXT4_SB(sb); 375 376 if (buffer_verified(bh)) 377 return 0; 378 if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 379 return -EFSCORRUPTED; 380 381 ext4_lock_group(sb, block_group); 382 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, 383 desc, bh))) { 384 ext4_unlock_group(sb, block_group); 385 ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); 386 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 387 percpu_counter_sub(&sbi->s_freeclusters_counter, 388 grp->bb_free); 389 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 390 return -EFSBADCRC; 391 } 392 blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); 393 if (unlikely(blk != 0)) { 394 ext4_unlock_group(sb, block_group); 395 ext4_error(sb, "bg %u: block %llu: invalid block bitmap", 396 block_group, blk); 397 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 398 percpu_counter_sub(&sbi->s_freeclusters_counter, 399 grp->bb_free); 400 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 401 return -EFSCORRUPTED; 402 } 403 set_buffer_verified(bh); 404 ext4_unlock_group(sb, block_group); 405 return 0; 406 } 407 408 /** 409 * ext4_read_block_bitmap_nowait() 410 * @sb: super block 411 * @block_group: given block group 412 * 413 * Read the bitmap for a given block_group,and validate the 414 * bits for block/inode/inode tables are set in the bitmaps 415 * 416 * Return buffer_head on success or NULL in case of failure. 
/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and inode table are set in the bitmap.
 *
 * Return buffer_head on success or an ERR_PTR in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh;
        ext4_fsblk_t bitmap_blk;
        int err;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return ERR_PTR(-EFSCORRUPTED);
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot get buffer for block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, bitmap_blk);
                return ERR_PTR(-ENOMEM);
        }

        if (bitmap_uptodate(bh))
                goto verify;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                err = ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                if (err) {
                        ext4_error(sb, "Failed to init block bitmap for group "
                                   "%u: %d", block_group, err);
                        goto out;
                }
                goto verify;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * If not uninit: when bh is uptodate, the bitmap is
                 * also uptodate.
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                goto verify;
        }
        /*
         * submit the buffer_head for reading
         */
        set_buffer_new(bh);
        trace_ext4_read_block_bitmap_load(sb, block_group);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
        return bh;
verify:
        err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
        if (err)
                goto out;
        return bh;
out:
        put_bh(bh);
        return ERR_PTR(err);
}

/* Returns 0 on success, -errno on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
                           struct buffer_head *bh)
{
        struct ext4_group_desc *desc;

        if (!buffer_new(bh))
                return 0;
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return -EFSCORRUPTED;
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                ext4_error(sb, "Cannot read block bitmap - "
                           "block_group = %u, block_bitmap = %llu",
                           block_group, (unsigned long long) bh->b_blocknr);
                return -EIO;
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
        return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct buffer_head *bh;
        int err;

        bh = ext4_read_block_bitmap_nowait(sb, block_group);
        if (IS_ERR(bh))
                return bh;
        err = ext4_wait_block_bitmap(sb, block_group, bh);
        if (err) {
                put_bh(bh);
                return ERR_PTR(err);
        }
        return bh;
}
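/*
 * Typical caller pattern for the helpers above (illustrative sketch
 * only):
 *
 *	struct buffer_head *bh;
 *
 *	bh = ext4_read_block_bitmap(sb, group);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	... examine bh->b_data under ext4_lock_group(sb, group) ...
 *	brelse(bh);
 *
 * The _nowait/_wait split exists so that callers can kick off reads
 * for several groups before blocking on any single one.
 */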
/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed blocks
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, on failure return 0.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
                                  s64 nclusters, unsigned int flags)
{
        s64 free_clusters, dirty_clusters, rsv, resv_clusters;
        struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
        struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

        free_clusters  = percpu_counter_read_positive(fcc);
        dirty_clusters = percpu_counter_read_positive(dcc);
        resv_clusters = atomic64_read(&sbi->s_resv_clusters);

        /*
         * r_blocks_count should always be a multiple of the cluster
         * ratio, so we are safe to do a plain bit shift only.
         */
        rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
              resv_clusters;

        if (free_clusters - (nclusters + rsv + dirty_clusters) <
                                        EXT4_FREECLUSTERS_WATERMARK) {
                free_clusters  = percpu_counter_sum_positive(fcc);
                dirty_clusters = percpu_counter_sum_positive(dcc);
        }
        /* Check whether we have space after accounting for current
         * dirty clusters & root reserved clusters.
         */
        if (free_clusters >= (rsv + nclusters + dirty_clusters))
                return 1;

        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE) ||
            (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

                if (free_clusters >= (nclusters + dirty_clusters +
                                      resv_clusters))
                        return 1;
        }
        /* No free blocks.  Let's see if we can dip into the reserved pool */
        if (flags & EXT4_MB_USE_RESERVED) {
                if (free_clusters >= (nclusters + dirty_clusters))
                        return 1;
        }

        return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
                             s64 nclusters, unsigned int flags)
{
        if (ext4_has_free_clusters(sbi, nclusters, flags)) {
                percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
                return 0;
        } else
                return -ENOSPC;
}
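/*
 * Worked example for ext4_has_free_clusters() (illustrative only):
 * with 1000 free clusters, 100 dirty clusters, rsv = 50 and
 * nclusters = 10, the slack 1000 - (10 + 50 + 100) = 840 is well
 * above EXT4_FREECLUSTERS_WATERMARK, so the cheap per-cpu reads are
 * trusted and 1000 >= 50 + 10 + 100 succeeds without the expensive
 * percpu_counter_sum_positive() calls.
 */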
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of retry attempts made so far
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.  The number of retries is capped.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
            (*retries)++ > 1 ||
            !EXT4_SB(sb)->s_journal)
                return 0;

        smp_mb();
        if (EXT4_SB(sb)->s_mb_free_pending == 0)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
        jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
        return 1;
}

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @flags:	allocation flags
 * @count:	pointer to total number of clusters needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                                  ext4_fsblk_t goal, unsigned int flags,
                                  unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks.  We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
                dquot_alloc_block_nofail(inode,
                                EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
        }
        return ret;
}
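/*
 * Typical use of ext4_new_meta_blocks() (illustrative sketch only):
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t blk;
 *
 *	blk = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
 *	if (err)
 *		return err;
 *
 * On success 'blk' is the first of 'count' newly allocated blocks.
 */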
/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (IS_ERR(bitmap_bh)) {
                        bitmap_bh = NULL;
                        continue;
                }

                x = ext4_count_free(bitmap_bh->b_data,
                                    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                       i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
               EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
        }

        return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
        while (1) {
                if (a < b)
                        return 0;
                if (a == b)
                        return 1;
                if ((a % b) != 0)
                        return 0;
                a = a / b;
        }
}
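/*
 * For example (illustrative only): test_root(9, 3) reduces
 * 9 -> 3 == b and returns 1 (9 is a power of 3), while
 * test_root(6, 3) reduces 6 -> 2 < 3 and returns 0.  The loop
 * divides rather than multiplies, so it cannot overflow.
 */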
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (group == 0)
                return 1;
        if (ext4_has_feature_sparse_super2(sb)) {
                if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
                    group == le32_to_cpu(es->s_backup_bgs[1]))
                        return 1;
                return 0;
        }
        if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
                return 1;
        if (!(group & 1))
                return 0;
        if (test_root(group, 3) || (test_root(group, 5)) ||
            test_root(group, 7))
                return 1;

        return 0;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                          ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                            ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (ext4_has_feature_meta_bg(sb))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned num;

        /* Check for superblock and gdt backups in this group */
        num = ext4_bg_has_super(sb, block_group);

        if (!ext4_has_feature_meta_bg(sb) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (num) {
                        num += ext4_bg_num_gdb(sb, block_group);
                        num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                num += ext4_bg_num_gdb(sb, block_group);
        }
        return EXT4_NUM_B2C(sbi, num);
}
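/*
 * Worked example (illustrative only): with sparse_super, backup
 * superblocks live in group 1 and the powers of 3, 5 and 7
 * (1, 3, 5, 7, 9, 25, 27, 49, 81, ...).  On a 4KiB-block filesystem
 * with 64-byte descriptors and 16384 groups, such a group pays
 * 1 (superblock) + 256 (descriptor blocks, 16384/64)
 * + s_reserved_gdt_blocks clusters of base metadata; every other
 * group pays none.
 */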
/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_group_t block_group;
        ext4_grpblk_t colour;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;

        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                /*
                 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
                 * block groups per flexgroup, reserve the first block
                 * group for directories and special files.  Regular
                 * files will start at the second block group.  This
                 * tends to speed up directory access and improves
                 * fsck times.
                 */
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour;
}
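/*
 * Worked example for the colour calculation above (illustrative
 * only): with 32768 blocks per group, a task with pid % 16 == 5
 * gets colour 5 * (32768 / 16) = 10240, so concurrent allocators
 * are spread across sixteen distinct starting offsets within the
 * goal group.
 */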