/*
 * linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset within the group, given a
 * filesystem-wide block number.
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/* Return 1 if the given block lies within @block_group, 0 otherwise */
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;

	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

/*
 * Return the number of metadata blocks (block bitmap, inode bitmap and
 * inode table) that actually reside in @block_group; with FLEX_BG some
 * of them may live in a different group.
 */
static int ext4_group_used_meta_blocks(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	ext4_fsblk_t tmp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* block bitmap, inode bitmap, and inode table blocks */
	int used_blocks = sbi->s_itb_per_group + 2;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!ext4_block_in_group(sb, tmp, block_group))
				used_blocks -= 1;
		}
	}
	return used_blocks;
}

/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
{
	int bit, bit_max;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation,
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, "Checksum bad for group %u",
					block_group);
			ext4_free_blks_set(sb, gdp, 0);
			ext4_free_inodes_set(sb, gdp, 0);
			ext4_itable_unused_set(sb, gdp, 0);
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		bit_max += ext4_bg_num_gdb(sb, block_group);
	}

	if (block_group == ngroups - 1) {
		/*
		 * Even though mke2fs always initializes the first and last
		 * group, if some other tool enabled the EXT4_BG_BLOCK_UNINIT
		 * flag we need to make sure we calculate the right free block
		 * count.
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			ext4_group_first_block_no(sb, ngroups - 1);
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;

		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = ext4_group_first_block_no(sb, block_group);

		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;

		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
			    ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
		/*
		 * Also, if the number of blocks within the group is less than
		 * blocksize * 8 (the size of the bitmap), set the rest of the
		 * block bitmap to 1.
		 */
		ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
				     bh->b_data);
	}
	return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
}


/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains one bitmap block for blocks, one bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count for the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so the bitmap
		 * validation will be skipped for those groups; otherwise
		 * it would have to also read the block group where the
		 * bitmaps are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether the block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block numbers are set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
		   block_group, bitmap_blk);
	return 0;
}

/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block bitmap, inode bitmap and inode table are set in it.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit, then if bh is uptodate
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for reading.  We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * the file system was mounted not to panic on errors,
	 * so continue with the corrupt bitmap
	 */
	return bh;
}

/**
 * ext4_add_groupblocks() -- Add given blocks to an existing group
 * @handle:	handle to this transaction
 * @sb:		super block
 * @block:	starting physical block to add to the block group
 * @count:	number of blocks to add
 *
 * This marks the blocks as free in the bitmap.  We ask mballoc
 * to reload the buddy after this by setting the group's
 * EXT4_GROUP_INFO_NEED_INIT_BIT flag.
 */
void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned int i;
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0, ret, blk_free_count;
	ext4_grpblk_t blocks_freed;
	struct ext4_group_info *grp;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	grp = ext4_get_group_info(sb, block_group);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		goto error_return;
	}
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		goto error_return;
	}

	/*
	 * We are about to add blocks to the bitmap,
	 * so we need undo access.
	 */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it.
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;
	/*
	 * make sure we don't allow a parallel init on other groups in the
	 * same buddy cache
	 */
	down_write(&grp->alloc_sem);
	for (i = 0, blocks_freed = 0; i < count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
						bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			blocks_freed++;
		}
	}
	ext4_lock_group(sb, block_group);
	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
	ext4_free_blks_set(sb, desc, blk_free_count);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic_add(blocks_freed,
			   &sbi->s_flex_groups[flex_group].free_blocks);
	}
	/*
	 * request to reload the buddy with the
	 * new bitmap information
	 */
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
	grp->bb_free += blocks_freed;
	up_write(&grp->alloc_sem);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if the filesystem has nblocks free & available for allocation.
 * Returns 1 on success, 0 on failure.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
	s64 free_blocks, dirty_blocks, root_blocks;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	free_blocks = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
	root_blocks = ext4_r_blocks_count(sbi->s_es);

	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
		if (dirty_blocks < 0) {
			printk(KERN_CRIT "Dirty block accounting "
					"went wrong %lld\n",
					(long long)dirty_blocks);
		}
	}
	/* Check whether we have space after
	 * accounting for current dirty blocks & root reserved blocks.
	 */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* Hm, nope.  Are (enough) root reserved blocks available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE)) {
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}

/*
 * Claim @nblocks by adding them to the dirty block counter; returns 0 on
 * success, or -ENOSPC if not enough free blocks are available.
 */
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
						s64 nblocks)
{
	if (ext4_has_free_blocks(sbi, nblocks)) {
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:		super block
 * @retries:	number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @count:	pointer to total number of blocks needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and the error code is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated metadata blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode, ar.len);
	}
	return ret;
}

/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_blks_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
	}

	return desc_count;
#endif
}

/* Return 1 if @a is a positive integer power of @b (b, b^2, b^3, ...) */
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

/*
 * With the sparse_super feature, only groups 0 and 1 and groups that are
 * powers of 3, 5 or 7 carry superblock and group descriptor backups.
 */
static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

/*
 * Number of group descriptor blocks in @group with META_BG: only the
 * first, second and last group of each metagroup hold a descriptor block.
 */
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

/*
 * Number of group descriptor blocks in @group without META_BG, or for
 * groups that precede the first meta block group.
 */
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);

}