1 /* 2 * linux/fs/ext4/balloc.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993 10 * Big-endian to little-endian byte-swapping/bitmaps by 11 * David S. Miller (davem@caip.rutgers.edu), 1995 12 */ 13 14 #include <linux/time.h> 15 #include <linux/capability.h> 16 #include <linux/fs.h> 17 #include <linux/jbd2.h> 18 #include <linux/quotaops.h> 19 #include <linux/buffer_head.h> 20 #include "ext4.h" 21 #include "ext4_jbd2.h" 22 #include "mballoc.h" 23 24 #include <trace/events/ext4.h> 25 26 static unsigned ext4_num_base_meta_clusters(struct super_block *sb, 27 ext4_group_t block_group); 28 /* 29 * balloc.c contains the blocks allocation and deallocation routines 30 */ 31 32 /* 33 * Calculate block group number for a given block number 34 */ 35 ext4_group_t ext4_get_group_number(struct super_block *sb, 36 ext4_fsblk_t block) 37 { 38 ext4_group_t group; 39 40 if (test_opt2(sb, STD_GROUP_SIZE)) 41 group = (block - 42 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >> 43 (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3); 44 else 45 ext4_get_group_no_and_offset(sb, block, &group, NULL); 46 return group; 47 } 48 49 /* 50 * Calculate the block group number and offset into the block/cluster 51 * allocation bitmap, given a block number 52 */ 53 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, 54 ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp) 55 { 56 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 57 ext4_grpblk_t offset; 58 59 blocknr = blocknr - le32_to_cpu(es->s_first_data_block); 60 offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >> 61 EXT4_SB(sb)->s_cluster_bits; 62 if (offsetp) 63 *offsetp = offset; 64 if (blockgrpp) 65 *blockgrpp = blocknr; 66 67 } 68 69 /* 70 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so 71 * and 0 otherwise. 72 */ 73 static inline int ext4_block_in_group(struct super_block *sb, 74 ext4_fsblk_t block, 75 ext4_group_t block_group) 76 { 77 ext4_group_t actual_group; 78 79 actual_group = ext4_get_group_number(sb, block); 80 return (actual_group == block_group) ? 1 : 0; 81 } 82 83 /* Return the number of clusters used for file system metadata; this 84 * represents the overhead needed by the file system. 85 */ 86 static unsigned ext4_num_overhead_clusters(struct super_block *sb, 87 ext4_group_t block_group, 88 struct ext4_group_desc *gdp) 89 { 90 unsigned num_clusters; 91 int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c; 92 ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group); 93 ext4_fsblk_t itbl_blk; 94 struct ext4_sb_info *sbi = EXT4_SB(sb); 95 96 /* This is the number of clusters used by the superblock, 97 * block group descriptors, and reserved block group 98 * descriptor blocks */ 99 num_clusters = ext4_num_base_meta_clusters(sb, block_group); 100 101 /* 102 * For the allocation bitmaps and inode table, we first need 103 * to check to see if the block is in the block group. If it 104 * is, then check to see if the cluster is already accounted 105 * for in the clusters used for the base metadata cluster, or 106 * if we can increment the base metadata cluster to include 107 * that block. Otherwise, we will have to track the cluster 108 * used for the allocation bitmap or inode table explicitly. 109 * Normally all of these blocks are contiguous, so the special 110 * case handling shouldn't be necessary except for *very* 111 * unusual file system layouts. 112 */ 113 if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { 114 block_cluster = EXT4_B2C(sbi, 115 ext4_block_bitmap(sb, gdp) - start); 116 if (block_cluster < num_clusters) 117 block_cluster = -1; 118 else if (block_cluster == num_clusters) { 119 num_clusters++; 120 block_cluster = -1; 121 } 122 } 123 124 if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { 125 inode_cluster = EXT4_B2C(sbi, 126 ext4_inode_bitmap(sb, gdp) - start); 127 if (inode_cluster < num_clusters) 128 inode_cluster = -1; 129 else if (inode_cluster == num_clusters) { 130 num_clusters++; 131 inode_cluster = -1; 132 } 133 } 134 135 itbl_blk = ext4_inode_table(sb, gdp); 136 for (i = 0; i < sbi->s_itb_per_group; i++) { 137 if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { 138 c = EXT4_B2C(sbi, itbl_blk + i - start); 139 if ((c < num_clusters) || (c == inode_cluster) || 140 (c == block_cluster) || (c == itbl_cluster)) 141 continue; 142 if (c == num_clusters) { 143 num_clusters++; 144 continue; 145 } 146 num_clusters++; 147 itbl_cluster = c; 148 } 149 } 150 151 if (block_cluster != -1) 152 num_clusters++; 153 if (inode_cluster != -1) 154 num_clusters++; 155 156 return num_clusters; 157 } 158 159 static unsigned int num_clusters_in_group(struct super_block *sb, 160 ext4_group_t block_group) 161 { 162 unsigned int blocks; 163 164 if (block_group == ext4_get_groups_count(sb) - 1) { 165 /* 166 * Even though mke2fs always initializes the first and 167 * last group, just in case some other tool was used, 168 * we need to make sure we calculate the right free 169 * blocks. 170 */ 171 blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) - 172 ext4_group_first_block_no(sb, block_group); 173 } else 174 blocks = EXT4_BLOCKS_PER_GROUP(sb); 175 return EXT4_NUM_B2C(EXT4_SB(sb), blocks); 176 } 177 178 /* Initializes an uninitialized block bitmap */ 179 static void ext4_init_block_bitmap(struct super_block *sb, 180 struct buffer_head *bh, 181 ext4_group_t block_group, 182 struct ext4_group_desc *gdp) 183 { 184 unsigned int bit, bit_max; 185 struct ext4_sb_info *sbi = EXT4_SB(sb); 186 ext4_fsblk_t start, tmp; 187 int flex_bg = 0; 188 struct ext4_group_info *grp; 189 190 J_ASSERT_BH(bh, buffer_locked(bh)); 191 192 /* If checksum is bad mark all blocks used to prevent allocation 193 * essentially implementing a per-group read-only flag. */ 194 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 195 ext4_error(sb, "Checksum bad for group %u", block_group); 196 grp = ext4_get_group_info(sb, block_group); 197 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 198 percpu_counter_sub(&sbi->s_freeclusters_counter, 199 grp->bb_free); 200 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 201 if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { 202 int count; 203 count = ext4_free_inodes_count(sb, gdp); 204 percpu_counter_sub(&sbi->s_freeinodes_counter, 205 count); 206 } 207 set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); 208 return; 209 } 210 memset(bh->b_data, 0, sb->s_blocksize); 211 212 bit_max = ext4_num_base_meta_clusters(sb, block_group); 213 for (bit = 0; bit < bit_max; bit++) 214 ext4_set_bit(bit, bh->b_data); 215 216 start = ext4_group_first_block_no(sb, block_group); 217 218 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) 219 flex_bg = 1; 220 221 /* Set bits for block and inode bitmaps, and inode table */ 222 tmp = ext4_block_bitmap(sb, gdp); 223 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) 224 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); 225 226 tmp = ext4_inode_bitmap(sb, gdp); 227 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) 228 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); 229 230 tmp = ext4_inode_table(sb, gdp); 231 for (; tmp < ext4_inode_table(sb, gdp) + 232 sbi->s_itb_per_group; tmp++) { 233 if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) 234 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); 235 } 236 237 /* 238 * Also if the number of blocks within the group is less than 239 * the blocksize * 8 ( which is the size of bitmap ), set rest 240 * of the block bitmap to 1 241 */ 242 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), 243 sb->s_blocksize * 8, bh->b_data); 244 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh); 245 ext4_group_desc_csum_set(sb, block_group, gdp); 246 } 247 248 /* Return the number of free blocks in a block group. It is used when 249 * the block bitmap is uninitialized, so we can't just count the bits 250 * in the bitmap. */ 251 unsigned ext4_free_clusters_after_init(struct super_block *sb, 252 ext4_group_t block_group, 253 struct ext4_group_desc *gdp) 254 { 255 return num_clusters_in_group(sb, block_group) - 256 ext4_num_overhead_clusters(sb, block_group, gdp); 257 } 258 259 /* 260 * The free blocks are managed by bitmaps. A file system contains several 261 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 262 * block for inodes, N blocks for the inode table and data blocks. 263 * 264 * The file system contains group descriptors which are located after the 265 * super block. Each descriptor contains the number of the bitmap block and 266 * the free blocks count in the block. The descriptors are loaded in memory 267 * when a file system is mounted (see ext4_fill_super). 268 */ 269 270 /** 271 * ext4_get_group_desc() -- load group descriptor from disk 272 * @sb: super block 273 * @block_group: given block group 274 * @bh: pointer to the buffer head to store the block 275 * group descriptor 276 */ 277 struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, 278 ext4_group_t block_group, 279 struct buffer_head **bh) 280 { 281 unsigned int group_desc; 282 unsigned int offset; 283 ext4_group_t ngroups = ext4_get_groups_count(sb); 284 struct ext4_group_desc *desc; 285 struct ext4_sb_info *sbi = EXT4_SB(sb); 286 287 if (block_group >= ngroups) { 288 ext4_error(sb, "block_group >= groups_count - block_group = %u," 289 " groups_count = %u", block_group, ngroups); 290 291 return NULL; 292 } 293 294 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); 295 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); 296 if (!sbi->s_group_desc[group_desc]) { 297 ext4_error(sb, "Group descriptor not loaded - " 298 "block_group = %u, group_desc = %u, desc = %u", 299 block_group, group_desc, offset); 300 return NULL; 301 } 302 303 desc = (struct ext4_group_desc *)( 304 (__u8 *)sbi->s_group_desc[group_desc]->b_data + 305 offset * EXT4_DESC_SIZE(sb)); 306 if (bh) 307 *bh = sbi->s_group_desc[group_desc]; 308 return desc; 309 } 310 311 /* 312 * Return the block number which was discovered to be invalid, or 0 if 313 * the block bitmap is valid. 314 */ 315 static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, 316 struct ext4_group_desc *desc, 317 ext4_group_t block_group, 318 struct buffer_head *bh) 319 { 320 struct ext4_sb_info *sbi = EXT4_SB(sb); 321 ext4_grpblk_t offset; 322 ext4_grpblk_t next_zero_bit; 323 ext4_fsblk_t blk; 324 ext4_fsblk_t group_first_block; 325 326 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { 327 /* with FLEX_BG, the inode/block bitmaps and itable 328 * blocks may not be in the group at all 329 * so the bitmap validation will be skipped for those groups 330 * or it has to also read the block group where the bitmaps 331 * are located to verify they are set. 332 */ 333 return 0; 334 } 335 group_first_block = ext4_group_first_block_no(sb, block_group); 336 337 /* check whether block bitmap block number is set */ 338 blk = ext4_block_bitmap(sb, desc); 339 offset = blk - group_first_block; 340 if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 341 /* bad block bitmap */ 342 return blk; 343 344 /* check whether the inode bitmap block number is set */ 345 blk = ext4_inode_bitmap(sb, desc); 346 offset = blk - group_first_block; 347 if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 348 /* bad block bitmap */ 349 return blk; 350 351 /* check whether the inode table block number is set */ 352 blk = ext4_inode_table(sb, desc); 353 offset = blk - group_first_block; 354 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 355 EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group), 356 EXT4_B2C(sbi, offset)); 357 if (next_zero_bit < 358 EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group)) 359 /* bad bitmap for inode tables */ 360 return blk; 361 return 0; 362 } 363 364 static void ext4_validate_block_bitmap(struct super_block *sb, 365 struct ext4_group_desc *desc, 366 ext4_group_t block_group, 367 struct buffer_head *bh) 368 { 369 ext4_fsblk_t blk; 370 struct ext4_group_info *grp = ext4_get_group_info(sb, block_group); 371 struct ext4_sb_info *sbi = EXT4_SB(sb); 372 373 if (buffer_verified(bh)) 374 return; 375 376 ext4_lock_group(sb, block_group); 377 blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); 378 if (unlikely(blk != 0)) { 379 ext4_unlock_group(sb, block_group); 380 ext4_error(sb, "bg %u: block %llu: invalid block bitmap", 381 block_group, blk); 382 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 383 percpu_counter_sub(&sbi->s_freeclusters_counter, 384 grp->bb_free); 385 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 386 return; 387 } 388 if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, 389 desc, bh))) { 390 ext4_unlock_group(sb, block_group); 391 ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); 392 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 393 percpu_counter_sub(&sbi->s_freeclusters_counter, 394 grp->bb_free); 395 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 396 return; 397 } 398 set_buffer_verified(bh); 399 ext4_unlock_group(sb, block_group); 400 } 401 402 /** 403 * ext4_read_block_bitmap_nowait() 404 * @sb: super block 405 * @block_group: given block group 406 * 407 * Read the bitmap for a given block_group,and validate the 408 * bits for block/inode/inode tables are set in the bitmaps 409 * 410 * Return buffer_head on success or NULL in case of failure. 411 */ 412 struct buffer_head * 413 ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) 414 { 415 struct ext4_group_desc *desc; 416 struct buffer_head *bh; 417 ext4_fsblk_t bitmap_blk; 418 419 desc = ext4_get_group_desc(sb, block_group, NULL); 420 if (!desc) 421 return NULL; 422 bitmap_blk = ext4_block_bitmap(sb, desc); 423 bh = sb_getblk(sb, bitmap_blk); 424 if (unlikely(!bh)) { 425 ext4_error(sb, "Cannot get buffer for block bitmap - " 426 "block_group = %u, block_bitmap = %llu", 427 block_group, bitmap_blk); 428 return NULL; 429 } 430 431 if (bitmap_uptodate(bh)) 432 goto verify; 433 434 lock_buffer(bh); 435 if (bitmap_uptodate(bh)) { 436 unlock_buffer(bh); 437 goto verify; 438 } 439 ext4_lock_group(sb, block_group); 440 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 441 ext4_init_block_bitmap(sb, bh, block_group, desc); 442 set_bitmap_uptodate(bh); 443 set_buffer_uptodate(bh); 444 ext4_unlock_group(sb, block_group); 445 unlock_buffer(bh); 446 return bh; 447 } 448 ext4_unlock_group(sb, block_group); 449 if (buffer_uptodate(bh)) { 450 /* 451 * if not uninit if bh is uptodate, 452 * bitmap is also uptodate 453 */ 454 set_bitmap_uptodate(bh); 455 unlock_buffer(bh); 456 goto verify; 457 } 458 /* 459 * submit the buffer_head for reading 460 */ 461 set_buffer_new(bh); 462 trace_ext4_read_block_bitmap_load(sb, block_group); 463 bh->b_end_io = ext4_end_bitmap_read; 464 get_bh(bh); 465 submit_bh(READ | REQ_META | REQ_PRIO, bh); 466 return bh; 467 verify: 468 ext4_validate_block_bitmap(sb, desc, block_group, bh); 469 if (buffer_verified(bh)) 470 return bh; 471 put_bh(bh); 472 return NULL; 473 } 474 475 /* Returns 0 on success, 1 on error */ 476 int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, 477 struct buffer_head *bh) 478 { 479 struct ext4_group_desc *desc; 480 481 if (!buffer_new(bh)) 482 return 0; 483 desc = ext4_get_group_desc(sb, block_group, NULL); 484 if (!desc) 485 return 1; 486 wait_on_buffer(bh); 487 if (!buffer_uptodate(bh)) { 488 ext4_error(sb, "Cannot read block bitmap - " 489 "block_group = %u, block_bitmap = %llu", 490 block_group, (unsigned long long) bh->b_blocknr); 491 return 1; 492 } 493 clear_buffer_new(bh); 494 /* Panic or remount fs read-only if block bitmap is invalid */ 495 ext4_validate_block_bitmap(sb, desc, block_group, bh); 496 /* ...but check for error just in case errors=continue. */ 497 return !buffer_verified(bh); 498 } 499 500 struct buffer_head * 501 ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) 502 { 503 struct buffer_head *bh; 504 505 bh = ext4_read_block_bitmap_nowait(sb, block_group); 506 if (!bh) 507 return NULL; 508 if (ext4_wait_block_bitmap(sb, block_group, bh)) { 509 put_bh(bh); 510 return NULL; 511 } 512 return bh; 513 } 514 515 /** 516 * ext4_has_free_clusters() 517 * @sbi: in-core super block structure. 518 * @nclusters: number of needed blocks 519 * @flags: flags from ext4_mb_new_blocks() 520 * 521 * Check if filesystem has nclusters free & available for allocation. 522 * On success return 1, return 0 on failure. 523 */ 524 static int ext4_has_free_clusters(struct ext4_sb_info *sbi, 525 s64 nclusters, unsigned int flags) 526 { 527 s64 free_clusters, dirty_clusters, rsv, resv_clusters; 528 struct percpu_counter *fcc = &sbi->s_freeclusters_counter; 529 struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter; 530 531 free_clusters = percpu_counter_read_positive(fcc); 532 dirty_clusters = percpu_counter_read_positive(dcc); 533 resv_clusters = atomic64_read(&sbi->s_resv_clusters); 534 535 /* 536 * r_blocks_count should always be multiple of the cluster ratio so 537 * we are safe to do a plane bit shift only. 538 */ 539 rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) + 540 resv_clusters; 541 542 if (free_clusters - (nclusters + rsv + dirty_clusters) < 543 EXT4_FREECLUSTERS_WATERMARK) { 544 free_clusters = percpu_counter_sum_positive(fcc); 545 dirty_clusters = percpu_counter_sum_positive(dcc); 546 } 547 /* Check whether we have space after accounting for current 548 * dirty clusters & root reserved clusters. 549 */ 550 if (free_clusters >= (rsv + nclusters + dirty_clusters)) 551 return 1; 552 553 /* Hm, nope. Are (enough) root reserved clusters available? */ 554 if (uid_eq(sbi->s_resuid, current_fsuid()) || 555 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || 556 capable(CAP_SYS_RESOURCE) || 557 (flags & EXT4_MB_USE_ROOT_BLOCKS)) { 558 559 if (free_clusters >= (nclusters + dirty_clusters + 560 resv_clusters)) 561 return 1; 562 } 563 /* No free blocks. Let's see if we can dip into reserved pool */ 564 if (flags & EXT4_MB_USE_RESERVED) { 565 if (free_clusters >= (nclusters + dirty_clusters)) 566 return 1; 567 } 568 569 return 0; 570 } 571 572 int ext4_claim_free_clusters(struct ext4_sb_info *sbi, 573 s64 nclusters, unsigned int flags) 574 { 575 if (ext4_has_free_clusters(sbi, nclusters, flags)) { 576 percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters); 577 return 0; 578 } else 579 return -ENOSPC; 580 } 581 582 /** 583 * ext4_should_retry_alloc() 584 * @sb: super block 585 * @retries number of attemps has been made 586 * 587 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if 588 * it is profitable to retry the operation, this function will wait 589 * for the current or committing transaction to complete, and then 590 * return TRUE. 591 * 592 * if the total number of retries exceed three times, return FALSE. 593 */ 594 int ext4_should_retry_alloc(struct super_block *sb, int *retries) 595 { 596 if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) || 597 (*retries)++ > 3 || 598 !EXT4_SB(sb)->s_journal) 599 return 0; 600 601 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); 602 603 return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); 604 } 605 606 /* 607 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks 608 * 609 * @handle: handle to this transaction 610 * @inode: file inode 611 * @goal: given target block(filesystem wide) 612 * @count: pointer to total number of clusters needed 613 * @errp: error code 614 * 615 * Return 1st allocated block number on success, *count stores total account 616 * error stores in errp pointer 617 */ 618 ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, 619 ext4_fsblk_t goal, unsigned int flags, 620 unsigned long *count, int *errp) 621 { 622 struct ext4_allocation_request ar; 623 ext4_fsblk_t ret; 624 625 memset(&ar, 0, sizeof(ar)); 626 /* Fill with neighbour allocated blocks */ 627 ar.inode = inode; 628 ar.goal = goal; 629 ar.len = count ? *count : 1; 630 ar.flags = flags; 631 632 ret = ext4_mb_new_blocks(handle, &ar, errp); 633 if (count) 634 *count = ar.len; 635 /* 636 * Account for the allocated meta blocks. We will never 637 * fail EDQUOT for metdata, but we do account for it. 638 */ 639 if (!(*errp) && 640 ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) { 641 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 642 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 643 dquot_alloc_block_nofail(inode, 644 EXT4_C2B(EXT4_SB(inode->i_sb), ar.len)); 645 } 646 return ret; 647 } 648 649 /** 650 * ext4_count_free_clusters() -- count filesystem free clusters 651 * @sb: superblock 652 * 653 * Adds up the number of free clusters from each block group. 654 */ 655 ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) 656 { 657 ext4_fsblk_t desc_count; 658 struct ext4_group_desc *gdp; 659 ext4_group_t i; 660 ext4_group_t ngroups = ext4_get_groups_count(sb); 661 struct ext4_group_info *grp; 662 #ifdef EXT4FS_DEBUG 663 struct ext4_super_block *es; 664 ext4_fsblk_t bitmap_count; 665 unsigned int x; 666 struct buffer_head *bitmap_bh = NULL; 667 668 es = EXT4_SB(sb)->s_es; 669 desc_count = 0; 670 bitmap_count = 0; 671 gdp = NULL; 672 673 for (i = 0; i < ngroups; i++) { 674 gdp = ext4_get_group_desc(sb, i, NULL); 675 if (!gdp) 676 continue; 677 grp = NULL; 678 if (EXT4_SB(sb)->s_group_info) 679 grp = ext4_get_group_info(sb, i); 680 if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 681 desc_count += ext4_free_group_clusters(sb, gdp); 682 brelse(bitmap_bh); 683 bitmap_bh = ext4_read_block_bitmap(sb, i); 684 if (bitmap_bh == NULL) 685 continue; 686 687 x = ext4_count_free(bitmap_bh->b_data, 688 EXT4_CLUSTERS_PER_GROUP(sb) / 8); 689 printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n", 690 i, ext4_free_group_clusters(sb, gdp), x); 691 bitmap_count += x; 692 } 693 brelse(bitmap_bh); 694 printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" 695 ", computed = %llu, %llu\n", 696 EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), 697 desc_count, bitmap_count); 698 return bitmap_count; 699 #else 700 desc_count = 0; 701 for (i = 0; i < ngroups; i++) { 702 gdp = ext4_get_group_desc(sb, i, NULL); 703 if (!gdp) 704 continue; 705 grp = NULL; 706 if (EXT4_SB(sb)->s_group_info) 707 grp = ext4_get_group_info(sb, i); 708 if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 709 desc_count += ext4_free_group_clusters(sb, gdp); 710 } 711 712 return desc_count; 713 #endif 714 } 715 716 static inline int test_root(ext4_group_t a, int b) 717 { 718 while (1) { 719 if (a < b) 720 return 0; 721 if (a == b) 722 return 1; 723 if ((a % b) != 0) 724 return 0; 725 a = a / b; 726 } 727 } 728 729 /** 730 * ext4_bg_has_super - number of blocks used by the superblock in group 731 * @sb: superblock for filesystem 732 * @group: group number to check 733 * 734 * Return the number of blocks used by the superblock (primary or backup) 735 * in this group. Currently this will be only 0 or 1. 736 */ 737 int ext4_bg_has_super(struct super_block *sb, ext4_group_t group) 738 { 739 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 740 741 if (group == 0) 742 return 1; 743 if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) { 744 if (group == le32_to_cpu(es->s_backup_bgs[0]) || 745 group == le32_to_cpu(es->s_backup_bgs[1])) 746 return 1; 747 return 0; 748 } 749 if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb, 750 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) 751 return 1; 752 if (!(group & 1)) 753 return 0; 754 if (test_root(group, 3) || (test_root(group, 5)) || 755 test_root(group, 7)) 756 return 1; 757 758 return 0; 759 } 760 761 static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, 762 ext4_group_t group) 763 { 764 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); 765 ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb); 766 ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1; 767 768 if (group == first || group == first + 1 || group == last) 769 return 1; 770 return 0; 771 } 772 773 static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, 774 ext4_group_t group) 775 { 776 if (!ext4_bg_has_super(sb, group)) 777 return 0; 778 779 if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG)) 780 return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); 781 else 782 return EXT4_SB(sb)->s_gdb_count; 783 } 784 785 /** 786 * ext4_bg_num_gdb - number of blocks used by the group table in group 787 * @sb: superblock for filesystem 788 * @group: group number to check 789 * 790 * Return the number of blocks used by the group descriptor table 791 * (primary or backup) in this group. In the future there may be a 792 * different number of descriptor blocks in each group. 793 */ 794 unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) 795 { 796 unsigned long first_meta_bg = 797 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); 798 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); 799 800 if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) || 801 metagroup < first_meta_bg) 802 return ext4_bg_num_gdb_nometa(sb, group); 803 804 return ext4_bg_num_gdb_meta(sb,group); 805 806 } 807 808 /* 809 * This function returns the number of file system metadata clusters at 810 * the beginning of a block group, including the reserved gdt blocks. 811 */ 812 static unsigned ext4_num_base_meta_clusters(struct super_block *sb, 813 ext4_group_t block_group) 814 { 815 struct ext4_sb_info *sbi = EXT4_SB(sb); 816 unsigned num; 817 818 /* Check for superblock and gdt backups in this group */ 819 num = ext4_bg_has_super(sb, block_group); 820 821 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || 822 block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) * 823 sbi->s_desc_per_block) { 824 if (num) { 825 num += ext4_bg_num_gdb(sb, block_group); 826 num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); 827 } 828 } else { /* For META_BG_BLOCK_GROUPS */ 829 num += ext4_bg_num_gdb(sb, block_group); 830 } 831 return EXT4_NUM_B2C(sbi, num); 832 } 833 /** 834 * ext4_inode_to_goal_block - return a hint for block allocation 835 * @inode: inode for block allocation 836 * 837 * Return the ideal location to start allocating blocks for a 838 * newly created inode. 839 */ 840 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode) 841 { 842 struct ext4_inode_info *ei = EXT4_I(inode); 843 ext4_group_t block_group; 844 ext4_grpblk_t colour; 845 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); 846 ext4_fsblk_t bg_start; 847 ext4_fsblk_t last_block; 848 849 block_group = ei->i_block_group; 850 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { 851 /* 852 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME 853 * block groups per flexgroup, reserve the first block 854 * group for directories and special files. Regular 855 * files will start at the second block group. This 856 * tends to speed up directory access and improves 857 * fsck times. 858 */ 859 block_group &= ~(flex_size-1); 860 if (S_ISREG(inode->i_mode)) 861 block_group++; 862 } 863 bg_start = ext4_group_first_block_no(inode->i_sb, block_group); 864 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 865 866 /* 867 * If we are doing delayed allocation, we don't need take 868 * colour into account. 869 */ 870 if (test_opt(inode->i_sb, DELALLOC)) 871 return bg_start; 872 873 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 874 colour = (current->pid % 16) * 875 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 876 else 877 colour = (current->pid % 16) * ((last_block - bg_start) / 16); 878 return bg_start + colour; 879 } 880 881