// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group);

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
                                   ext4_fsblk_t block)
{
        ext4_group_t group;

        if (test_opt2(sb, STD_GROUP_SIZE))
                group = (block -
                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
                        (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
        else
                ext4_get_group_no_and_offset(sb, block, &group, NULL);
        return group;
}
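
/*
 * Illustrative arithmetic (a worked example, not extra logic): with 4KiB
 * blocks and no bigalloc (EXT4_CLUSTER_BITS == 0), the shift above is
 * 12 + 0 + 3 = 15, i.e. 32768 blocks per group -- exactly the number of
 * bits one 4096-byte bitmap block can hold, which is what the
 * STD_GROUP_SIZE fast path relies on.
 */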

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                                  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
                EXT4_SB(sb)->s_cluster_bits;
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
                                      ext4_fsblk_t block,
                                      ext4_group_t block_group)
{
        ext4_group_t actual_group;

        actual_group = ext4_get_group_number(sb, block);
        return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
                                           ext4_group_t block_group,
                                           struct ext4_group_desc *gdp)
{
        unsigned num_clusters;
        int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
        ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
        ext4_fsblk_t itbl_blk;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        /* This is the number of clusters used by the superblock,
         * block group descriptors, and reserved block group
         * descriptor blocks */
        num_clusters = ext4_num_base_meta_clusters(sb, block_group);

        /*
         * For the allocation bitmaps and inode table, we first need
         * to check to see if the block is in the block group.  If it
         * is, then check to see if the cluster is already accounted
         * for in the clusters used for the base metadata cluster, or
         * if we can increment the base metadata cluster to include
         * that block.  Otherwise, we will have to track the cluster
         * used for the allocation bitmap or inode table explicitly.
         * Normally all of these blocks are contiguous, so the special
         * case handling shouldn't be necessary except for *very*
         * unusual file system layouts.
         */
        if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
                block_cluster = EXT4_B2C(sbi,
                                         ext4_block_bitmap(sb, gdp) - start);
                if (block_cluster < num_clusters)
                        block_cluster = -1;
                else if (block_cluster == num_clusters) {
                        num_clusters++;
                        block_cluster = -1;
                }
        }

        if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
                inode_cluster = EXT4_B2C(sbi,
                                         ext4_inode_bitmap(sb, gdp) - start);
                if (inode_cluster < num_clusters)
                        inode_cluster = -1;
                else if (inode_cluster == num_clusters) {
                        num_clusters++;
                        inode_cluster = -1;
                }
        }

        itbl_blk = ext4_inode_table(sb, gdp);
        for (i = 0; i < sbi->s_itb_per_group; i++) {
                if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
                        c = EXT4_B2C(sbi, itbl_blk + i - start);
                        if ((c < num_clusters) || (c == inode_cluster) ||
                            (c == block_cluster) || (c == itbl_cluster))
                                continue;
                        if (c == num_clusters) {
                                num_clusters++;
                                continue;
                        }
                        num_clusters++;
                        itbl_cluster = c;
                }
        }

        if (block_cluster != -1)
                num_clusters++;
        if (inode_cluster != -1)
                num_clusters++;

        return num_clusters;
}
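
/*
 * Worked example (illustrative figures, not taken from this file): with
 * 4KiB blocks, 256-byte inodes and 8192 inodes per group, the inode table
 * is 8192 * 256 / 4096 = 512 blocks, so a group that holds its own
 * metadata contributes the base meta clusters plus 1 (block bitmap)
 * + 1 (inode bitmap) + 512 (inode table) overhead clusters at a cluster
 * ratio of 1.
 */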

static unsigned int num_clusters_in_group(struct super_block *sb,
                                          ext4_group_t block_group)
{
        unsigned int blocks;

        if (block_group == ext4_get_groups_count(sb) - 1) {
                /*
                 * Even though mke2fs always initializes the first and
                 * last group, just in case some other tool was used,
                 * we need to make sure we calculate the right free
                 * blocks.
                 */
                blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
                        ext4_group_first_block_no(sb, block_group);
        } else
                blocks = EXT4_BLOCKS_PER_GROUP(sb);
        return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
                                  struct buffer_head *bh,
                                  ext4_group_t block_group,
                                  struct ext4_group_desc *gdp)
{
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;

        ASSERT(buffer_locked(bh));

        /* If the checksum is bad, mark all blocks used to prevent
         * allocation, essentially implementing a per-group read-only
         * flag. */
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_mark_group_bitmap_corrupted(sb, block_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT |
                                        EXT4_GROUP_INFO_IBITMAP_CORRUPT);
                return -EFSBADCRC;
        }
        memset(bh->b_data, 0, sb->s_blocksize);

        bit_max = ext4_num_base_meta_clusters(sb, block_group);
        if ((bit_max >> 3) >= bh->b_size)
                return -EFSCORRUPTED;

        for (bit = 0; bit < bit_max; bit++)
                ext4_set_bit(bit, bh->b_data);

        start = ext4_group_first_block_no(sb, block_group);

        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
        if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_bitmap(sb, gdp);
        if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
                if (ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }

        /*
         * Also, if the number of blocks within the group is less than
         * the blocksize * 8 (which is the size of the bitmap), set the
         * rest of the block bitmap to 1
         */
        ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
                             sb->s_blocksize * 8, bh->b_data);
        return 0;
}

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        return num_clusters_in_group(sb, block_group) -
                ext4_num_overhead_clusters(sb, block_group, gdp);
}
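
/*
 * Continuing the example above (illustrative numbers only): a full
 * 32768-cluster group whose overhead works out to, say, 531 clusters
 * would report 32768 - 531 = 32237 free clusters immediately after its
 * bitmap is initialized.
 */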

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
                                             ext4_group_t block_group,
                                             struct buffer_head **bh)
{
        unsigned int group_desc;
        unsigned int offset;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct buffer_head *bh_p;

        if (block_group >= ngroups) {
                ext4_error(sb, "block_group >= groups_count - block_group = %u,"
                           " groups_count = %u", block_group, ngroups);

                return NULL;
        }

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
        /*
         * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
         * the pointer being dereferenced won't be dereferenced again.  By
         * looking at the usage in add_new_gdb() the value isn't modified,
         * just the pointer, and so it remains valid.
         */
        if (!bh_p) {
                ext4_error(sb, "Group descriptor not loaded - "
                           "block_group = %u, group_desc = %u, desc = %u",
                           block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)bh_p->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = bh_p;
        return desc;
}
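
/*
 * Lookup arithmetic, by way of example (assuming 4KiB blocks and 64-byte
 * descriptors, i.e. the 64bit feature): EXT4_DESC_PER_BLOCK is
 * 4096 / 64 = 64, so block_group 100 lives in descriptor block
 * 100 >> 6 = 1, at offset 100 & 63 = 36 within that block.
 */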

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
                                            struct ext4_group_desc *desc,
                                            ext4_group_t block_group,
                                            struct buffer_head *bh)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_fsblk_t blk;
        ext4_fsblk_t group_first_block;

        if (ext4_has_feature_flex_bg(sb)) {
                /* with FLEX_BG, the inode/block bitmaps and itable
                 * blocks may not be in the group at all, so the bitmap
                 * validation is skipped for those groups; otherwise we
                 * would also have to read the block group where the
                 * bitmaps are located to verify they are set.
                 */
                return 0;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether the block bitmap block number is set */
        blk = ext4_block_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
            !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad block bitmap */
                return blk;

        /* check whether the inode bitmap block number is set */
        blk = ext4_inode_bitmap(sb, desc);
        offset = blk - group_first_block;
        if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
            !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad inode bitmap */
                return blk;

        /* check whether the inode table block number is set */
        blk = ext4_inode_table(sb, desc);
        offset = blk - group_first_block;
        if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
            EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
                return blk;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                        EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
                        EXT4_B2C(sbi, offset));
        if (next_zero_bit <
            EXT4_B2C(sbi, offset + sbi->s_itb_per_group))
                /* bad bitmap for inode tables */
                return blk;
        return 0;
}

static int ext4_validate_block_bitmap(struct super_block *sb,
                                      struct ext4_group_desc *desc,
                                      ext4_group_t block_group,
                                      struct buffer_head *bh)
{
        ext4_fsblk_t blk;
        struct ext4_group_info *grp;

        if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
                return 0;

        grp = ext4_get_group_info(sb, block_group);

        if (buffer_verified(bh))
                return 0;
        if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                return -EFSCORRUPTED;

        ext4_lock_group(sb, block_group);
        if (buffer_verified(bh))
                goto verified;
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                                                    desc, bh) ||
                     ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
                ext4_mark_group_bitmap_corrupted(sb, block_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                return -EFSBADCRC;
        }
        blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
        if (unlikely(blk != 0)) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
                           block_group, blk);
                ext4_mark_group_bitmap_corrupted(sb, block_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                return -EFSCORRUPTED;
        }
        set_buffer_verified(bh);
verified:
        ext4_unlock_group(sb, block_group);
        return 0;
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 * @ignore_locked:	ignore locked buffers
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and inode table are set in the bitmap.
 *
 * Return buffer_head on success or an ERR_PTR in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
                              bool ignore_locked)
{
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct buffer_head *bh;
        ext4_fsblk_t bitmap_blk;
        int err;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return ERR_PTR(-EFSCORRUPTED);
        bitmap_blk = ext4_block_bitmap(sb, desc);
        if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
            (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
                ext4_error(sb, "Invalid block bitmap block %llu in "
                           "block_group %u", bitmap_blk, block_group);
                ext4_mark_group_bitmap_corrupted(sb, block_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                return ERR_PTR(-EFSCORRUPTED);
        }
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_warning(sb, "Cannot get buffer for block bitmap - "
                             "block_group = %u, block_bitmap = %llu",
                             block_group, bitmap_blk);
                return ERR_PTR(-ENOMEM);
        }

        if (ignore_locked && buffer_locked(bh)) {
                /* buffer under IO already, return if called for prefetching */
                put_bh(bh);
                return NULL;
        }

        if (bitmap_uptodate(bh))
                goto verify;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                goto verify;
        }
        ext4_lock_group(sb, block_group);
        if (ext4_has_group_desc_csum(sb) &&
            (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                if (block_group == 0) {
                        ext4_unlock_group(sb, block_group);
                        unlock_buffer(bh);
                        ext4_error(sb, "Block bitmap for bg 0 marked "
                                   "uninitialized");
                        err = -EFSCORRUPTED;
                        goto out;
                }
                err = ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                set_buffer_verified(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                if (err) {
                        ext4_error(sb, "Failed to init block bitmap for group "
                                   "%u: %d", block_group, err);
                        goto out;
                }
                goto verify;
        }
        ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if the group is not marked uninit and bh is uptodate,
                 * the bitmap is also uptodate
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                goto verify;
        }
        /*
         * submit the buffer_head for reading
         */
        set_buffer_new(bh);
        trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
        ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO |
                            (ignore_locked ? REQ_RAHEAD : 0),
                            ext4_end_bitmap_read);
        return bh;
verify:
        err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
        if (err)
                goto out;
        return bh;
out:
        put_bh(bh);
        return ERR_PTR(err);
}
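
/*
 * Note (summarizing the surrounding functions, not new behaviour):
 * ext4_read_block_bitmap_nowait() only submits the read; a caller must
 * pair it with ext4_wait_block_bitmap() below before using the buffer,
 * as the synchronous wrapper ext4_read_block_bitmap() does.
 */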

/* Returns 0 on success, -errno on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
                           struct buffer_head *bh)
{
        struct ext4_group_desc *desc;

        if (!buffer_new(bh))
                return 0;
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return -EFSCORRUPTED;
        wait_on_buffer(bh);
        ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
        if (!buffer_uptodate(bh)) {
                ext4_error_err(sb, EIO, "Cannot read block bitmap - "
                               "block_group = %u, block_bitmap = %llu",
                               block_group, (unsigned long long) bh->b_blocknr);
                ext4_mark_group_bitmap_corrupted(sb, block_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                return -EIO;
        }
        clear_buffer_new(bh);
        /* Panic or remount fs read-only if block bitmap is invalid */
        return ext4_validate_block_bitmap(sb, desc, block_group, bh);
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct buffer_head *bh;
        int err;

        bh = ext4_read_block_bitmap_nowait(sb, block_group, false);
        if (IS_ERR(bh))
                return bh;
        err = ext4_wait_block_bitmap(sb, block_group, bh);
        if (err) {
                put_bh(bh);
                return ERR_PTR(err);
        }
        return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
                                  s64 nclusters, unsigned int flags)
{
        s64 free_clusters, dirty_clusters, rsv, resv_clusters;
        struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
        struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

        free_clusters  = percpu_counter_read_positive(fcc);
        dirty_clusters = percpu_counter_read_positive(dcc);
        resv_clusters = atomic64_read(&sbi->s_resv_clusters);

        /*
         * r_blocks_count should always be a multiple of the cluster
         * ratio so we are safe to do a plain bit shift only.
         */
        rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
              resv_clusters;

        if (free_clusters - (nclusters + rsv + dirty_clusters) <
                                        EXT4_FREECLUSTERS_WATERMARK) {
                free_clusters  = percpu_counter_sum_positive(fcc);
                dirty_clusters = percpu_counter_sum_positive(dcc);
        }
        /* Check whether we have space after accounting for current
         * dirty clusters & root reserved clusters.
         */
        if (free_clusters >= (rsv + nclusters + dirty_clusters))
                return 1;

        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
            capable(CAP_SYS_RESOURCE) ||
            (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

                if (free_clusters >= (nclusters + dirty_clusters +
                                      resv_clusters))
                        return 1;
        }
        /* No free blocks.  Let's see if we can dip into reserved pool */
        if (flags & EXT4_MB_USE_RESERVED) {
                if (free_clusters >= (nclusters + dirty_clusters))
                        return 1;
        }

        return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
                             s64 nclusters, unsigned int flags)
{
        if (ext4_has_free_clusters(sbi, nclusters, flags)) {
                percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
                return 0;
        } else
                return -ENOSPC;
}

/**
 * ext4_should_retry_alloc() - check if a block allocation should be retried
 * @sb:		superblock
 * @retries:	number of retry attempts made so far
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned while
 * attempting to allocate blocks.  If there's an indication that a pending
 * journal transaction might free some space and allow another attempt to
 * succeed, this function will wait for the current or committing transaction
 * to complete and then return TRUE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (!sbi->s_journal)
                return 0;

        if (++(*retries) > 3) {
                percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
                return 0;
        }

        /*
         * if there's no indication that blocks are about to be freed it's
         * possible we just missed a transaction commit that did so
         */
        smp_mb();
        if (sbi->s_mb_free_pending == 0) {
                if (test_opt(sb, DISCARD)) {
                        atomic_inc(&sbi->s_retry_alloc_pending);
                        flush_work(&sbi->s_discard_work);
                        atomic_dec(&sbi->s_retry_alloc_pending);
                }
                return ext4_has_free_clusters(sbi, 1, 0);
        }

        /*
         * it's possible we've just missed a transaction commit here,
         * so ignore the returned status
         */
        ext4_debug("%s: retrying operation after ENOSPC\n", sb->s_id);
        (void) jbd2_journal_force_commit_nested(sbi->s_journal);
        return 1;
}

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @flags:	allocation flags passed on to ext4_mb_new_blocks()
 * @count:	pointer to total number of clusters needed
 * @errp:	error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
                                  ext4_fsblk_t goal, unsigned int flags,
                                  unsigned long *count, int *errp)
{
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;

        memset(&ar, 0, sizeof(ar));
        /* Fill with neighbour allocated blocks */
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
        ar.flags = flags;

        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
                *count = ar.len;
        /*
         * Account for the allocated meta blocks.  We will never
         * fail EDQUOT for metadata, but we do account for it.
         */
        if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
                dquot_alloc_block_nofail(inode,
                                EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
        }
        return ret;
}
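
/*
 * Illustrative caller pattern for the function above (a sketch, not code
 * from this file; a zero return with *errp set signals failure):
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t block;
 *
 *	block = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
 *	if (!block)
 *		return err;
 */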

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
        ext4_fsblk_t desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i;
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        ext4_fsblk_t bitmap_count;
        unsigned int x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_block_bitmap(sb, i);
                if (IS_ERR(bitmap_bh)) {
                        bitmap_bh = NULL;
                        continue;
                }

                x = ext4_count_free(bitmap_bh->b_data,
                                    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
               EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                grp = NULL;
                if (EXT4_SB(sb)->s_group_info)
                        grp = ext4_get_group_info(sb, i);
                if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                        desc_count += ext4_free_group_clusters(sb, gdp);
        }

        return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
        while (1) {
                if (a < b)
                        return 0;
                if (a == b)
                        return 1;
                if ((a % b) != 0)
                        return 0;
                a = a / b;
        }
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;

        if (group == 0)
                return 1;
        if (ext4_has_feature_sparse_super2(sb)) {
                if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
                    group == le32_to_cpu(es->s_backup_bgs[1]))
                        return 1;
                return 0;
        }
        if ((group <= 1) || !ext4_has_feature_sparse_super(sb))
                return 1;
        if (!(group & 1))
                return 0;
        if (test_root(group, 3) || (test_root(group, 5)) ||
            test_root(group, 7))
                return 1;

        return 0;
}
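
/*
 * For reference (a property of the sparse_super layout, not extra logic):
 * with sparse_super enabled, superblock backups land only in group 1 and
 * in groups that are powers of 3, 5 or 7, i.e. groups 0, 1, 3, 5, 7, 9,
 * 25, 27, 49, 81, 125, 243, ...
 */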

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
                                          ext4_group_t group)
{
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
        ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

        if (group == first || group == first + 1 || group == last)
                return 1;
        return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
                                            ext4_group_t group)
{
        if (!ext4_bg_has_super(sb, group))
                return 0;

        if (ext4_has_feature_meta_bg(sb))
                return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        else
                return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
        unsigned long first_meta_bg =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
        unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

        if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg)
                return ext4_bg_num_gdb_nometa(sb, group);

        return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
                                            ext4_group_t block_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned num;

        /* Check for superblock and gdt backups in this group */
        num = ext4_bg_has_super(sb, block_group);

        if (!ext4_has_feature_meta_bg(sb) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (num) {
                        num += ext4_bg_num_gdb(sb, block_group);
                        num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                num += ext4_bg_num_gdb(sb, block_group);
        }
        return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_group_t block_group;
        ext4_grpblk_t colour;
        int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;

        block_group = ei->i_block_group;
        if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
                /*
                 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
                 * block groups per flexgroup, reserve the first block
                 * group for directories and special files.  Regular
                 * files will start at the second block group.  This
                 * tends to speed up directory access and improves
                 * fsck times.
                 */
                block_group &= ~(flex_size-1);
                if (S_ISREG(inode->i_mode))
                        block_group++;
        }
        bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        /*
         * If we are doing delayed allocation, we don't need to take the
         * colour into account.
         */
        if (test_opt(inode->i_sb, DELALLOC))
                return bg_start;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (task_pid_nr(current) % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (task_pid_nr(current) % 16) *
                        ((last_block - bg_start) / 16);
        return bg_start + colour;
}