1 /* 2 * linux/fs/ext4/balloc.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993 10 * Big-endian to little-endian byte-swapping/bitmaps by 11 * David S. Miller (davem@caip.rutgers.edu), 1995 12 */ 13 14 #include <linux/time.h> 15 #include <linux/capability.h> 16 #include <linux/fs.h> 17 #include <linux/jbd2.h> 18 #include <linux/ext4_fs.h> 19 #include <linux/ext4_jbd2.h> 20 #include <linux/quotaops.h> 21 #include <linux/buffer_head.h> 22 23 #include "group.h" 24 /* 25 * balloc.c contains the blocks allocation and deallocation routines 26 */ 27 28 /* 29 * Calculate the block group number and offset, given a block number 30 */ 31 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, 32 ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp) 33 { 34 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 35 ext4_grpblk_t offset; 36 37 blocknr = blocknr - le32_to_cpu(es->s_first_data_block); 38 offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)); 39 if (offsetp) 40 *offsetp = offset; 41 if (blockgrpp) 42 *blockgrpp = blocknr; 43 44 } 45 46 /* Initializes an uninitialized block bitmap if given, and returns the 47 * number of blocks free in the group. */ 48 unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, 49 ext4_group_t block_group, struct ext4_group_desc *gdp) 50 { 51 unsigned long start; 52 int bit, bit_max; 53 unsigned free_blocks, group_blocks; 54 struct ext4_sb_info *sbi = EXT4_SB(sb); 55 56 if (bh) { 57 J_ASSERT_BH(bh, buffer_locked(bh)); 58 59 /* If checksum is bad mark all blocks used to prevent allocation 60 * essentially implementing a per-group read-only flag. */ 61 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 62 ext4_error(sb, __FUNCTION__, 63 "Checksum bad for group %lu\n", block_group); 64 gdp->bg_free_blocks_count = 0; 65 gdp->bg_free_inodes_count = 0; 66 gdp->bg_itable_unused = 0; 67 memset(bh->b_data, 0xff, sb->s_blocksize); 68 return 0; 69 } 70 memset(bh->b_data, 0, sb->s_blocksize); 71 } 72 73 /* Check for superblock and gdt backups in this group */ 74 bit_max = ext4_bg_has_super(sb, block_group); 75 76 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || 77 block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) * 78 sbi->s_desc_per_block) { 79 if (bit_max) { 80 bit_max += ext4_bg_num_gdb(sb, block_group); 81 bit_max += 82 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks); 83 } 84 } else { /* For META_BG_BLOCK_GROUPS */ 85 int group_rel = (block_group - 86 le32_to_cpu(sbi->s_es->s_first_meta_bg)) % 87 EXT4_DESC_PER_BLOCK(sb); 88 if (group_rel == 0 || group_rel == 1 || 89 (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1)) 90 bit_max += 1; 91 } 92 93 if (block_group == sbi->s_groups_count - 1) { 94 /* 95 * Even though mke2fs always initialize first and last group 96 * if some other tool enabled the EXT4_BG_BLOCK_UNINIT we need 97 * to make sure we calculate the right free blocks 98 */ 99 group_blocks = ext4_blocks_count(sbi->s_es) - 100 le32_to_cpu(sbi->s_es->s_first_data_block) - 101 (EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count -1)); 102 } else { 103 group_blocks = EXT4_BLOCKS_PER_GROUP(sb); 104 } 105 106 free_blocks = group_blocks - bit_max; 107 108 if (bh) { 109 for (bit = 0; bit < bit_max; bit++) 110 ext4_set_bit(bit, bh->b_data); 111 112 start = block_group * EXT4_BLOCKS_PER_GROUP(sb) + 113 le32_to_cpu(sbi->s_es->s_first_data_block); 114 115 /* Set bits for block and inode bitmaps, and inode table */ 116 ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data); 117 ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data); 118 for (bit = (ext4_inode_table(sb, gdp) - start), 119 bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++) 120 ext4_set_bit(bit, bh->b_data); 121 122 /* 123 * Also if the number of blocks within the group is 124 * less than the blocksize * 8 ( which is the size 125 * of bitmap ), set rest of the block bitmap to 1 126 */ 127 mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data); 128 } 129 130 return free_blocks - sbi->s_itb_per_group - 2; 131 } 132 133 134 /* 135 * The free blocks are managed by bitmaps. A file system contains several 136 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 137 * block for inodes, N blocks for the inode table and data blocks. 138 * 139 * The file system contains group descriptors which are located after the 140 * super block. Each descriptor contains the number of the bitmap block and 141 * the free blocks count in the block. The descriptors are loaded in memory 142 * when a file system is mounted (see ext4_fill_super). 143 */ 144 145 146 #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) 147 148 /** 149 * ext4_get_group_desc() -- load group descriptor from disk 150 * @sb: super block 151 * @block_group: given block group 152 * @bh: pointer to the buffer head to store the block 153 * group descriptor 154 */ 155 struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, 156 ext4_group_t block_group, 157 struct buffer_head ** bh) 158 { 159 unsigned long group_desc; 160 unsigned long offset; 161 struct ext4_group_desc * desc; 162 struct ext4_sb_info *sbi = EXT4_SB(sb); 163 164 if (block_group >= sbi->s_groups_count) { 165 ext4_error (sb, "ext4_get_group_desc", 166 "block_group >= groups_count - " 167 "block_group = %lu, groups_count = %lu", 168 block_group, sbi->s_groups_count); 169 170 return NULL; 171 } 172 smp_rmb(); 173 174 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); 175 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); 176 if (!sbi->s_group_desc[group_desc]) { 177 ext4_error (sb, "ext4_get_group_desc", 178 "Group descriptor not loaded - " 179 "block_group = %lu, group_desc = %lu, desc = %lu", 180 block_group, group_desc, offset); 181 return NULL; 182 } 183 184 desc = (struct ext4_group_desc *)( 185 (__u8 *)sbi->s_group_desc[group_desc]->b_data + 186 offset * EXT4_DESC_SIZE(sb)); 187 if (bh) 188 *bh = sbi->s_group_desc[group_desc]; 189 return desc; 190 } 191 192 static int ext4_valid_block_bitmap(struct super_block *sb, 193 struct ext4_group_desc *desc, 194 unsigned int block_group, 195 struct buffer_head *bh) 196 { 197 ext4_grpblk_t offset; 198 ext4_grpblk_t next_zero_bit; 199 ext4_fsblk_t bitmap_blk; 200 ext4_fsblk_t group_first_block; 201 202 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { 203 /* with FLEX_BG, the inode/block bitmaps and itable 204 * blocks may not be in the group at all 205 * so the bitmap validation will be skipped for those groups 206 * or it has to also read the block group where the bitmaps 207 * are located to verify they are set. 208 */ 209 return 1; 210 } 211 group_first_block = ext4_group_first_block_no(sb, block_group); 212 213 /* check whether block bitmap block number is set */ 214 bitmap_blk = ext4_block_bitmap(sb, desc); 215 offset = bitmap_blk - group_first_block; 216 if (!ext4_test_bit(offset, bh->b_data)) 217 /* bad block bitmap */ 218 goto err_out; 219 220 /* check whether the inode bitmap block number is set */ 221 bitmap_blk = ext4_inode_bitmap(sb, desc); 222 offset = bitmap_blk - group_first_block; 223 if (!ext4_test_bit(offset, bh->b_data)) 224 /* bad block bitmap */ 225 goto err_out; 226 227 /* check whether the inode table block number is set */ 228 bitmap_blk = ext4_inode_table(sb, desc); 229 offset = bitmap_blk - group_first_block; 230 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 231 offset + EXT4_SB(sb)->s_itb_per_group, 232 offset); 233 if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group) 234 /* good bitmap for inode tables */ 235 return 1; 236 237 err_out: 238 ext4_error(sb, __FUNCTION__, 239 "Invalid block bitmap - " 240 "block_group = %d, block = %llu", 241 block_group, bitmap_blk); 242 return 0; 243 } 244 /** 245 * read_block_bitmap() 246 * @sb: super block 247 * @block_group: given block group 248 * 249 * Read the bitmap for a given block_group,and validate the 250 * bits for block/inode/inode tables are set in the bitmaps 251 * 252 * Return buffer_head on success or NULL in case of failure. 253 */ 254 struct buffer_head * 255 read_block_bitmap(struct super_block *sb, ext4_group_t block_group) 256 { 257 struct ext4_group_desc * desc; 258 struct buffer_head * bh = NULL; 259 ext4_fsblk_t bitmap_blk; 260 261 desc = ext4_get_group_desc(sb, block_group, NULL); 262 if (!desc) 263 return NULL; 264 bitmap_blk = ext4_block_bitmap(sb, desc); 265 bh = sb_getblk(sb, bitmap_blk); 266 if (unlikely(!bh)) { 267 ext4_error(sb, __FUNCTION__, 268 "Cannot read block bitmap - " 269 "block_group = %d, block_bitmap = %llu", 270 (int)block_group, (unsigned long long)bitmap_blk); 271 return NULL; 272 } 273 if (bh_uptodate_or_lock(bh)) 274 return bh; 275 276 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 277 ext4_init_block_bitmap(sb, bh, block_group, desc); 278 set_buffer_uptodate(bh); 279 unlock_buffer(bh); 280 return bh; 281 } 282 if (bh_submit_read(bh) < 0) { 283 put_bh(bh); 284 ext4_error(sb, __FUNCTION__, 285 "Cannot read block bitmap - " 286 "block_group = %d, block_bitmap = %llu", 287 (int)block_group, (unsigned long long)bitmap_blk); 288 return NULL; 289 } 290 if (!ext4_valid_block_bitmap(sb, desc, block_group, bh)) { 291 put_bh(bh); 292 return NULL; 293 } 294 295 return bh; 296 } 297 /* 298 * The reservation window structure operations 299 * -------------------------------------------- 300 * Operations include: 301 * dump, find, add, remove, is_empty, find_next_reservable_window, etc. 302 * 303 * We use a red-black tree to represent per-filesystem reservation 304 * windows. 305 * 306 */ 307 308 /** 309 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map 310 * @rb_root: root of per-filesystem reservation rb tree 311 * @verbose: verbose mode 312 * @fn: function which wishes to dump the reservation map 313 * 314 * If verbose is turned on, it will print the whole block reservation 315 * windows(start, end). Otherwise, it will only print out the "bad" windows, 316 * those windows that overlap with their immediate neighbors. 317 */ 318 #if 1 319 static void __rsv_window_dump(struct rb_root *root, int verbose, 320 const char *fn) 321 { 322 struct rb_node *n; 323 struct ext4_reserve_window_node *rsv, *prev; 324 int bad; 325 326 restart: 327 n = rb_first(root); 328 bad = 0; 329 prev = NULL; 330 331 printk("Block Allocation Reservation Windows Map (%s):\n", fn); 332 while (n) { 333 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node); 334 if (verbose) 335 printk("reservation window 0x%p " 336 "start: %llu, end: %llu\n", 337 rsv, rsv->rsv_start, rsv->rsv_end); 338 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { 339 printk("Bad reservation %p (start >= end)\n", 340 rsv); 341 bad = 1; 342 } 343 if (prev && prev->rsv_end >= rsv->rsv_start) { 344 printk("Bad reservation %p (prev->end >= start)\n", 345 rsv); 346 bad = 1; 347 } 348 if (bad) { 349 if (!verbose) { 350 printk("Restarting reservation walk in verbose mode\n"); 351 verbose = 1; 352 goto restart; 353 } 354 } 355 n = rb_next(n); 356 prev = rsv; 357 } 358 printk("Window map complete.\n"); 359 if (bad) 360 BUG(); 361 } 362 #define rsv_window_dump(root, verbose) \ 363 __rsv_window_dump((root), (verbose), __FUNCTION__) 364 #else 365 #define rsv_window_dump(root, verbose) do {} while (0) 366 #endif 367 368 /** 369 * goal_in_my_reservation() 370 * @rsv: inode's reservation window 371 * @grp_goal: given goal block relative to the allocation block group 372 * @group: the current allocation block group 373 * @sb: filesystem super block 374 * 375 * Test if the given goal block (group relative) is within the file's 376 * own block reservation window range. 377 * 378 * If the reservation window is outside the goal allocation group, return 0; 379 * grp_goal (given goal block) could be -1, which means no specific 380 * goal block. In this case, always return 1. 381 * If the goal block is within the reservation window, return 1; 382 * otherwise, return 0; 383 */ 384 static int 385 goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal, 386 ext4_group_t group, struct super_block *sb) 387 { 388 ext4_fsblk_t group_first_block, group_last_block; 389 390 group_first_block = ext4_group_first_block_no(sb, group); 391 group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); 392 393 if ((rsv->_rsv_start > group_last_block) || 394 (rsv->_rsv_end < group_first_block)) 395 return 0; 396 if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start) 397 || (grp_goal + group_first_block > rsv->_rsv_end))) 398 return 0; 399 return 1; 400 } 401 402 /** 403 * search_reserve_window() 404 * @rb_root: root of reservation tree 405 * @goal: target allocation block 406 * 407 * Find the reserved window which includes the goal, or the previous one 408 * if the goal is not in any window. 409 * Returns NULL if there are no windows or if all windows start after the goal. 410 */ 411 static struct ext4_reserve_window_node * 412 search_reserve_window(struct rb_root *root, ext4_fsblk_t goal) 413 { 414 struct rb_node *n = root->rb_node; 415 struct ext4_reserve_window_node *rsv; 416 417 if (!n) 418 return NULL; 419 420 do { 421 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node); 422 423 if (goal < rsv->rsv_start) 424 n = n->rb_left; 425 else if (goal > rsv->rsv_end) 426 n = n->rb_right; 427 else 428 return rsv; 429 } while (n); 430 /* 431 * We've fallen off the end of the tree: the goal wasn't inside 432 * any particular node. OK, the previous node must be to one 433 * side of the interval containing the goal. If it's the RHS, 434 * we need to back up one. 435 */ 436 if (rsv->rsv_start > goal) { 437 n = rb_prev(&rsv->rsv_node); 438 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node); 439 } 440 return rsv; 441 } 442 443 /** 444 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree. 445 * @sb: super block 446 * @rsv: reservation window to add 447 * 448 * Must be called with rsv_lock hold. 449 */ 450 void ext4_rsv_window_add(struct super_block *sb, 451 struct ext4_reserve_window_node *rsv) 452 { 453 struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root; 454 struct rb_node *node = &rsv->rsv_node; 455 ext4_fsblk_t start = rsv->rsv_start; 456 457 struct rb_node ** p = &root->rb_node; 458 struct rb_node * parent = NULL; 459 struct ext4_reserve_window_node *this; 460 461 while (*p) 462 { 463 parent = *p; 464 this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node); 465 466 if (start < this->rsv_start) 467 p = &(*p)->rb_left; 468 else if (start > this->rsv_end) 469 p = &(*p)->rb_right; 470 else { 471 rsv_window_dump(root, 1); 472 BUG(); 473 } 474 } 475 476 rb_link_node(node, parent, p); 477 rb_insert_color(node, root); 478 } 479 480 /** 481 * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree 482 * @sb: super block 483 * @rsv: reservation window to remove 484 * 485 * Mark the block reservation window as not allocated, and unlink it 486 * from the filesystem reservation window rb tree. Must be called with 487 * rsv_lock hold. 488 */ 489 static void rsv_window_remove(struct super_block *sb, 490 struct ext4_reserve_window_node *rsv) 491 { 492 rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED; 493 rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED; 494 rsv->rsv_alloc_hit = 0; 495 rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root); 496 } 497 498 /* 499 * rsv_is_empty() -- Check if the reservation window is allocated. 500 * @rsv: given reservation window to check 501 * 502 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED. 503 */ 504 static inline int rsv_is_empty(struct ext4_reserve_window *rsv) 505 { 506 /* a valid reservation end block could not be 0 */ 507 return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED; 508 } 509 510 /** 511 * ext4_init_block_alloc_info() 512 * @inode: file inode structure 513 * 514 * Allocate and initialize the reservation window structure, and 515 * link the window to the ext4 inode structure at last 516 * 517 * The reservation window structure is only dynamically allocated 518 * and linked to ext4 inode the first time the open file 519 * needs a new block. So, before every ext4_new_block(s) call, for 520 * regular files, we should check whether the reservation window 521 * structure exists or not. In the latter case, this function is called. 522 * Fail to do so will result in block reservation being turned off for that 523 * open file. 524 * 525 * This function is called from ext4_get_blocks_handle(), also called 526 * when setting the reservation window size through ioctl before the file 527 * is open for write (needs block allocation). 528 * 529 * Needs down_write(i_data_sem) protection prior to call this function. 530 */ 531 void ext4_init_block_alloc_info(struct inode *inode) 532 { 533 struct ext4_inode_info *ei = EXT4_I(inode); 534 struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info; 535 struct super_block *sb = inode->i_sb; 536 537 block_i = kmalloc(sizeof(*block_i), GFP_NOFS); 538 if (block_i) { 539 struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node; 540 541 rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED; 542 rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED; 543 544 /* 545 * if filesystem is mounted with NORESERVATION, the goal 546 * reservation window size is set to zero to indicate 547 * block reservation is off 548 */ 549 if (!test_opt(sb, RESERVATION)) 550 rsv->rsv_goal_size = 0; 551 else 552 rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS; 553 rsv->rsv_alloc_hit = 0; 554 block_i->last_alloc_logical_block = 0; 555 block_i->last_alloc_physical_block = 0; 556 } 557 ei->i_block_alloc_info = block_i; 558 } 559 560 /** 561 * ext4_discard_reservation() 562 * @inode: inode 563 * 564 * Discard(free) block reservation window on last file close, or truncate 565 * or at last iput(). 566 * 567 * It is being called in three cases: 568 * ext4_release_file(): last writer close the file 569 * ext4_clear_inode(): last iput(), when nobody link to this file. 570 * ext4_truncate(): when the block indirect map is about to change. 571 * 572 */ 573 void ext4_discard_reservation(struct inode *inode) 574 { 575 struct ext4_inode_info *ei = EXT4_I(inode); 576 struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info; 577 struct ext4_reserve_window_node *rsv; 578 spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock; 579 580 ext4_mb_discard_inode_preallocations(inode); 581 582 if (!block_i) 583 return; 584 585 rsv = &block_i->rsv_window_node; 586 if (!rsv_is_empty(&rsv->rsv_window)) { 587 spin_lock(rsv_lock); 588 if (!rsv_is_empty(&rsv->rsv_window)) 589 rsv_window_remove(inode->i_sb, rsv); 590 spin_unlock(rsv_lock); 591 } 592 } 593 594 /** 595 * ext4_free_blocks_sb() -- Free given blocks and update quota 596 * @handle: handle to this transaction 597 * @sb: super block 598 * @block: start physcial block to free 599 * @count: number of blocks to free 600 * @pdquot_freed_blocks: pointer to quota 601 */ 602 void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb, 603 ext4_fsblk_t block, unsigned long count, 604 unsigned long *pdquot_freed_blocks) 605 { 606 struct buffer_head *bitmap_bh = NULL; 607 struct buffer_head *gd_bh; 608 ext4_group_t block_group; 609 ext4_grpblk_t bit; 610 unsigned long i; 611 unsigned long overflow; 612 struct ext4_group_desc * desc; 613 struct ext4_super_block * es; 614 struct ext4_sb_info *sbi; 615 int err = 0, ret; 616 ext4_grpblk_t group_freed; 617 618 *pdquot_freed_blocks = 0; 619 sbi = EXT4_SB(sb); 620 es = sbi->s_es; 621 if (block < le32_to_cpu(es->s_first_data_block) || 622 block + count < block || 623 block + count > ext4_blocks_count(es)) { 624 ext4_error (sb, "ext4_free_blocks", 625 "Freeing blocks not in datazone - " 626 "block = %llu, count = %lu", block, count); 627 goto error_return; 628 } 629 630 ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1); 631 632 do_more: 633 overflow = 0; 634 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 635 /* 636 * Check to see if we are freeing blocks across a group 637 * boundary. 638 */ 639 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { 640 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb); 641 count -= overflow; 642 } 643 brelse(bitmap_bh); 644 bitmap_bh = read_block_bitmap(sb, block_group); 645 if (!bitmap_bh) 646 goto error_return; 647 desc = ext4_get_group_desc (sb, block_group, &gd_bh); 648 if (!desc) 649 goto error_return; 650 651 if (in_range(ext4_block_bitmap(sb, desc), block, count) || 652 in_range(ext4_inode_bitmap(sb, desc), block, count) || 653 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || 654 in_range(block + count - 1, ext4_inode_table(sb, desc), 655 sbi->s_itb_per_group)) { 656 ext4_error (sb, "ext4_free_blocks", 657 "Freeing blocks in system zones - " 658 "Block = %llu, count = %lu", 659 block, count); 660 goto error_return; 661 } 662 663 /* 664 * We are about to start releasing blocks in the bitmap, 665 * so we need undo access. 666 */ 667 /* @@@ check errors */ 668 BUFFER_TRACE(bitmap_bh, "getting undo access"); 669 err = ext4_journal_get_undo_access(handle, bitmap_bh); 670 if (err) 671 goto error_return; 672 673 /* 674 * We are about to modify some metadata. Call the journal APIs 675 * to unshare ->b_data if a currently-committing transaction is 676 * using it 677 */ 678 BUFFER_TRACE(gd_bh, "get_write_access"); 679 err = ext4_journal_get_write_access(handle, gd_bh); 680 if (err) 681 goto error_return; 682 683 jbd_lock_bh_state(bitmap_bh); 684 685 for (i = 0, group_freed = 0; i < count; i++) { 686 /* 687 * An HJ special. This is expensive... 688 */ 689 #ifdef CONFIG_JBD2_DEBUG 690 jbd_unlock_bh_state(bitmap_bh); 691 { 692 struct buffer_head *debug_bh; 693 debug_bh = sb_find_get_block(sb, block + i); 694 if (debug_bh) { 695 BUFFER_TRACE(debug_bh, "Deleted!"); 696 if (!bh2jh(bitmap_bh)->b_committed_data) 697 BUFFER_TRACE(debug_bh, 698 "No commited data in bitmap"); 699 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap"); 700 __brelse(debug_bh); 701 } 702 } 703 jbd_lock_bh_state(bitmap_bh); 704 #endif 705 if (need_resched()) { 706 jbd_unlock_bh_state(bitmap_bh); 707 cond_resched(); 708 jbd_lock_bh_state(bitmap_bh); 709 } 710 /* @@@ This prevents newly-allocated data from being 711 * freed and then reallocated within the same 712 * transaction. 713 * 714 * Ideally we would want to allow that to happen, but to 715 * do so requires making jbd2_journal_forget() capable of 716 * revoking the queued write of a data block, which 717 * implies blocking on the journal lock. *forget() 718 * cannot block due to truncate races. 719 * 720 * Eventually we can fix this by making jbd2_journal_forget() 721 * return a status indicating whether or not it was able 722 * to revoke the buffer. On successful revoke, it is 723 * safe not to set the allocation bit in the committed 724 * bitmap, because we know that there is no outstanding 725 * activity on the buffer any more and so it is safe to 726 * reallocate it. 727 */ 728 BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); 729 J_ASSERT_BH(bitmap_bh, 730 bh2jh(bitmap_bh)->b_committed_data != NULL); 731 ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i, 732 bh2jh(bitmap_bh)->b_committed_data); 733 734 /* 735 * We clear the bit in the bitmap after setting the committed 736 * data bit, because this is the reverse order to that which 737 * the allocator uses. 738 */ 739 BUFFER_TRACE(bitmap_bh, "clear bit"); 740 if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group), 741 bit + i, bitmap_bh->b_data)) { 742 jbd_unlock_bh_state(bitmap_bh); 743 ext4_error(sb, __FUNCTION__, 744 "bit already cleared for block %llu", 745 (ext4_fsblk_t)(block + i)); 746 jbd_lock_bh_state(bitmap_bh); 747 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 748 } else { 749 group_freed++; 750 } 751 } 752 jbd_unlock_bh_state(bitmap_bh); 753 754 spin_lock(sb_bgl_lock(sbi, block_group)); 755 desc->bg_free_blocks_count = 756 cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + 757 group_freed); 758 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); 759 spin_unlock(sb_bgl_lock(sbi, block_group)); 760 percpu_counter_add(&sbi->s_freeblocks_counter, count); 761 762 /* We dirtied the bitmap block */ 763 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 764 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 765 766 /* And the group descriptor block */ 767 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 768 ret = ext4_journal_dirty_metadata(handle, gd_bh); 769 if (!err) err = ret; 770 *pdquot_freed_blocks += group_freed; 771 772 if (overflow && !err) { 773 block += count; 774 count = overflow; 775 goto do_more; 776 } 777 sb->s_dirt = 1; 778 error_return: 779 brelse(bitmap_bh); 780 ext4_std_error(sb, err); 781 return; 782 } 783 784 /** 785 * ext4_free_blocks() -- Free given blocks and update quota 786 * @handle: handle for this transaction 787 * @inode: inode 788 * @block: start physical block to free 789 * @count: number of blocks to count 790 * @metadata: Are these metadata blocks 791 */ 792 void ext4_free_blocks(handle_t *handle, struct inode *inode, 793 ext4_fsblk_t block, unsigned long count, 794 int metadata) 795 { 796 struct super_block * sb; 797 unsigned long dquot_freed_blocks; 798 799 /* this isn't the right place to decide whether block is metadata 800 * inode.c/extents.c knows better, but for safety ... */ 801 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || 802 ext4_should_journal_data(inode)) 803 metadata = 1; 804 805 sb = inode->i_sb; 806 807 if (!test_opt(sb, MBALLOC) || !EXT4_SB(sb)->s_group_info) 808 ext4_free_blocks_sb(handle, sb, block, count, 809 &dquot_freed_blocks); 810 else 811 ext4_mb_free_blocks(handle, inode, block, count, 812 metadata, &dquot_freed_blocks); 813 if (dquot_freed_blocks) 814 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); 815 return; 816 } 817 818 /** 819 * ext4_test_allocatable() 820 * @nr: given allocation block group 821 * @bh: bufferhead contains the bitmap of the given block group 822 * 823 * For ext4 allocations, we must not reuse any blocks which are 824 * allocated in the bitmap buffer's "last committed data" copy. This 825 * prevents deletes from freeing up the page for reuse until we have 826 * committed the delete transaction. 827 * 828 * If we didn't do this, then deleting something and reallocating it as 829 * data would allow the old block to be overwritten before the 830 * transaction committed (because we force data to disk before commit). 831 * This would lead to corruption if we crashed between overwriting the 832 * data and committing the delete. 833 * 834 * @@@ We may want to make this allocation behaviour conditional on 835 * data-writes at some point, and disable it for metadata allocations or 836 * sync-data inodes. 837 */ 838 static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh) 839 { 840 int ret; 841 struct journal_head *jh = bh2jh(bh); 842 843 if (ext4_test_bit(nr, bh->b_data)) 844 return 0; 845 846 jbd_lock_bh_state(bh); 847 if (!jh->b_committed_data) 848 ret = 1; 849 else 850 ret = !ext4_test_bit(nr, jh->b_committed_data); 851 jbd_unlock_bh_state(bh); 852 return ret; 853 } 854 855 /** 856 * bitmap_search_next_usable_block() 857 * @start: the starting block (group relative) of the search 858 * @bh: bufferhead contains the block group bitmap 859 * @maxblocks: the ending block (group relative) of the reservation 860 * 861 * The bitmap search --- search forward alternately through the actual 862 * bitmap on disk and the last-committed copy in journal, until we find a 863 * bit free in both bitmaps. 864 */ 865 static ext4_grpblk_t 866 bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh, 867 ext4_grpblk_t maxblocks) 868 { 869 ext4_grpblk_t next; 870 struct journal_head *jh = bh2jh(bh); 871 872 while (start < maxblocks) { 873 next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start); 874 if (next >= maxblocks) 875 return -1; 876 if (ext4_test_allocatable(next, bh)) 877 return next; 878 jbd_lock_bh_state(bh); 879 if (jh->b_committed_data) 880 start = ext4_find_next_zero_bit(jh->b_committed_data, 881 maxblocks, next); 882 jbd_unlock_bh_state(bh); 883 } 884 return -1; 885 } 886 887 /** 888 * find_next_usable_block() 889 * @start: the starting block (group relative) to find next 890 * allocatable block in bitmap. 891 * @bh: bufferhead contains the block group bitmap 892 * @maxblocks: the ending block (group relative) for the search 893 * 894 * Find an allocatable block in a bitmap. We honor both the bitmap and 895 * its last-committed copy (if that exists), and perform the "most 896 * appropriate allocation" algorithm of looking for a free block near 897 * the initial goal; then for a free byte somewhere in the bitmap; then 898 * for any free bit in the bitmap. 899 */ 900 static ext4_grpblk_t 901 find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh, 902 ext4_grpblk_t maxblocks) 903 { 904 ext4_grpblk_t here, next; 905 char *p, *r; 906 907 if (start > 0) { 908 /* 909 * The goal was occupied; search forward for a free 910 * block within the next XX blocks. 911 * 912 * end_goal is more or less random, but it has to be 913 * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the 914 * next 64-bit boundary is simple.. 915 */ 916 ext4_grpblk_t end_goal = (start + 63) & ~63; 917 if (end_goal > maxblocks) 918 end_goal = maxblocks; 919 here = ext4_find_next_zero_bit(bh->b_data, end_goal, start); 920 if (here < end_goal && ext4_test_allocatable(here, bh)) 921 return here; 922 ext4_debug("Bit not found near goal\n"); 923 } 924 925 here = start; 926 if (here < 0) 927 here = 0; 928 929 p = ((char *)bh->b_data) + (here >> 3); 930 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3)); 931 next = (r - ((char *)bh->b_data)) << 3; 932 933 if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh)) 934 return next; 935 936 /* 937 * The bitmap search --- search forward alternately through the actual 938 * bitmap and the last-committed copy until we find a bit free in 939 * both 940 */ 941 here = bitmap_search_next_usable_block(here, bh, maxblocks); 942 return here; 943 } 944 945 /** 946 * claim_block() 947 * @block: the free block (group relative) to allocate 948 * @bh: the bufferhead containts the block group bitmap 949 * 950 * We think we can allocate this block in this bitmap. Try to set the bit. 951 * If that succeeds then check that nobody has allocated and then freed the 952 * block since we saw that is was not marked in b_committed_data. If it _was_ 953 * allocated and freed then clear the bit in the bitmap again and return 954 * zero (failure). 955 */ 956 static inline int 957 claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh) 958 { 959 struct journal_head *jh = bh2jh(bh); 960 int ret; 961 962 if (ext4_set_bit_atomic(lock, block, bh->b_data)) 963 return 0; 964 jbd_lock_bh_state(bh); 965 if (jh->b_committed_data && ext4_test_bit(block,jh->b_committed_data)) { 966 ext4_clear_bit_atomic(lock, block, bh->b_data); 967 ret = 0; 968 } else { 969 ret = 1; 970 } 971 jbd_unlock_bh_state(bh); 972 return ret; 973 } 974 975 /** 976 * ext4_try_to_allocate() 977 * @sb: superblock 978 * @handle: handle to this transaction 979 * @group: given allocation block group 980 * @bitmap_bh: bufferhead holds the block bitmap 981 * @grp_goal: given target block within the group 982 * @count: target number of blocks to allocate 983 * @my_rsv: reservation window 984 * 985 * Attempt to allocate blocks within a give range. Set the range of allocation 986 * first, then find the first free bit(s) from the bitmap (within the range), 987 * and at last, allocate the blocks by claiming the found free bit as allocated. 988 * 989 * To set the range of this allocation: 990 * if there is a reservation window, only try to allocate block(s) from the 991 * file's own reservation window; 992 * Otherwise, the allocation range starts from the give goal block, ends at 993 * the block group's last block. 994 * 995 * If we failed to allocate the desired block then we may end up crossing to a 996 * new bitmap. In that case we must release write access to the old one via 997 * ext4_journal_release_buffer(), else we'll run out of credits. 998 */ 999 static ext4_grpblk_t 1000 ext4_try_to_allocate(struct super_block *sb, handle_t *handle, 1001 ext4_group_t group, struct buffer_head *bitmap_bh, 1002 ext4_grpblk_t grp_goal, unsigned long *count, 1003 struct ext4_reserve_window *my_rsv) 1004 { 1005 ext4_fsblk_t group_first_block; 1006 ext4_grpblk_t start, end; 1007 unsigned long num = 0; 1008 1009 /* we do allocation within the reservation window if we have a window */ 1010 if (my_rsv) { 1011 group_first_block = ext4_group_first_block_no(sb, group); 1012 if (my_rsv->_rsv_start >= group_first_block) 1013 start = my_rsv->_rsv_start - group_first_block; 1014 else 1015 /* reservation window cross group boundary */ 1016 start = 0; 1017 end = my_rsv->_rsv_end - group_first_block + 1; 1018 if (end > EXT4_BLOCKS_PER_GROUP(sb)) 1019 /* reservation window crosses group boundary */ 1020 end = EXT4_BLOCKS_PER_GROUP(sb); 1021 if ((start <= grp_goal) && (grp_goal < end)) 1022 start = grp_goal; 1023 else 1024 grp_goal = -1; 1025 } else { 1026 if (grp_goal > 0) 1027 start = grp_goal; 1028 else 1029 start = 0; 1030 end = EXT4_BLOCKS_PER_GROUP(sb); 1031 } 1032 1033 BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb)); 1034 1035 repeat: 1036 if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) { 1037 grp_goal = find_next_usable_block(start, bitmap_bh, end); 1038 if (grp_goal < 0) 1039 goto fail_access; 1040 if (!my_rsv) { 1041 int i; 1042 1043 for (i = 0; i < 7 && grp_goal > start && 1044 ext4_test_allocatable(grp_goal - 1, 1045 bitmap_bh); 1046 i++, grp_goal--) 1047 ; 1048 } 1049 } 1050 start = grp_goal; 1051 1052 if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group), 1053 grp_goal, bitmap_bh)) { 1054 /* 1055 * The block was allocated by another thread, or it was 1056 * allocated and then freed by another thread 1057 */ 1058 start++; 1059 grp_goal++; 1060 if (start >= end) 1061 goto fail_access; 1062 goto repeat; 1063 } 1064 num++; 1065 grp_goal++; 1066 while (num < *count && grp_goal < end 1067 && ext4_test_allocatable(grp_goal, bitmap_bh) 1068 && claim_block(sb_bgl_lock(EXT4_SB(sb), group), 1069 grp_goal, bitmap_bh)) { 1070 num++; 1071 grp_goal++; 1072 } 1073 *count = num; 1074 return grp_goal - num; 1075 fail_access: 1076 *count = num; 1077 return -1; 1078 } 1079 1080 /** 1081 * find_next_reservable_window(): 1082 * find a reservable space within the given range. 1083 * It does not allocate the reservation window for now: 1084 * alloc_new_reservation() will do the work later. 1085 * 1086 * @search_head: the head of the searching list; 1087 * This is not necessarily the list head of the whole filesystem 1088 * 1089 * We have both head and start_block to assist the search 1090 * for the reservable space. The list starts from head, 1091 * but we will shift to the place where start_block is, 1092 * then start from there, when looking for a reservable space. 1093 * 1094 * @size: the target new reservation window size 1095 * 1096 * @group_first_block: the first block we consider to start 1097 * the real search from 1098 * 1099 * @last_block: 1100 * the maximum block number that our goal reservable space 1101 * could start from. This is normally the last block in this 1102 * group. The search will end when we found the start of next 1103 * possible reservable space is out of this boundary. 1104 * This could handle the cross boundary reservation window 1105 * request. 1106 * 1107 * basically we search from the given range, rather than the whole 1108 * reservation double linked list, (start_block, last_block) 1109 * to find a free region that is of my size and has not 1110 * been reserved. 1111 * 1112 */ 1113 static int find_next_reservable_window( 1114 struct ext4_reserve_window_node *search_head, 1115 struct ext4_reserve_window_node *my_rsv, 1116 struct super_block * sb, 1117 ext4_fsblk_t start_block, 1118 ext4_fsblk_t last_block) 1119 { 1120 struct rb_node *next; 1121 struct ext4_reserve_window_node *rsv, *prev; 1122 ext4_fsblk_t cur; 1123 int size = my_rsv->rsv_goal_size; 1124 1125 /* TODO: make the start of the reservation window byte-aligned */ 1126 /* cur = *start_block & ~7;*/ 1127 cur = start_block; 1128 rsv = search_head; 1129 if (!rsv) 1130 return -1; 1131 1132 while (1) { 1133 if (cur <= rsv->rsv_end) 1134 cur = rsv->rsv_end + 1; 1135 1136 /* TODO? 1137 * in the case we could not find a reservable space 1138 * that is what is expected, during the re-search, we could 1139 * remember what's the largest reservable space we could have 1140 * and return that one. 1141 * 1142 * For now it will fail if we could not find the reservable 1143 * space with expected-size (or more)... 1144 */ 1145 if (cur > last_block) 1146 return -1; /* fail */ 1147 1148 prev = rsv; 1149 next = rb_next(&rsv->rsv_node); 1150 rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node); 1151 1152 /* 1153 * Reached the last reservation, we can just append to the 1154 * previous one. 1155 */ 1156 if (!next) 1157 break; 1158 1159 if (cur + size <= rsv->rsv_start) { 1160 /* 1161 * Found a reserveable space big enough. We could 1162 * have a reservation across the group boundary here 1163 */ 1164 break; 1165 } 1166 } 1167 /* 1168 * we come here either : 1169 * when we reach the end of the whole list, 1170 * and there is empty reservable space after last entry in the list. 1171 * append it to the end of the list. 1172 * 1173 * or we found one reservable space in the middle of the list, 1174 * return the reservation window that we could append to. 1175 * succeed. 1176 */ 1177 1178 if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window))) 1179 rsv_window_remove(sb, my_rsv); 1180 1181 /* 1182 * Let's book the whole avaliable window for now. We will check the 1183 * disk bitmap later and then, if there are free blocks then we adjust 1184 * the window size if it's larger than requested. 1185 * Otherwise, we will remove this node from the tree next time 1186 * call find_next_reservable_window. 1187 */ 1188 my_rsv->rsv_start = cur; 1189 my_rsv->rsv_end = cur + size - 1; 1190 my_rsv->rsv_alloc_hit = 0; 1191 1192 if (prev != my_rsv) 1193 ext4_rsv_window_add(sb, my_rsv); 1194 1195 return 0; 1196 } 1197 1198 /** 1199 * alloc_new_reservation()--allocate a new reservation window 1200 * 1201 * To make a new reservation, we search part of the filesystem 1202 * reservation list (the list that inside the group). We try to 1203 * allocate a new reservation window near the allocation goal, 1204 * or the beginning of the group, if there is no goal. 1205 * 1206 * We first find a reservable space after the goal, then from 1207 * there, we check the bitmap for the first free block after 1208 * it. If there is no free block until the end of group, then the 1209 * whole group is full, we failed. Otherwise, check if the free 1210 * block is inside the expected reservable space, if so, we 1211 * succeed. 1212 * If the first free block is outside the reservable space, then 1213 * start from the first free block, we search for next available 1214 * space, and go on. 1215 * 1216 * on succeed, a new reservation will be found and inserted into the list 1217 * It contains at least one free block, and it does not overlap with other 1218 * reservation windows. 1219 * 1220 * failed: we failed to find a reservation window in this group 1221 * 1222 * @rsv: the reservation 1223 * 1224 * @grp_goal: The goal (group-relative). It is where the search for a 1225 * free reservable space should start from. 1226 * if we have a grp_goal(grp_goal >0 ), then start from there, 1227 * no grp_goal(grp_goal = -1), we start from the first block 1228 * of the group. 1229 * 1230 * @sb: the super block 1231 * @group: the group we are trying to allocate in 1232 * @bitmap_bh: the block group block bitmap 1233 * 1234 */ 1235 static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv, 1236 ext4_grpblk_t grp_goal, struct super_block *sb, 1237 ext4_group_t group, struct buffer_head *bitmap_bh) 1238 { 1239 struct ext4_reserve_window_node *search_head; 1240 ext4_fsblk_t group_first_block, group_end_block, start_block; 1241 ext4_grpblk_t first_free_block; 1242 struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root; 1243 unsigned long size; 1244 int ret; 1245 spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock; 1246 1247 group_first_block = ext4_group_first_block_no(sb, group); 1248 group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); 1249 1250 if (grp_goal < 0) 1251 start_block = group_first_block; 1252 else 1253 start_block = grp_goal + group_first_block; 1254 1255 size = my_rsv->rsv_goal_size; 1256 1257 if (!rsv_is_empty(&my_rsv->rsv_window)) { 1258 /* 1259 * if the old reservation is cross group boundary 1260 * and if the goal is inside the old reservation window, 1261 * we will come here when we just failed to allocate from 1262 * the first part of the window. We still have another part 1263 * that belongs to the next group. In this case, there is no 1264 * point to discard our window and try to allocate a new one 1265 * in this group(which will fail). we should 1266 * keep the reservation window, just simply move on. 1267 * 1268 * Maybe we could shift the start block of the reservation 1269 * window to the first block of next group. 1270 */ 1271 1272 if ((my_rsv->rsv_start <= group_end_block) && 1273 (my_rsv->rsv_end > group_end_block) && 1274 (start_block >= my_rsv->rsv_start)) 1275 return -1; 1276 1277 if ((my_rsv->rsv_alloc_hit > 1278 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) { 1279 /* 1280 * if the previously allocation hit ratio is 1281 * greater than 1/2, then we double the size of 1282 * the reservation window the next time, 1283 * otherwise we keep the same size window 1284 */ 1285 size = size * 2; 1286 if (size > EXT4_MAX_RESERVE_BLOCKS) 1287 size = EXT4_MAX_RESERVE_BLOCKS; 1288 my_rsv->rsv_goal_size= size; 1289 } 1290 } 1291 1292 spin_lock(rsv_lock); 1293 /* 1294 * shift the search start to the window near the goal block 1295 */ 1296 search_head = search_reserve_window(fs_rsv_root, start_block); 1297 1298 /* 1299 * find_next_reservable_window() simply finds a reservable window 1300 * inside the given range(start_block, group_end_block). 1301 * 1302 * To make sure the reservation window has a free bit inside it, we 1303 * need to check the bitmap after we found a reservable window. 1304 */ 1305 retry: 1306 ret = find_next_reservable_window(search_head, my_rsv, sb, 1307 start_block, group_end_block); 1308 1309 if (ret == -1) { 1310 if (!rsv_is_empty(&my_rsv->rsv_window)) 1311 rsv_window_remove(sb, my_rsv); 1312 spin_unlock(rsv_lock); 1313 return -1; 1314 } 1315 1316 /* 1317 * On success, find_next_reservable_window() returns the 1318 * reservation window where there is a reservable space after it. 1319 * Before we reserve this reservable space, we need 1320 * to make sure there is at least a free block inside this region. 1321 * 1322 * searching the first free bit on the block bitmap and copy of 1323 * last committed bitmap alternatively, until we found a allocatable 1324 * block. Search start from the start block of the reservable space 1325 * we just found. 1326 */ 1327 spin_unlock(rsv_lock); 1328 first_free_block = bitmap_search_next_usable_block( 1329 my_rsv->rsv_start - group_first_block, 1330 bitmap_bh, group_end_block - group_first_block + 1); 1331 1332 if (first_free_block < 0) { 1333 /* 1334 * no free block left on the bitmap, no point 1335 * to reserve the space. return failed. 1336 */ 1337 spin_lock(rsv_lock); 1338 if (!rsv_is_empty(&my_rsv->rsv_window)) 1339 rsv_window_remove(sb, my_rsv); 1340 spin_unlock(rsv_lock); 1341 return -1; /* failed */ 1342 } 1343 1344 start_block = first_free_block + group_first_block; 1345 /* 1346 * check if the first free block is within the 1347 * free space we just reserved 1348 */ 1349 if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end) 1350 return 0; /* success */ 1351 /* 1352 * if the first free bit we found is out of the reservable space 1353 * continue search for next reservable space, 1354 * start from where the free block is, 1355 * we also shift the list head to where we stopped last time 1356 */ 1357 search_head = my_rsv; 1358 spin_lock(rsv_lock); 1359 goto retry; 1360 } 1361 1362 /** 1363 * try_to_extend_reservation() 1364 * @my_rsv: given reservation window 1365 * @sb: super block 1366 * @size: the delta to extend 1367 * 1368 * Attempt to expand the reservation window large enough to have 1369 * required number of free blocks 1370 * 1371 * Since ext4_try_to_allocate() will always allocate blocks within 1372 * the reservation window range, if the window size is too small, 1373 * multiple blocks allocation has to stop at the end of the reservation 1374 * window. To make this more efficient, given the total number of 1375 * blocks needed and the current size of the window, we try to 1376 * expand the reservation window size if necessary on a best-effort 1377 * basis before ext4_new_blocks() tries to allocate blocks, 1378 */ 1379 static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv, 1380 struct super_block *sb, int size) 1381 { 1382 struct ext4_reserve_window_node *next_rsv; 1383 struct rb_node *next; 1384 spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock; 1385 1386 if (!spin_trylock(rsv_lock)) 1387 return; 1388 1389 next = rb_next(&my_rsv->rsv_node); 1390 1391 if (!next) 1392 my_rsv->rsv_end += size; 1393 else { 1394 next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node); 1395 1396 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) 1397 my_rsv->rsv_end += size; 1398 else 1399 my_rsv->rsv_end = next_rsv->rsv_start - 1; 1400 } 1401 spin_unlock(rsv_lock); 1402 } 1403 1404 /** 1405 * ext4_try_to_allocate_with_rsv() 1406 * @sb: superblock 1407 * @handle: handle to this transaction 1408 * @group: given allocation block group 1409 * @bitmap_bh: bufferhead holds the block bitmap 1410 * @grp_goal: given target block within the group 1411 * @count: target number of blocks to allocate 1412 * @my_rsv: reservation window 1413 * @errp: pointer to store the error code 1414 * 1415 * This is the main function used to allocate a new block and its reservation 1416 * window. 1417 * 1418 * Each time when a new block allocation is need, first try to allocate from 1419 * its own reservation. If it does not have a reservation window, instead of 1420 * looking for a free bit on bitmap first, then look up the reservation list to 1421 * see if it is inside somebody else's reservation window, we try to allocate a 1422 * reservation window for it starting from the goal first. Then do the block 1423 * allocation within the reservation window. 1424 * 1425 * This will avoid keeping on searching the reservation list again and 1426 * again when somebody is looking for a free block (without 1427 * reservation), and there are lots of free blocks, but they are all 1428 * being reserved. 1429 * 1430 * We use a red-black tree for the per-filesystem reservation list. 1431 * 1432 */ 1433 static ext4_grpblk_t 1434 ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, 1435 ext4_group_t group, struct buffer_head *bitmap_bh, 1436 ext4_grpblk_t grp_goal, 1437 struct ext4_reserve_window_node * my_rsv, 1438 unsigned long *count, int *errp) 1439 { 1440 ext4_fsblk_t group_first_block, group_last_block; 1441 ext4_grpblk_t ret = 0; 1442 int fatal; 1443 unsigned long num = *count; 1444 1445 *errp = 0; 1446 1447 /* 1448 * Make sure we use undo access for the bitmap, because it is critical 1449 * that we do the frozen_data COW on bitmap buffers in all cases even 1450 * if the buffer is in BJ_Forget state in the committing transaction. 1451 */ 1452 BUFFER_TRACE(bitmap_bh, "get undo access for new block"); 1453 fatal = ext4_journal_get_undo_access(handle, bitmap_bh); 1454 if (fatal) { 1455 *errp = fatal; 1456 return -1; 1457 } 1458 1459 /* 1460 * we don't deal with reservation when 1461 * filesystem is mounted without reservation 1462 * or the file is not a regular file 1463 * or last attempt to allocate a block with reservation turned on failed 1464 */ 1465 if (my_rsv == NULL ) { 1466 ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh, 1467 grp_goal, count, NULL); 1468 goto out; 1469 } 1470 /* 1471 * grp_goal is a group relative block number (if there is a goal) 1472 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb) 1473 * first block is a filesystem wide block number 1474 * first block is the block number of the first block in this group 1475 */ 1476 group_first_block = ext4_group_first_block_no(sb, group); 1477 group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); 1478 1479 /* 1480 * Basically we will allocate a new block from inode's reservation 1481 * window. 1482 * 1483 * We need to allocate a new reservation window, if: 1484 * a) inode does not have a reservation window; or 1485 * b) last attempt to allocate a block from existing reservation 1486 * failed; or 1487 * c) we come here with a goal and with a reservation window 1488 * 1489 * We do not need to allocate a new reservation window if we come here 1490 * at the beginning with a goal and the goal is inside the window, or 1491 * we don't have a goal but already have a reservation window. 1492 * then we could go to allocate from the reservation window directly. 1493 */ 1494 while (1) { 1495 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || 1496 !goal_in_my_reservation(&my_rsv->rsv_window, 1497 grp_goal, group, sb)) { 1498 if (my_rsv->rsv_goal_size < *count) 1499 my_rsv->rsv_goal_size = *count; 1500 ret = alloc_new_reservation(my_rsv, grp_goal, sb, 1501 group, bitmap_bh); 1502 if (ret < 0) 1503 break; /* failed */ 1504 1505 if (!goal_in_my_reservation(&my_rsv->rsv_window, 1506 grp_goal, group, sb)) 1507 grp_goal = -1; 1508 } else if (grp_goal >= 0) { 1509 int curr = my_rsv->rsv_end - 1510 (grp_goal + group_first_block) + 1; 1511 1512 if (curr < *count) 1513 try_to_extend_reservation(my_rsv, sb, 1514 *count - curr); 1515 } 1516 1517 if ((my_rsv->rsv_start > group_last_block) || 1518 (my_rsv->rsv_end < group_first_block)) { 1519 rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1); 1520 BUG(); 1521 } 1522 ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh, 1523 grp_goal, &num, &my_rsv->rsv_window); 1524 if (ret >= 0) { 1525 my_rsv->rsv_alloc_hit += num; 1526 *count = num; 1527 break; /* succeed */ 1528 } 1529 num = *count; 1530 } 1531 out: 1532 if (ret >= 0) { 1533 BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for " 1534 "bitmap block"); 1535 fatal = ext4_journal_dirty_metadata(handle, bitmap_bh); 1536 if (fatal) { 1537 *errp = fatal; 1538 return -1; 1539 } 1540 return ret; 1541 } 1542 1543 BUFFER_TRACE(bitmap_bh, "journal_release_buffer"); 1544 ext4_journal_release_buffer(handle, bitmap_bh); 1545 return ret; 1546 } 1547 1548 /** 1549 * ext4_has_free_blocks() 1550 * @sbi: in-core super block structure. 1551 * 1552 * Check if filesystem has at least 1 free block available for allocation. 1553 */ 1554 static int ext4_has_free_blocks(struct ext4_sb_info *sbi) 1555 { 1556 ext4_fsblk_t free_blocks, root_blocks; 1557 1558 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 1559 root_blocks = ext4_r_blocks_count(sbi->s_es); 1560 if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && 1561 sbi->s_resuid != current->fsuid && 1562 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { 1563 return 0; 1564 } 1565 return 1; 1566 } 1567 1568 /** 1569 * ext4_should_retry_alloc() 1570 * @sb: super block 1571 * @retries number of attemps has been made 1572 * 1573 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if 1574 * it is profitable to retry the operation, this function will wait 1575 * for the current or commiting transaction to complete, and then 1576 * return TRUE. 1577 * 1578 * if the total number of retries exceed three times, return FALSE. 1579 */ 1580 int ext4_should_retry_alloc(struct super_block *sb, int *retries) 1581 { 1582 if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3) 1583 return 0; 1584 1585 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); 1586 1587 return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal); 1588 } 1589 1590 /** 1591 * ext4_new_blocks_old() -- core block(s) allocation function 1592 * @handle: handle to this transaction 1593 * @inode: file inode 1594 * @goal: given target block(filesystem wide) 1595 * @count: target number of blocks to allocate 1596 * @errp: error code 1597 * 1598 * ext4_new_blocks uses a goal block to assist allocation. It tries to 1599 * allocate block(s) from the block group contains the goal block first. If that 1600 * fails, it will try to allocate block(s) from other block groups without 1601 * any specific goal block. 1602 * 1603 */ 1604 ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode, 1605 ext4_fsblk_t goal, unsigned long *count, int *errp) 1606 { 1607 struct buffer_head *bitmap_bh = NULL; 1608 struct buffer_head *gdp_bh; 1609 ext4_group_t group_no; 1610 ext4_group_t goal_group; 1611 ext4_grpblk_t grp_target_blk; /* blockgroup relative goal block */ 1612 ext4_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/ 1613 ext4_fsblk_t ret_block; /* filesyetem-wide allocated block */ 1614 ext4_group_t bgi; /* blockgroup iteration index */ 1615 int fatal = 0, err; 1616 int performed_allocation = 0; 1617 ext4_grpblk_t free_blocks; /* number of free blocks in a group */ 1618 struct super_block *sb; 1619 struct ext4_group_desc *gdp; 1620 struct ext4_super_block *es; 1621 struct ext4_sb_info *sbi; 1622 struct ext4_reserve_window_node *my_rsv = NULL; 1623 struct ext4_block_alloc_info *block_i; 1624 unsigned short windowsz = 0; 1625 ext4_group_t ngroups; 1626 unsigned long num = *count; 1627 1628 *errp = -ENOSPC; 1629 sb = inode->i_sb; 1630 if (!sb) { 1631 printk("ext4_new_block: nonexistent device"); 1632 return 0; 1633 } 1634 1635 /* 1636 * Check quota for allocation of this block. 1637 */ 1638 if (DQUOT_ALLOC_BLOCK(inode, num)) { 1639 *errp = -EDQUOT; 1640 return 0; 1641 } 1642 1643 sbi = EXT4_SB(sb); 1644 es = EXT4_SB(sb)->s_es; 1645 ext4_debug("goal=%llu.\n", goal); 1646 /* 1647 * Allocate a block from reservation only when 1648 * filesystem is mounted with reservation(default,-o reservation), and 1649 * it's a regular file, and 1650 * the desired window size is greater than 0 (One could use ioctl 1651 * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off 1652 * reservation on that particular file) 1653 */ 1654 block_i = EXT4_I(inode)->i_block_alloc_info; 1655 if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0)) 1656 my_rsv = &block_i->rsv_window_node; 1657 1658 if (!ext4_has_free_blocks(sbi)) { 1659 *errp = -ENOSPC; 1660 goto out; 1661 } 1662 1663 /* 1664 * First, test whether the goal block is free. 1665 */ 1666 if (goal < le32_to_cpu(es->s_first_data_block) || 1667 goal >= ext4_blocks_count(es)) 1668 goal = le32_to_cpu(es->s_first_data_block); 1669 ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk); 1670 goal_group = group_no; 1671 retry_alloc: 1672 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); 1673 if (!gdp) 1674 goto io_error; 1675 1676 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1677 /* 1678 * if there is not enough free blocks to make a new resevation 1679 * turn off reservation for this allocation 1680 */ 1681 if (my_rsv && (free_blocks < windowsz) 1682 && (rsv_is_empty(&my_rsv->rsv_window))) 1683 my_rsv = NULL; 1684 1685 if (free_blocks > 0) { 1686 bitmap_bh = read_block_bitmap(sb, group_no); 1687 if (!bitmap_bh) 1688 goto io_error; 1689 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, 1690 group_no, bitmap_bh, grp_target_blk, 1691 my_rsv, &num, &fatal); 1692 if (fatal) 1693 goto out; 1694 if (grp_alloc_blk >= 0) 1695 goto allocated; 1696 } 1697 1698 ngroups = EXT4_SB(sb)->s_groups_count; 1699 smp_rmb(); 1700 1701 /* 1702 * Now search the rest of the groups. We assume that 1703 * group_no and gdp correctly point to the last group visited. 1704 */ 1705 for (bgi = 0; bgi < ngroups; bgi++) { 1706 group_no++; 1707 if (group_no >= ngroups) 1708 group_no = 0; 1709 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); 1710 if (!gdp) 1711 goto io_error; 1712 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1713 /* 1714 * skip this group if the number of 1715 * free blocks is less than half of the reservation 1716 * window size. 1717 */ 1718 if (free_blocks <= (windowsz/2)) 1719 continue; 1720 1721 brelse(bitmap_bh); 1722 bitmap_bh = read_block_bitmap(sb, group_no); 1723 if (!bitmap_bh) 1724 goto io_error; 1725 /* 1726 * try to allocate block(s) from this group, without a goal(-1). 1727 */ 1728 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, 1729 group_no, bitmap_bh, -1, my_rsv, 1730 &num, &fatal); 1731 if (fatal) 1732 goto out; 1733 if (grp_alloc_blk >= 0) 1734 goto allocated; 1735 } 1736 /* 1737 * We may end up a bogus ealier ENOSPC error due to 1738 * filesystem is "full" of reservations, but 1739 * there maybe indeed free blocks avaliable on disk 1740 * In this case, we just forget about the reservations 1741 * just do block allocation as without reservations. 1742 */ 1743 if (my_rsv) { 1744 my_rsv = NULL; 1745 windowsz = 0; 1746 group_no = goal_group; 1747 goto retry_alloc; 1748 } 1749 /* No space left on the device */ 1750 *errp = -ENOSPC; 1751 goto out; 1752 1753 allocated: 1754 1755 ext4_debug("using block group %lu(%d)\n", 1756 group_no, gdp->bg_free_blocks_count); 1757 1758 BUFFER_TRACE(gdp_bh, "get_write_access"); 1759 fatal = ext4_journal_get_write_access(handle, gdp_bh); 1760 if (fatal) 1761 goto out; 1762 1763 ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no); 1764 1765 if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) || 1766 in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) || 1767 in_range(ret_block, ext4_inode_table(sb, gdp), 1768 EXT4_SB(sb)->s_itb_per_group) || 1769 in_range(ret_block + num - 1, ext4_inode_table(sb, gdp), 1770 EXT4_SB(sb)->s_itb_per_group)) { 1771 ext4_error(sb, "ext4_new_block", 1772 "Allocating block in system zone - " 1773 "blocks from %llu, length %lu", 1774 ret_block, num); 1775 goto out; 1776 } 1777 1778 performed_allocation = 1; 1779 1780 #ifdef CONFIG_JBD2_DEBUG 1781 { 1782 struct buffer_head *debug_bh; 1783 1784 /* Record bitmap buffer state in the newly allocated block */ 1785 debug_bh = sb_find_get_block(sb, ret_block); 1786 if (debug_bh) { 1787 BUFFER_TRACE(debug_bh, "state when allocated"); 1788 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state"); 1789 brelse(debug_bh); 1790 } 1791 } 1792 jbd_lock_bh_state(bitmap_bh); 1793 spin_lock(sb_bgl_lock(sbi, group_no)); 1794 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { 1795 int i; 1796 1797 for (i = 0; i < num; i++) { 1798 if (ext4_test_bit(grp_alloc_blk+i, 1799 bh2jh(bitmap_bh)->b_committed_data)) { 1800 printk("%s: block was unexpectedly set in " 1801 "b_committed_data\n", __FUNCTION__); 1802 } 1803 } 1804 } 1805 ext4_debug("found bit %d\n", grp_alloc_blk); 1806 spin_unlock(sb_bgl_lock(sbi, group_no)); 1807 jbd_unlock_bh_state(bitmap_bh); 1808 #endif 1809 1810 if (ret_block + num - 1 >= ext4_blocks_count(es)) { 1811 ext4_error(sb, "ext4_new_block", 1812 "block(%llu) >= blocks count(%llu) - " 1813 "block_group = %lu, es == %p ", ret_block, 1814 ext4_blocks_count(es), group_no, es); 1815 goto out; 1816 } 1817 1818 /* 1819 * It is up to the caller to add the new buffer to a journal 1820 * list of some description. We don't know in advance whether 1821 * the caller wants to use it as metadata or data. 1822 */ 1823 spin_lock(sb_bgl_lock(sbi, group_no)); 1824 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) 1825 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 1826 gdp->bg_free_blocks_count = 1827 cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num); 1828 gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp); 1829 spin_unlock(sb_bgl_lock(sbi, group_no)); 1830 percpu_counter_sub(&sbi->s_freeblocks_counter, num); 1831 1832 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); 1833 err = ext4_journal_dirty_metadata(handle, gdp_bh); 1834 if (!fatal) 1835 fatal = err; 1836 1837 sb->s_dirt = 1; 1838 if (fatal) 1839 goto out; 1840 1841 *errp = 0; 1842 brelse(bitmap_bh); 1843 DQUOT_FREE_BLOCK(inode, *count-num); 1844 *count = num; 1845 return ret_block; 1846 1847 io_error: 1848 *errp = -EIO; 1849 out: 1850 if (fatal) { 1851 *errp = fatal; 1852 ext4_std_error(sb, fatal); 1853 } 1854 /* 1855 * Undo the block allocation 1856 */ 1857 if (!performed_allocation) 1858 DQUOT_FREE_BLOCK(inode, *count); 1859 brelse(bitmap_bh); 1860 return 0; 1861 } 1862 1863 ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode, 1864 ext4_fsblk_t goal, int *errp) 1865 { 1866 struct ext4_allocation_request ar; 1867 ext4_fsblk_t ret; 1868 1869 if (!test_opt(inode->i_sb, MBALLOC)) { 1870 unsigned long count = 1; 1871 ret = ext4_new_blocks_old(handle, inode, goal, &count, errp); 1872 return ret; 1873 } 1874 1875 memset(&ar, 0, sizeof(ar)); 1876 ar.inode = inode; 1877 ar.goal = goal; 1878 ar.len = 1; 1879 ret = ext4_mb_new_blocks(handle, &ar, errp); 1880 return ret; 1881 } 1882 1883 ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode, 1884 ext4_fsblk_t goal, unsigned long *count, int *errp) 1885 { 1886 struct ext4_allocation_request ar; 1887 ext4_fsblk_t ret; 1888 1889 if (!test_opt(inode->i_sb, MBALLOC)) { 1890 ret = ext4_new_blocks_old(handle, inode, goal, count, errp); 1891 return ret; 1892 } 1893 1894 memset(&ar, 0, sizeof(ar)); 1895 ar.inode = inode; 1896 ar.goal = goal; 1897 ar.len = *count; 1898 ret = ext4_mb_new_blocks(handle, &ar, errp); 1899 *count = ar.len; 1900 return ret; 1901 } 1902 1903 1904 /** 1905 * ext4_count_free_blocks() -- count filesystem free blocks 1906 * @sb: superblock 1907 * 1908 * Adds up the number of free blocks from each block group. 1909 */ 1910 ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb) 1911 { 1912 ext4_fsblk_t desc_count; 1913 struct ext4_group_desc *gdp; 1914 ext4_group_t i; 1915 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 1916 #ifdef EXT4FS_DEBUG 1917 struct ext4_super_block *es; 1918 ext4_fsblk_t bitmap_count; 1919 unsigned long x; 1920 struct buffer_head *bitmap_bh = NULL; 1921 1922 es = EXT4_SB(sb)->s_es; 1923 desc_count = 0; 1924 bitmap_count = 0; 1925 gdp = NULL; 1926 1927 smp_rmb(); 1928 for (i = 0; i < ngroups; i++) { 1929 gdp = ext4_get_group_desc(sb, i, NULL); 1930 if (!gdp) 1931 continue; 1932 desc_count += le16_to_cpu(gdp->bg_free_blocks_count); 1933 brelse(bitmap_bh); 1934 bitmap_bh = read_block_bitmap(sb, i); 1935 if (bitmap_bh == NULL) 1936 continue; 1937 1938 x = ext4_count_free(bitmap_bh, sb->s_blocksize); 1939 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", 1940 i, le16_to_cpu(gdp->bg_free_blocks_count), x); 1941 bitmap_count += x; 1942 } 1943 brelse(bitmap_bh); 1944 printk("ext4_count_free_blocks: stored = %llu" 1945 ", computed = %llu, %llu\n", 1946 ext4_free_blocks_count(es), 1947 desc_count, bitmap_count); 1948 return bitmap_count; 1949 #else 1950 desc_count = 0; 1951 smp_rmb(); 1952 for (i = 0; i < ngroups; i++) { 1953 gdp = ext4_get_group_desc(sb, i, NULL); 1954 if (!gdp) 1955 continue; 1956 desc_count += le16_to_cpu(gdp->bg_free_blocks_count); 1957 } 1958 1959 return desc_count; 1960 #endif 1961 } 1962 1963 static inline int test_root(ext4_group_t a, int b) 1964 { 1965 int num = b; 1966 1967 while (a > num) 1968 num *= b; 1969 return num == a; 1970 } 1971 1972 static int ext4_group_sparse(ext4_group_t group) 1973 { 1974 if (group <= 1) 1975 return 1; 1976 if (!(group & 1)) 1977 return 0; 1978 return (test_root(group, 7) || test_root(group, 5) || 1979 test_root(group, 3)); 1980 } 1981 1982 /** 1983 * ext4_bg_has_super - number of blocks used by the superblock in group 1984 * @sb: superblock for filesystem 1985 * @group: group number to check 1986 * 1987 * Return the number of blocks used by the superblock (primary or backup) 1988 * in this group. Currently this will be only 0 or 1. 1989 */ 1990 int ext4_bg_has_super(struct super_block *sb, ext4_group_t group) 1991 { 1992 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 1993 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) && 1994 !ext4_group_sparse(group)) 1995 return 0; 1996 return 1; 1997 } 1998 1999 static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, 2000 ext4_group_t group) 2001 { 2002 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); 2003 ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb); 2004 ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1; 2005 2006 if (group == first || group == first + 1 || group == last) 2007 return 1; 2008 return 0; 2009 } 2010 2011 static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, 2012 ext4_group_t group) 2013 { 2014 return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0; 2015 } 2016 2017 /** 2018 * ext4_bg_num_gdb - number of blocks used by the group table in group 2019 * @sb: superblock for filesystem 2020 * @group: group number to check 2021 * 2022 * Return the number of blocks used by the group descriptor table 2023 * (primary or backup) in this group. In the future there may be a 2024 * different number of descriptor blocks in each group. 2025 */ 2026 unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) 2027 { 2028 unsigned long first_meta_bg = 2029 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); 2030 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); 2031 2032 if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) || 2033 metagroup < first_meta_bg) 2034 return ext4_bg_num_gdb_nometa(sb,group); 2035 2036 return ext4_bg_num_gdb_meta(sb,group); 2037 2038 } 2039