1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/ext4/resize.c 4 * 5 * Support for resizing an ext4 filesystem while it is mounted. 6 * 7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> 8 * 9 * This could probably be made into a module, because it is not often in use. 10 */ 11 12 13 #define EXT4FS_DEBUG 14 15 #include <linux/errno.h> 16 #include <linux/slab.h> 17 #include <linux/jiffies.h> 18 19 #include "ext4_jbd2.h" 20 21 struct ext4_rcu_ptr { 22 struct rcu_head rcu; 23 void *ptr; 24 }; 25 26 static void ext4_rcu_ptr_callback(struct rcu_head *head) 27 { 28 struct ext4_rcu_ptr *ptr; 29 30 ptr = container_of(head, struct ext4_rcu_ptr, rcu); 31 kvfree(ptr->ptr); 32 kfree(ptr); 33 } 34 35 void ext4_kvfree_array_rcu(void *to_free) 36 { 37 struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); 38 39 if (ptr) { 40 ptr->ptr = to_free; 41 call_rcu(&ptr->rcu, ext4_rcu_ptr_callback); 42 return; 43 } 44 synchronize_rcu(); 45 kvfree(to_free); 46 } 47 48 int ext4_resize_begin(struct super_block *sb) 49 { 50 struct ext4_sb_info *sbi = EXT4_SB(sb); 51 int ret = 0; 52 53 if (!capable(CAP_SYS_RESOURCE)) 54 return -EPERM; 55 56 /* 57 * If the reserved GDT blocks is non-zero, the resize_inode feature 58 * should always be set. 59 */ 60 if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks && 61 !ext4_has_feature_resize_inode(sb)) { 62 ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); 63 return -EFSCORRUPTED; 64 } 65 66 /* 67 * If we are not using the primary superblock/GDT copy don't resize, 68 * because the user tools have no way of handling this. Probably a 69 * bad time to do it anyways. 70 */ 71 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != 72 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 73 ext4_warning(sb, "won't resize using backup superblock at %llu", 74 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 75 return -EPERM; 76 } 77 78 /* 79 * We are not allowed to do online-resizing on a filesystem mounted 80 * with error, because it can destroy the filesystem easily. 
81 */ 82 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 83 ext4_warning(sb, "There are errors in the filesystem, " 84 "so online resizing is not allowed"); 85 return -EPERM; 86 } 87 88 if (ext4_has_feature_sparse_super2(sb)) { 89 ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); 90 return -EOPNOTSUPP; 91 } 92 93 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, 94 &EXT4_SB(sb)->s_ext4_flags)) 95 ret = -EBUSY; 96 97 return ret; 98 } 99 100 int ext4_resize_end(struct super_block *sb, bool update_backups) 101 { 102 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags); 103 smp_mb__after_atomic(); 104 if (update_backups) 105 return ext4_update_overhead(sb, true); 106 return 0; 107 } 108 109 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb, 110 ext4_group_t group) { 111 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) << 112 EXT4_DESC_PER_BLOCK_BITS(sb); 113 } 114 115 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb, 116 ext4_group_t group) { 117 group = ext4_meta_bg_first_group(sb, group); 118 return ext4_group_first_block_no(sb, group); 119 } 120 121 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb, 122 ext4_group_t group) { 123 ext4_grpblk_t overhead; 124 overhead = ext4_bg_num_gdb(sb, group); 125 if (ext4_bg_has_super(sb, group)) 126 overhead += 1 + 127 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 128 return overhead; 129 } 130 131 #define outside(b, first, last) ((b) < (first) || (b) >= (last)) 132 #define inside(b, first, last) ((b) >= (first) && (b) < (last)) 133 134 static int verify_group_input(struct super_block *sb, 135 struct ext4_new_group_data *input) 136 { 137 struct ext4_sb_info *sbi = EXT4_SB(sb); 138 struct ext4_super_block *es = sbi->s_es; 139 ext4_fsblk_t start = ext4_blocks_count(es); 140 ext4_fsblk_t end = start + input->blocks_count; 141 ext4_group_t group = input->group; 142 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; 143 unsigned overhead; 144 ext4_fsblk_t metaend; 145 struct buffer_head *bh = NULL; 146 ext4_grpblk_t free_blocks_count, offset; 147 int err = -EINVAL; 148 149 if (group != sbi->s_groups_count) { 150 ext4_warning(sb, "Cannot add at group %u (only %u groups)", 151 input->group, sbi->s_groups_count); 152 return -EINVAL; 153 } 154 155 overhead = ext4_group_overhead_blocks(sb, group); 156 metaend = start + overhead; 157 input->free_clusters_count = free_blocks_count = 158 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; 159 160 if (test_opt(sb, DEBUG)) 161 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " 162 "(%d free, %u reserved)\n", 163 ext4_bg_has_super(sb, input->group) ? 
"normal" : 164 "no-super", input->group, input->blocks_count, 165 free_blocks_count, input->reserved_blocks); 166 167 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 168 if (offset != 0) 169 ext4_warning(sb, "Last group not full"); 170 else if (input->reserved_blocks > input->blocks_count / 5) 171 ext4_warning(sb, "Reserved blocks too high (%u)", 172 input->reserved_blocks); 173 else if (free_blocks_count < 0) 174 ext4_warning(sb, "Bad blocks count %u", 175 input->blocks_count); 176 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) { 177 err = PTR_ERR(bh); 178 bh = NULL; 179 ext4_warning(sb, "Cannot read last block (%llu)", 180 end - 1); 181 } else if (outside(input->block_bitmap, start, end)) 182 ext4_warning(sb, "Block bitmap not in group (block %llu)", 183 (unsigned long long)input->block_bitmap); 184 else if (outside(input->inode_bitmap, start, end)) 185 ext4_warning(sb, "Inode bitmap not in group (block %llu)", 186 (unsigned long long)input->inode_bitmap); 187 else if (outside(input->inode_table, start, end) || 188 outside(itend - 1, start, end)) 189 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", 190 (unsigned long long)input->inode_table, itend - 1); 191 else if (input->inode_bitmap == input->block_bitmap) 192 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", 193 (unsigned long long)input->block_bitmap); 194 else if (inside(input->block_bitmap, input->inode_table, itend)) 195 ext4_warning(sb, "Block bitmap (%llu) in inode table " 196 "(%llu-%llu)", 197 (unsigned long long)input->block_bitmap, 198 (unsigned long long)input->inode_table, itend - 1); 199 else if (inside(input->inode_bitmap, input->inode_table, itend)) 200 ext4_warning(sb, "Inode bitmap (%llu) in inode table " 201 "(%llu-%llu)", 202 (unsigned long long)input->inode_bitmap, 203 (unsigned long long)input->inode_table, itend - 1); 204 else if (inside(input->block_bitmap, start, metaend)) 205 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", 206 (unsigned long long)input->block_bitmap, 207 start, metaend - 1); 208 else if (inside(input->inode_bitmap, start, metaend)) 209 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", 210 (unsigned long long)input->inode_bitmap, 211 start, metaend - 1); 212 else if (inside(input->inode_table, start, metaend) || 213 inside(itend - 1, start, metaend)) 214 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " 215 "(%llu-%llu)", 216 (unsigned long long)input->inode_table, 217 itend - 1, start, metaend - 1); 218 else 219 err = 0; 220 brelse(bh); 221 222 return err; 223 } 224 225 /* 226 * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex 227 * group each time. 228 */ 229 struct ext4_new_flex_group_data { 230 struct ext4_new_group_data *groups; /* new_group_data for groups 231 in the flex group */ 232 __u16 *bg_flags; /* block group flags of groups 233 in @groups */ 234 ext4_group_t count; /* number of groups in @groups 235 */ 236 }; 237 238 /* 239 * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of 240 * @flexbg_size. 241 * 242 * Returns NULL on failure otherwise address of the allocated structure. 
243 */ 244 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size) 245 { 246 struct ext4_new_flex_group_data *flex_gd; 247 248 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); 249 if (flex_gd == NULL) 250 goto out3; 251 252 flex_gd->count = flexbg_size; 253 flex_gd->groups = kmalloc_array(flexbg_size, 254 sizeof(struct ext4_new_group_data), 255 GFP_NOFS); 256 if (flex_gd->groups == NULL) 257 goto out2; 258 259 flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16), 260 GFP_NOFS); 261 if (flex_gd->bg_flags == NULL) 262 goto out1; 263 264 return flex_gd; 265 266 out1: 267 kfree(flex_gd->groups); 268 out2: 269 kfree(flex_gd); 270 out3: 271 return NULL; 272 } 273 274 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) 275 { 276 kfree(flex_gd->bg_flags); 277 kfree(flex_gd->groups); 278 kfree(flex_gd); 279 } 280 281 /* 282 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps 283 * and inode tables for a flex group. 284 * 285 * This function is used by 64bit-resize. Note that this function allocates 286 * group tables from the 1st group of groups contained by @flexgd, which may 287 * be a partial of a flex group. 288 * 289 * @sb: super block of fs to which the groups belongs 290 * 291 * Returns 0 on a successful allocation of the metadata blocks in the 292 * block group. 293 */ 294 static int ext4_alloc_group_tables(struct super_block *sb, 295 struct ext4_new_flex_group_data *flex_gd, 296 unsigned int flexbg_size) 297 { 298 struct ext4_new_group_data *group_data = flex_gd->groups; 299 ext4_fsblk_t start_blk; 300 ext4_fsblk_t last_blk; 301 ext4_group_t src_group; 302 ext4_group_t bb_index = 0; 303 ext4_group_t ib_index = 0; 304 ext4_group_t it_index = 0; 305 ext4_group_t group; 306 ext4_group_t last_group; 307 unsigned overhead; 308 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 309 int i; 310 311 BUG_ON(flex_gd->count == 0 || group_data == NULL); 312 313 src_group = group_data[0].group; 314 last_group = src_group + flex_gd->count - 1; 315 316 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != 317 (last_group & ~(flexbg_size - 1)))); 318 next_group: 319 group = group_data[0].group; 320 if (src_group >= group_data[0].group + flex_gd->count) 321 return -ENOSPC; 322 start_blk = ext4_group_first_block_no(sb, src_group); 323 last_blk = start_blk + group_data[src_group - group].blocks_count; 324 325 overhead = ext4_group_overhead_blocks(sb, src_group); 326 327 start_blk += overhead; 328 329 /* We collect contiguous blocks as much as possible. 
 */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
				"adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
				ext4_bg_has_super(sb, group + i) ? "normal" :
				"no-super", group + i,
				group_data[i].blocks_count,
				group_data[i].free_clusters_count,
				group_data[i].mdata_blocks);
		}
	}
	return 0;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks the clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for setup_new_flex_group_blocks() which marks the blocks
 * occupied by the new group tables in the corresponding block bitmaps.
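 *
 * The [@first_cluster, @last_cluster] range may span several of the new
 * groups; the loop below walks it one group at a time and skips groups
 * whose block bitmap is left uninitialized (EXT4_BG_BLOCK_UNINIT).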
445 * 446 * @sb: super block 447 * @handle: journal handle 448 * @flex_gd: flex group data 449 */ 450 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, 451 struct ext4_new_flex_group_data *flex_gd, 452 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster) 453 { 454 struct ext4_sb_info *sbi = EXT4_SB(sb); 455 ext4_group_t count = last_cluster - first_cluster + 1; 456 ext4_group_t count2; 457 458 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster, 459 last_cluster); 460 for (count2 = count; count > 0; 461 count -= count2, first_cluster += count2) { 462 ext4_fsblk_t start; 463 struct buffer_head *bh; 464 ext4_group_t group; 465 int err; 466 467 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster)); 468 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group)); 469 group -= flex_gd->groups[0].group; 470 471 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start); 472 if (count2 > count) 473 count2 = count; 474 475 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { 476 BUG_ON(flex_gd->count > 1); 477 continue; 478 } 479 480 err = ext4_resize_ensure_credits_batch(handle, 1); 481 if (err < 0) 482 return err; 483 484 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); 485 if (unlikely(!bh)) 486 return -ENOMEM; 487 488 BUFFER_TRACE(bh, "get_write_access"); 489 err = ext4_journal_get_write_access(handle, sb, bh, 490 EXT4_JTR_NONE); 491 if (err) { 492 brelse(bh); 493 return err; 494 } 495 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", 496 first_cluster, first_cluster - start, count2); 497 mb_set_bits(bh->b_data, first_cluster - start, count2); 498 499 err = ext4_handle_dirty_metadata(handle, NULL, bh); 500 brelse(bh); 501 if (unlikely(err)) 502 return err; 503 } 504 505 return 0; 506 } 507 508 /* 509 * Set up the block and inode bitmaps, and the inode table for the new groups. 510 * This doesn't need to be part of the main transaction, since we are only 511 * changing blocks outside the actual filesystem. We still do journaling to 512 * ensure the recovery is correct in case of a failure just after resize. 513 * If any part of this fails, we simply abort the resize. 514 * 515 * setup_new_flex_group_blocks handles a flex group as follow: 516 * 1. copy super block and GDT, and initialize group tables if necessary. 517 * In this step, we only set bits in blocks bitmaps for blocks taken by 518 * super block and GDT. 519 * 2. allocate group tables in block bitmaps, that is, set bits in block 520 * bitmap for blocks taken by group tables. 
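 *
 * How much of this work is needed for a given new group is controlled by its
 * bg_flags (EXT4_BG_BLOCK_UNINIT, EXT4_BG_INODE_UNINIT and
 * EXT4_BG_INODE_ZEROED); see the checks in the loop below.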
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				       struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb,
							    EXT4_JTR_NONE);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			mb_set_bits(bh->b_data, 0,
				    EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi,
group_data[i].blocks_count), 645 sb->s_blocksize * 8, bh->b_data); 646 err = ext4_handle_dirty_metadata(handle, NULL, bh); 647 brelse(bh); 648 if (err) 649 goto out; 650 651 handle_ib: 652 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 653 continue; 654 655 /* Initialize inode bitmap of the @group */ 656 block = group_data[i].inode_bitmap; 657 err = ext4_resize_ensure_credits_batch(handle, 1); 658 if (err < 0) 659 goto out; 660 /* Mark unused entries in inode bitmap used */ 661 bh = bclean(handle, sb, block); 662 if (IS_ERR(bh)) { 663 err = PTR_ERR(bh); 664 goto out; 665 } 666 667 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 668 sb->s_blocksize * 8, bh->b_data); 669 err = ext4_handle_dirty_metadata(handle, NULL, bh); 670 brelse(bh); 671 if (err) 672 goto out; 673 } 674 675 /* Mark group tables in block bitmap */ 676 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 677 count = group_table_count[j]; 678 start = (&group_data[0].block_bitmap)[j]; 679 block = start; 680 for (i = 1; i < flex_gd->count; i++) { 681 block += group_table_count[j]; 682 if (block == (&group_data[i].block_bitmap)[j]) { 683 count += group_table_count[j]; 684 continue; 685 } 686 err = set_flexbg_block_bitmap(sb, handle, 687 flex_gd, 688 EXT4_B2C(sbi, start), 689 EXT4_B2C(sbi, 690 start + count 691 - 1)); 692 if (err) 693 goto out; 694 count = group_table_count[j]; 695 start = (&group_data[i].block_bitmap)[j]; 696 block = start; 697 } 698 699 if (count) { 700 err = set_flexbg_block_bitmap(sb, handle, 701 flex_gd, 702 EXT4_B2C(sbi, start), 703 EXT4_B2C(sbi, 704 start + count 705 - 1)); 706 if (err) 707 goto out; 708 } 709 } 710 711 out: 712 err2 = ext4_journal_stop(handle); 713 if (err2 && !err) 714 err = err2; 715 716 return err; 717 } 718 719 /* 720 * Iterate through the groups which hold BACKUP superblock/GDT copies in an 721 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before 722 * calling this for the first time. In a sparse filesystem it will be the 723 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 724 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 725 */ 726 unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three, 727 unsigned int *five, unsigned int *seven) 728 { 729 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 730 unsigned int *min = three; 731 int mult = 3; 732 unsigned int ret; 733 734 if (ext4_has_feature_sparse_super2(sb)) { 735 do { 736 if (*min > 2) 737 return UINT_MAX; 738 ret = le32_to_cpu(es->s_backup_bgs[*min - 1]); 739 *min += 1; 740 } while (!ret); 741 return ret; 742 } 743 744 if (!ext4_has_feature_sparse_super(sb)) { 745 ret = *min; 746 *min += 1; 747 return ret; 748 } 749 750 if (*five < *min) { 751 min = five; 752 mult = 5; 753 } 754 if (*seven < *min) { 755 min = seven; 756 mult = 7; 757 } 758 759 ret = *min; 760 *min *= mult; 761 762 return ret; 763 } 764 765 /* 766 * Check that all of the backup GDT blocks are held in the primary GDT block. 767 * It is assumed that they are stored in group order. Returns the number of 768 * groups in current filesystem that have BACKUPS, or -ve error code. 
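 *
 * For example, if the primary copy lives in block blk (primary->b_blocknr),
 * the entry checked below for backup group grp is expected to contain:
 *
 *	grp * EXT4_BLOCKS_PER_GROUP(sb) + blk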
769 */ 770 static int verify_reserved_gdb(struct super_block *sb, 771 ext4_group_t end, 772 struct buffer_head *primary) 773 { 774 const ext4_fsblk_t blk = primary->b_blocknr; 775 unsigned three = 1; 776 unsigned five = 5; 777 unsigned seven = 7; 778 unsigned grp; 779 __le32 *p = (__le32 *)primary->b_data; 780 int gdbackups = 0; 781 782 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { 783 if (le32_to_cpu(*p++) != 784 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ 785 ext4_warning(sb, "reserved GDT %llu" 786 " missing grp %d (%llu)", 787 blk, grp, 788 grp * 789 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + 790 blk); 791 return -EINVAL; 792 } 793 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) 794 return -EFBIG; 795 } 796 797 return gdbackups; 798 } 799 800 /* 801 * Called when we need to bring a reserved group descriptor table block into 802 * use from the resize inode. The primary copy of the new GDT block currently 803 * is an indirect block (under the double indirect block in the resize inode). 804 * The new backup GDT blocks will be stored as leaf blocks in this indirect 805 * block, in group order. Even though we know all the block numbers we need, 806 * we check to ensure that the resize inode has actually reserved these blocks. 807 * 808 * Don't need to update the block bitmaps because the blocks are still in use. 809 * 810 * We get all of the error cases out of the way, so that we are sure to not 811 * fail once we start modifying the data on disk, because JBD has no rollback. 812 */ 813 static int add_new_gdb(handle_t *handle, struct inode *inode, 814 ext4_group_t group) 815 { 816 struct super_block *sb = inode->i_sb; 817 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 818 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 819 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; 820 struct buffer_head **o_group_desc, **n_group_desc = NULL; 821 struct buffer_head *dind = NULL; 822 struct buffer_head *gdb_bh = NULL; 823 int gdbackups; 824 struct ext4_iloc iloc = { .bh = NULL }; 825 __le32 *data; 826 int err; 827 828 if (test_opt(sb, DEBUG)) 829 printk(KERN_DEBUG 830 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 831 gdb_num); 832 833 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 834 if (IS_ERR(gdb_bh)) 835 return PTR_ERR(gdb_bh); 836 837 gdbackups = verify_reserved_gdb(sb, group, gdb_bh); 838 if (gdbackups < 0) { 839 err = gdbackups; 840 goto errout; 841 } 842 843 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 844 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 845 if (IS_ERR(dind)) { 846 err = PTR_ERR(dind); 847 dind = NULL; 848 goto errout; 849 } 850 851 data = (__le32 *)dind->b_data; 852 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { 853 ext4_warning(sb, "new group %u GDT block %llu not reserved", 854 group, gdblock); 855 err = -EINVAL; 856 goto errout; 857 } 858 859 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 860 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 861 EXT4_JTR_NONE); 862 if (unlikely(err)) 863 goto errout; 864 865 BUFFER_TRACE(gdb_bh, "get_write_access"); 866 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 867 if (unlikely(err)) 868 goto errout; 869 870 BUFFER_TRACE(dind, "get_write_access"); 871 err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE); 872 if (unlikely(err)) { 873 ext4_std_error(sb, err); 874 goto errout; 875 } 876 877 /* ext4_reserve_inode_write() gets a reference on the iloc */ 878 err = ext4_reserve_inode_write(handle, inode, 
&iloc); 879 if (unlikely(err)) 880 goto errout; 881 882 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 883 GFP_KERNEL); 884 if (!n_group_desc) { 885 err = -ENOMEM; 886 ext4_warning(sb, "not enough memory for %lu groups", 887 gdb_num + 1); 888 goto errout; 889 } 890 891 /* 892 * Finally, we have all of the possible failures behind us... 893 * 894 * Remove new GDT block from inode double-indirect block and clear out 895 * the new GDT block for use (which also "frees" the backup GDT blocks 896 * from the reserved inode). We don't need to change the bitmaps for 897 * these blocks, because they are marked as in-use from being in the 898 * reserved inode, and will become GDT blocks (primary and backup). 899 */ 900 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 901 err = ext4_handle_dirty_metadata(handle, NULL, dind); 902 if (unlikely(err)) { 903 ext4_std_error(sb, err); 904 goto errout; 905 } 906 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 907 (9 - EXT4_SB(sb)->s_cluster_bits); 908 ext4_mark_iloc_dirty(handle, inode, &iloc); 909 memset(gdb_bh->b_data, 0, sb->s_blocksize); 910 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 911 if (unlikely(err)) { 912 ext4_std_error(sb, err); 913 iloc.bh = NULL; 914 goto errout; 915 } 916 brelse(dind); 917 918 rcu_read_lock(); 919 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 920 memcpy(n_group_desc, o_group_desc, 921 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 922 rcu_read_unlock(); 923 n_group_desc[gdb_num] = gdb_bh; 924 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 925 EXT4_SB(sb)->s_gdb_count++; 926 ext4_kvfree_array_rcu(o_group_desc); 927 928 lock_buffer(EXT4_SB(sb)->s_sbh); 929 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 930 ext4_superblock_csum_set(sb); 931 unlock_buffer(EXT4_SB(sb)->s_sbh); 932 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 933 if (err) 934 ext4_std_error(sb, err); 935 return err; 936 errout: 937 kvfree(n_group_desc); 938 brelse(iloc.bh); 939 brelse(dind); 940 brelse(gdb_bh); 941 942 ext4_debug("leaving with error %d\n", err); 943 return err; 944 } 945 946 /* 947 * add_new_gdb_meta_bg is the sister of add_new_gdb. 
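 * Unlike add_new_gdb(), no block has to be taken from the resize inode here:
 * with META_BG the new group descriptor block lives at the start of the first
 * group it describes (right after the backup superblock, if that group has
 * one), so only the in-memory s_group_desc array needs to be grown and the
 * new block journalled.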
948 */ 949 static int add_new_gdb_meta_bg(struct super_block *sb, 950 handle_t *handle, ext4_group_t group) { 951 ext4_fsblk_t gdblock; 952 struct buffer_head *gdb_bh; 953 struct buffer_head **o_group_desc, **n_group_desc; 954 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 955 int err; 956 957 gdblock = ext4_meta_bg_first_block_no(sb, group) + 958 ext4_bg_has_super(sb, group); 959 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 960 if (IS_ERR(gdb_bh)) 961 return PTR_ERR(gdb_bh); 962 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 963 GFP_KERNEL); 964 if (!n_group_desc) { 965 brelse(gdb_bh); 966 err = -ENOMEM; 967 ext4_warning(sb, "not enough memory for %lu groups", 968 gdb_num + 1); 969 return err; 970 } 971 972 rcu_read_lock(); 973 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 974 memcpy(n_group_desc, o_group_desc, 975 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 976 rcu_read_unlock(); 977 n_group_desc[gdb_num] = gdb_bh; 978 979 BUFFER_TRACE(gdb_bh, "get_write_access"); 980 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 981 if (err) { 982 kvfree(n_group_desc); 983 brelse(gdb_bh); 984 return err; 985 } 986 987 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 988 EXT4_SB(sb)->s_gdb_count++; 989 ext4_kvfree_array_rcu(o_group_desc); 990 return err; 991 } 992 993 /* 994 * Called when we are adding a new group which has a backup copy of each of 995 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 996 * We need to add these reserved backup GDT blocks to the resize inode, so 997 * that they are kept for future resizing and not allocated to files. 998 * 999 * Each reserved backup GDT block will go into a different indirect block. 1000 * The indirect blocks are actually the primary reserved GDT blocks, 1001 * so we know in advance what their block numbers are. We only get the 1002 * double-indirect block to verify it is pointing to the primary reserved 1003 * GDT blocks so we don't overwrite a data block by accident. The reserved 1004 * backup GDT blocks are stored in their reserved primary GDT block. 
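 *
 * Concretely, for a new group whose first block is
 * blk = group * EXT4_BLOCKS_PER_GROUP(sb), the loop at the bottom of this
 * function appends one leaf entry to each reserved primary GDT block:
 *
 *	data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);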
1005 */ 1006 static int reserve_backup_gdb(handle_t *handle, struct inode *inode, 1007 ext4_group_t group) 1008 { 1009 struct super_block *sb = inode->i_sb; 1010 int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 1011 int cluster_bits = EXT4_SB(sb)->s_cluster_bits; 1012 struct buffer_head **primary; 1013 struct buffer_head *dind; 1014 struct ext4_iloc iloc; 1015 ext4_fsblk_t blk; 1016 __le32 *data, *end; 1017 int gdbackups = 0; 1018 int res, i; 1019 int err; 1020 1021 primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS); 1022 if (!primary) 1023 return -ENOMEM; 1024 1025 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 1026 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 1027 if (IS_ERR(dind)) { 1028 err = PTR_ERR(dind); 1029 dind = NULL; 1030 goto exit_free; 1031 } 1032 1033 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; 1034 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % 1035 EXT4_ADDR_PER_BLOCK(sb)); 1036 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); 1037 1038 /* Get each reserved primary GDT block and verify it holds backups */ 1039 for (res = 0; res < reserved_gdb; res++, blk++) { 1040 if (le32_to_cpu(*data) != blk) { 1041 ext4_warning(sb, "reserved block %llu" 1042 " not at offset %ld", 1043 blk, 1044 (long)(data - (__le32 *)dind->b_data)); 1045 err = -EINVAL; 1046 goto exit_bh; 1047 } 1048 primary[res] = ext4_sb_bread(sb, blk, 0); 1049 if (IS_ERR(primary[res])) { 1050 err = PTR_ERR(primary[res]); 1051 primary[res] = NULL; 1052 goto exit_bh; 1053 } 1054 gdbackups = verify_reserved_gdb(sb, group, primary[res]); 1055 if (gdbackups < 0) { 1056 brelse(primary[res]); 1057 err = gdbackups; 1058 goto exit_bh; 1059 } 1060 if (++data >= end) 1061 data = (__le32 *)dind->b_data; 1062 } 1063 1064 for (i = 0; i < reserved_gdb; i++) { 1065 BUFFER_TRACE(primary[i], "get_write_access"); 1066 if ((err = ext4_journal_get_write_access(handle, sb, primary[i], 1067 EXT4_JTR_NONE))) 1068 goto exit_bh; 1069 } 1070 1071 if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) 1072 goto exit_bh; 1073 1074 /* 1075 * Finally we can add each of the reserved backup GDT blocks from 1076 * the new group to its reserved primary GDT block. 1077 */ 1078 blk = group * EXT4_BLOCKS_PER_GROUP(sb); 1079 for (i = 0; i < reserved_gdb; i++) { 1080 int err2; 1081 data = (__le32 *)primary[i]->b_data; 1082 /* printk("reserving backup %lu[%u] = %lu\n", 1083 primary[i]->b_blocknr, gdbackups, 1084 blk + primary[i]->b_blocknr); */ 1085 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); 1086 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); 1087 if (!err) 1088 err = err2; 1089 } 1090 1091 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits); 1092 ext4_mark_iloc_dirty(handle, inode, &iloc); 1093 1094 exit_bh: 1095 while (--res >= 0) 1096 brelse(primary[res]); 1097 brelse(dind); 1098 1099 exit_free: 1100 kfree(primary); 1101 1102 return err; 1103 } 1104 1105 static inline void ext4_set_block_group_nr(struct super_block *sb, char *data, 1106 ext4_group_t group) 1107 { 1108 struct ext4_super_block *es = (struct ext4_super_block *) data; 1109 1110 es->s_block_group_nr = cpu_to_le16(group); 1111 if (ext4_has_metadata_csum(sb)) 1112 es->s_checksum = ext4_superblock_csum(sb, es); 1113 } 1114 1115 /* 1116 * Update the backup copies of the ext4 metadata. These don't need to be part 1117 * of the main resize transaction, because e2fsck will re-write them if there 1118 * is a problem (basically only OOM will cause a problem). 
However, we 1119 * _should_ update the backups if possible, in case the primary gets trashed 1120 * for some reason and we need to run e2fsck from a backup superblock. The 1121 * important part is that the new block and inode counts are in the backup 1122 * superblocks, and the location of the new group metadata in the GDT backups. 1123 * 1124 * We do not need take the s_resize_lock for this, because these 1125 * blocks are not otherwise touched by the filesystem code when it is 1126 * mounted. We don't need to worry about last changing from 1127 * sbi->s_groups_count, because the worst that can happen is that we 1128 * do not copy the full number of backups at this time. The resize 1129 * which changed s_groups_count will backup again. 1130 */ 1131 static void update_backups(struct super_block *sb, sector_t blk_off, char *data, 1132 int size, int meta_bg) 1133 { 1134 struct ext4_sb_info *sbi = EXT4_SB(sb); 1135 ext4_group_t last; 1136 const int bpg = EXT4_BLOCKS_PER_GROUP(sb); 1137 unsigned three = 1; 1138 unsigned five = 5; 1139 unsigned seven = 7; 1140 ext4_group_t group = 0; 1141 int rest = sb->s_blocksize - size; 1142 handle_t *handle; 1143 int err = 0, err2; 1144 1145 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 1146 if (IS_ERR(handle)) { 1147 group = 1; 1148 err = PTR_ERR(handle); 1149 goto exit_err; 1150 } 1151 1152 if (meta_bg == 0) { 1153 group = ext4_list_backups(sb, &three, &five, &seven); 1154 last = sbi->s_groups_count; 1155 } else { 1156 group = ext4_get_group_number(sb, blk_off) + 1; 1157 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); 1158 } 1159 1160 while (group < sbi->s_groups_count) { 1161 struct buffer_head *bh; 1162 ext4_fsblk_t backup_block; 1163 int has_super = ext4_bg_has_super(sb, group); 1164 ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group); 1165 1166 /* Out of journal space, and can't get more - abort - so sad */ 1167 err = ext4_resize_ensure_credits_batch(handle, 1); 1168 if (err < 0) 1169 break; 1170 1171 if (meta_bg == 0) 1172 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; 1173 else 1174 backup_block = first_block + has_super; 1175 1176 bh = sb_getblk(sb, backup_block); 1177 if (unlikely(!bh)) { 1178 err = -ENOMEM; 1179 break; 1180 } 1181 ext4_debug("update metadata backup %llu(+%llu)\n", 1182 backup_block, backup_block - 1183 ext4_group_first_block_no(sb, group)); 1184 BUFFER_TRACE(bh, "get_write_access"); 1185 if ((err = ext4_journal_get_write_access(handle, sb, bh, 1186 EXT4_JTR_NONE))) { 1187 brelse(bh); 1188 break; 1189 } 1190 lock_buffer(bh); 1191 memcpy(bh->b_data, data, size); 1192 if (rest) 1193 memset(bh->b_data + size, 0, rest); 1194 if (has_super && (backup_block == first_block)) 1195 ext4_set_block_group_nr(sb, bh->b_data, group); 1196 set_buffer_uptodate(bh); 1197 unlock_buffer(bh); 1198 err = ext4_handle_dirty_metadata(handle, NULL, bh); 1199 if (unlikely(err)) 1200 ext4_std_error(sb, err); 1201 brelse(bh); 1202 1203 if (meta_bg == 0) 1204 group = ext4_list_backups(sb, &three, &five, &seven); 1205 else if (group == last) 1206 break; 1207 else 1208 group = last; 1209 } 1210 if ((err2 = ext4_journal_stop(handle)) && !err) 1211 err = err2; 1212 1213 /* 1214 * Ugh! Need to have e2fsck write the backup copies. It is too 1215 * late to revert the resize, we shouldn't fail just because of 1216 * the backup copies (they are only needed in case of corruption). 
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag in the hope it will be written
	 * to disk, and if not - we will simply wait until the next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds the group descriptors of @count groups
 * starting at @group.
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
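		 *
		 * (gdb_off != 0 means this group's descriptor lands in an
		 * already existing GDT block, so at most the reserved backup
		 * GDT blocks need updating; gdb_off == 0 means a new GDT block
		 * has to be brought into use, either from the resize inode or,
		 * with META_BG, from the new group itself.)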
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
							    EXT4_JTR_NONE);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;


	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1350 */ 1351 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num); 1352 /* Update group descriptor block for new group */ 1353 gdp = (struct ext4_group_desc *)(gdb_bh->b_data + 1354 gdb_off * EXT4_DESC_SIZE(sb)); 1355 1356 memset(gdp, 0, EXT4_DESC_SIZE(sb)); 1357 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); 1358 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); 1359 err = ext4_set_bitmap_checksums(sb, gdp, group_data); 1360 if (err) { 1361 ext4_std_error(sb, err); 1362 break; 1363 } 1364 1365 ext4_inode_table_set(sb, gdp, group_data->inode_table); 1366 ext4_free_group_clusters_set(sb, gdp, 1367 group_data->free_clusters_count); 1368 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); 1369 if (ext4_has_group_desc_csum(sb)) 1370 ext4_itable_unused_set(sb, gdp, 1371 EXT4_INODES_PER_GROUP(sb)); 1372 gdp->bg_flags = cpu_to_le16(*bg_flags); 1373 ext4_group_desc_csum_set(sb, group, gdp); 1374 1375 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 1376 if (unlikely(err)) { 1377 ext4_std_error(sb, err); 1378 break; 1379 } 1380 1381 /* 1382 * We can allocate memory for mb_alloc based on the new group 1383 * descriptor 1384 */ 1385 err = ext4_mb_add_groupinfo(sb, group, gdp); 1386 if (err) 1387 break; 1388 } 1389 return err; 1390 } 1391 1392 static void ext4_add_overhead(struct super_block *sb, 1393 const ext4_fsblk_t overhead) 1394 { 1395 struct ext4_sb_info *sbi = EXT4_SB(sb); 1396 struct ext4_super_block *es = sbi->s_es; 1397 1398 sbi->s_overhead += overhead; 1399 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1400 smp_wmb(); 1401 } 1402 1403 /* 1404 * ext4_update_super() updates the super block so that the newly added 1405 * groups can be seen by the filesystem. 1406 * 1407 * @sb: super block 1408 * @flex_gd: new added groups 1409 */ 1410 static void ext4_update_super(struct super_block *sb, 1411 struct ext4_new_flex_group_data *flex_gd) 1412 { 1413 ext4_fsblk_t blocks_count = 0; 1414 ext4_fsblk_t free_blocks = 0; 1415 ext4_fsblk_t reserved_blocks = 0; 1416 struct ext4_new_group_data *group_data = flex_gd->groups; 1417 struct ext4_sb_info *sbi = EXT4_SB(sb); 1418 struct ext4_super_block *es = sbi->s_es; 1419 int i; 1420 1421 BUG_ON(flex_gd->count == 0 || group_data == NULL); 1422 /* 1423 * Make the new blocks and inodes valid next. We do this before 1424 * increasing the group count so that once the group is enabled, 1425 * all of its blocks and inodes are already valid. 1426 * 1427 * We always allocate group-by-group, then block-by-block or 1428 * inode-by-inode within a group, so enabling these 1429 * blocks/inodes before the group is live won't actually let us 1430 * allocate the new space yet. 
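	 *
	 * The reserved block count is grown in the same proportion as the
	 * existing reservation, roughly:
	 *
	 *	reserved_blocks = blocks_count * ext4_r_blocks_count(es) /
	 *			  ext4_blocks_count(es)
	 *
	 * which is computed below before the superblock fields are updated.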
1431 */ 1432 for (i = 0; i < flex_gd->count; i++) { 1433 blocks_count += group_data[i].blocks_count; 1434 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count); 1435 } 1436 1437 reserved_blocks = ext4_r_blocks_count(es) * 100; 1438 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es)); 1439 reserved_blocks *= blocks_count; 1440 do_div(reserved_blocks, 100); 1441 1442 lock_buffer(sbi->s_sbh); 1443 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); 1444 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); 1445 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1446 flex_gd->count); 1447 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1448 flex_gd->count); 1449 1450 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); 1451 /* 1452 * We need to protect s_groups_count against other CPUs seeing 1453 * inconsistent state in the superblock. 1454 * 1455 * The precise rules we use are: 1456 * 1457 * * Writers must perform a smp_wmb() after updating all 1458 * dependent data and before modifying the groups count 1459 * 1460 * * Readers must perform an smp_rmb() after reading the groups 1461 * count and before reading any dependent data. 1462 * 1463 * NB. These rules can be relaxed when checking the group count 1464 * while freeing data, as we can only allocate from a block 1465 * group after serialising against the group count, and we can 1466 * only then free after serialising in turn against that 1467 * allocation. 1468 */ 1469 smp_wmb(); 1470 1471 /* Update the global fs size fields */ 1472 sbi->s_groups_count += flex_gd->count; 1473 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 1474 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 1475 1476 /* Update the reserved block counts only once the new group is 1477 * active. */ 1478 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + 1479 reserved_blocks); 1480 1481 /* Update the free space counts */ 1482 percpu_counter_add(&sbi->s_freeclusters_counter, 1483 EXT4_NUM_B2C(sbi, free_blocks)); 1484 percpu_counter_add(&sbi->s_freeinodes_counter, 1485 EXT4_INODES_PER_GROUP(sb) * flex_gd->count); 1486 1487 ext4_debug("free blocks count %llu", 1488 percpu_counter_read(&sbi->s_freeclusters_counter)); 1489 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { 1490 ext4_group_t flex_group; 1491 struct flex_groups *fg; 1492 1493 flex_group = ext4_flex_group(sbi, group_data[0].group); 1494 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 1495 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), 1496 &fg->free_clusters); 1497 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, 1498 &fg->free_inodes); 1499 } 1500 1501 /* 1502 * Update the fs overhead information. 1503 * 1504 * For bigalloc, if the superblock already has a properly calculated 1505 * overhead, update it with a value based on numbers already computed 1506 * above for the newly allocated capacity. 1507 */ 1508 if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0)) 1509 ext4_add_overhead(sb, 1510 EXT4_NUM_B2C(sbi, blocks_count - free_blocks)); 1511 else 1512 ext4_calculate_overhead(sb); 1513 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1514 1515 ext4_superblock_csum_set(sb); 1516 unlock_buffer(sbi->s_sbh); 1517 if (test_opt(sb, DEBUG)) 1518 printk(KERN_DEBUG "EXT4-fs: added group %u:" 1519 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, 1520 blocks_count, free_blocks, reserved_blocks); 1521 } 1522 1523 /* Add a flex group to an fs. 
Ensure we handle all possible error conditions 1524 * _before_ we start modifying the filesystem, because we cannot abort the 1525 * transaction and not have it write the data to disk. 1526 */ 1527 static int ext4_flex_group_add(struct super_block *sb, 1528 struct inode *resize_inode, 1529 struct ext4_new_flex_group_data *flex_gd) 1530 { 1531 struct ext4_sb_info *sbi = EXT4_SB(sb); 1532 struct ext4_super_block *es = sbi->s_es; 1533 ext4_fsblk_t o_blocks_count; 1534 ext4_grpblk_t last; 1535 ext4_group_t group; 1536 handle_t *handle; 1537 unsigned reserved_gdb; 1538 int err = 0, err2 = 0, credit; 1539 1540 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); 1541 1542 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 1543 o_blocks_count = ext4_blocks_count(es); 1544 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1545 BUG_ON(last); 1546 1547 err = setup_new_flex_group_blocks(sb, flex_gd); 1548 if (err) 1549 goto exit; 1550 /* 1551 * We will always be modifying at least the superblock and GDT 1552 * blocks. If we are adding a group past the last current GDT block, 1553 * we will also modify the inode and the dindirect block. If we 1554 * are adding a group with superblock/GDT backups we will also 1555 * modify each of the reserved GDT dindirect blocks. 1556 */ 1557 credit = 3; /* sb, resize inode, resize inode dindirect */ 1558 /* GDT blocks */ 1559 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); 1560 credit += reserved_gdb; /* Reserved GDT dindirect blocks */ 1561 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1562 if (IS_ERR(handle)) { 1563 err = PTR_ERR(handle); 1564 goto exit; 1565 } 1566 1567 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1568 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, 1569 EXT4_JTR_NONE); 1570 if (err) 1571 goto exit_journal; 1572 1573 group = flex_gd->groups[0].group; 1574 BUG_ON(group != sbi->s_groups_count); 1575 err = ext4_add_new_descs(handle, sb, group, 1576 resize_inode, flex_gd->count); 1577 if (err) 1578 goto exit_journal; 1579 1580 err = ext4_setup_new_descs(handle, sb, flex_gd); 1581 if (err) 1582 goto exit_journal; 1583 1584 ext4_update_super(sb, flex_gd); 1585 1586 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 1587 1588 exit_journal: 1589 err2 = ext4_journal_stop(handle); 1590 if (!err) 1591 err = err2; 1592 1593 if (!err) { 1594 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1595 int gdb_num_end = ((group + flex_gd->count - 1) / 1596 EXT4_DESC_PER_BLOCK(sb)); 1597 int meta_bg = ext4_has_feature_meta_bg(sb); 1598 sector_t padding_blocks = meta_bg ? 
0 : sbi->s_sbh->b_blocknr - 1599 ext4_group_first_block_no(sb, 0); 1600 sector_t old_gdb = 0; 1601 1602 update_backups(sb, ext4_group_first_block_no(sb, 0), 1603 (char *)es, sizeof(struct ext4_super_block), 0); 1604 for (; gdb_num <= gdb_num_end; gdb_num++) { 1605 struct buffer_head *gdb_bh; 1606 1607 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1608 gdb_num); 1609 if (old_gdb == gdb_bh->b_blocknr) 1610 continue; 1611 update_backups(sb, gdb_bh->b_blocknr - padding_blocks, 1612 gdb_bh->b_data, gdb_bh->b_size, meta_bg); 1613 old_gdb = gdb_bh->b_blocknr; 1614 } 1615 } 1616 exit: 1617 return err; 1618 } 1619 1620 static int ext4_setup_next_flex_gd(struct super_block *sb, 1621 struct ext4_new_flex_group_data *flex_gd, 1622 ext4_fsblk_t n_blocks_count, 1623 unsigned int flexbg_size) 1624 { 1625 struct ext4_sb_info *sbi = EXT4_SB(sb); 1626 struct ext4_super_block *es = sbi->s_es; 1627 struct ext4_new_group_data *group_data = flex_gd->groups; 1628 ext4_fsblk_t o_blocks_count; 1629 ext4_group_t n_group; 1630 ext4_group_t group; 1631 ext4_group_t last_group; 1632 ext4_grpblk_t last; 1633 ext4_grpblk_t clusters_per_group; 1634 unsigned long i; 1635 1636 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb); 1637 1638 o_blocks_count = ext4_blocks_count(es); 1639 1640 if (o_blocks_count == n_blocks_count) 1641 return 0; 1642 1643 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1644 BUG_ON(last); 1645 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last); 1646 1647 last_group = group | (flexbg_size - 1); 1648 if (last_group > n_group) 1649 last_group = n_group; 1650 1651 flex_gd->count = last_group - group + 1; 1652 1653 for (i = 0; i < flex_gd->count; i++) { 1654 int overhead; 1655 1656 group_data[i].group = group + i; 1657 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb); 1658 overhead = ext4_group_overhead_blocks(sb, group + i); 1659 group_data[i].mdata_blocks = overhead; 1660 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb); 1661 if (ext4_has_group_desc_csum(sb)) { 1662 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | 1663 EXT4_BG_INODE_UNINIT; 1664 if (!test_opt(sb, INIT_INODE_TABLE)) 1665 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED; 1666 } else 1667 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; 1668 } 1669 1670 if (last_group == n_group && ext4_has_group_desc_csum(sb)) 1671 /* We need to initialize block bitmap of last group. */ 1672 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; 1673 1674 if ((last_group == n_group) && (last != clusters_per_group - 1)) { 1675 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1); 1676 group_data[i - 1].free_clusters_count -= clusters_per_group - 1677 last - 1; 1678 } 1679 1680 return 1; 1681 } 1682 1683 /* Add group descriptor data to an existing or new group descriptor block. 1684 * Ensure we handle all possible error conditions _before_ we start modifying 1685 * the filesystem, because we cannot abort the transaction and not have it 1686 * write the data to disk. 1687 * 1688 * If we are on a GDT block boundary, we need to get the reserved GDT block. 1689 * Otherwise, we may need to add backup GDT blocks for a sparse group. 1690 * 1691 * We only need to hold the superblock lock while we are actually adding 1692 * in the new group's counts to the superblock. Prior to that we have 1693 * not really "added" the group at all. We re-check that we are still 1694 * adding in the last group in case things have changed since verifying. 
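 *
 * Internally the single new group is simply wrapped in a one-element
 * ext4_new_flex_group_data and handed to ext4_flex_group_add().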
1695 */ 1696 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) 1697 { 1698 struct ext4_new_flex_group_data flex_gd; 1699 struct ext4_sb_info *sbi = EXT4_SB(sb); 1700 struct ext4_super_block *es = sbi->s_es; 1701 int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 1702 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1703 struct inode *inode = NULL; 1704 int gdb_off; 1705 int err; 1706 __u16 bg_flags = 0; 1707 1708 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); 1709 1710 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) { 1711 ext4_warning(sb, "Can't resize non-sparse filesystem further"); 1712 return -EPERM; 1713 } 1714 1715 if (ext4_blocks_count(es) + input->blocks_count < 1716 ext4_blocks_count(es)) { 1717 ext4_warning(sb, "blocks_count overflow"); 1718 return -EINVAL; 1719 } 1720 1721 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 1722 le32_to_cpu(es->s_inodes_count)) { 1723 ext4_warning(sb, "inodes_count overflow"); 1724 return -EINVAL; 1725 } 1726 1727 if (reserved_gdb || gdb_off == 0) { 1728 if (!ext4_has_feature_resize_inode(sb) || 1729 !le16_to_cpu(es->s_reserved_gdt_blocks)) { 1730 ext4_warning(sb, 1731 "No reserved GDT blocks, can't resize"); 1732 return -EPERM; 1733 } 1734 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL); 1735 if (IS_ERR(inode)) { 1736 ext4_warning(sb, "Error opening resize inode"); 1737 return PTR_ERR(inode); 1738 } 1739 } 1740 1741 1742 err = verify_group_input(sb, input); 1743 if (err) 1744 goto out; 1745 1746 err = ext4_alloc_flex_bg_array(sb, input->group + 1); 1747 if (err) 1748 goto out; 1749 1750 err = ext4_mb_alloc_groupinfo(sb, input->group + 1); 1751 if (err) 1752 goto out; 1753 1754 flex_gd.count = 1; 1755 flex_gd.groups = input; 1756 flex_gd.bg_flags = &bg_flags; 1757 err = ext4_flex_group_add(sb, inode, &flex_gd); 1758 out: 1759 iput(inode); 1760 return err; 1761 } /* ext4_group_add */ 1762 1763 /* 1764 * extend a group without checking assuming that checking has been done. 1765 */ 1766 static int ext4_group_extend_no_check(struct super_block *sb, 1767 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) 1768 { 1769 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1770 handle_t *handle; 1771 int err = 0, err2; 1772 1773 /* We will update the superblock, one block bitmap, and 1774 * one group descriptor via ext4_group_add_blocks(). 
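	 * That is three blocks of metadata, which is why the journal handle
	 * below is started with three credits.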
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	lock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only.
	 */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */

static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ?
		err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0;
	int meta_bg;
	unsigned int flexbg_size = ext4_flex_bg_size(sbi);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg) ||
	    n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space so that it's
	 * guaranteed to have room for all the metadata blocks it might
	 * need to hold.  (We might not need to store the inode table
	 * blocks in the last block group, but there will be cases where
	 * this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}
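
/*
 * Illustrative sketch only (not part of this file): the entry points above
 * are normally driven from the online-resize ioctl path, roughly in the
 * order shown below.  The actual caller lives in fs/ext4/ioctl.c and also
 * handles write access to the mount, journal flushing and error reporting,
 * all of which are elided here; the exact arguments are an assumption for
 * illustration.
 *
 *	err = ext4_resize_begin(sb);		   // serialize resizers
 *	if (err)
 *		return err;
 *	err = ext4_resize_fs(sb, n_blocks_count); // grow to requested size
 *	err2 = ext4_resize_end(sb, true);	   // drop flag, update overhead
 *	if (err == 0)
 *		err = err2;
 */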