1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/ext4/resize.c 4 * 5 * Support for resizing an ext4 filesystem while it is mounted. 6 * 7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> 8 * 9 * This could probably be made into a module, because it is not often in use. 10 */ 11 12 13 #define EXT4FS_DEBUG 14 15 #include <linux/errno.h> 16 #include <linux/slab.h> 17 #include <linux/jiffies.h> 18 19 #include "ext4_jbd2.h" 20 21 struct ext4_rcu_ptr { 22 struct rcu_head rcu; 23 void *ptr; 24 }; 25 26 static void ext4_rcu_ptr_callback(struct rcu_head *head) 27 { 28 struct ext4_rcu_ptr *ptr; 29 30 ptr = container_of(head, struct ext4_rcu_ptr, rcu); 31 kvfree(ptr->ptr); 32 kfree(ptr); 33 } 34 35 void ext4_kvfree_array_rcu(void *to_free) 36 { 37 struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); 38 39 if (ptr) { 40 ptr->ptr = to_free; 41 call_rcu(&ptr->rcu, ext4_rcu_ptr_callback); 42 return; 43 } 44 synchronize_rcu(); 45 kvfree(to_free); 46 } 47 48 int ext4_resize_begin(struct super_block *sb) 49 { 50 struct ext4_sb_info *sbi = EXT4_SB(sb); 51 int ret = 0; 52 53 if (!capable(CAP_SYS_RESOURCE)) 54 return -EPERM; 55 56 /* 57 * If the reserved GDT blocks is non-zero, the resize_inode feature 58 * should always be set. 59 */ 60 if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks && 61 !ext4_has_feature_resize_inode(sb)) { 62 ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); 63 return -EFSCORRUPTED; 64 } 65 66 /* 67 * If we are not using the primary superblock/GDT copy don't resize, 68 * because the user tools have no way of handling this. Probably a 69 * bad time to do it anyways. 70 */ 71 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != 72 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 73 ext4_warning(sb, "won't resize using backup superblock at %llu", 74 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 75 return -EPERM; 76 } 77 78 /* 79 * We are not allowed to do online-resizing on a filesystem mounted 80 * with error, because it can destroy the filesystem easily. 81 */ 82 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 83 ext4_warning(sb, "There are errors in the filesystem, " 84 "so online resizing is not allowed"); 85 return -EPERM; 86 } 87 88 if (ext4_has_feature_sparse_super2(sb)) { 89 ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); 90 return -EOPNOTSUPP; 91 } 92 93 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, 94 &EXT4_SB(sb)->s_ext4_flags)) 95 ret = -EBUSY; 96 97 return ret; 98 } 99 100 int ext4_resize_end(struct super_block *sb, bool update_backups) 101 { 102 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags); 103 smp_mb__after_atomic(); 104 if (update_backups) 105 return ext4_update_overhead(sb, true); 106 return 0; 107 } 108 109 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb, 110 ext4_group_t group) { 111 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) << 112 EXT4_DESC_PER_BLOCK_BITS(sb); 113 } 114 115 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb, 116 ext4_group_t group) { 117 group = ext4_meta_bg_first_group(sb, group); 118 return ext4_group_first_block_no(sb, group); 119 } 120 121 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb, 122 ext4_group_t group) { 123 ext4_grpblk_t overhead; 124 overhead = ext4_bg_num_gdb(sb, group); 125 if (ext4_bg_has_super(sb, group)) 126 overhead += 1 + 127 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 128 return overhead; 129 } 130 131 #define outside(b, first, last) ((b) < (first) || (b) >= (last)) 132 #define inside(b, first, last) ((b) >= (first) && (b) < (last)) 133 134 static int verify_group_input(struct super_block *sb, 135 struct ext4_new_group_data *input) 136 { 137 struct ext4_sb_info *sbi = EXT4_SB(sb); 138 struct ext4_super_block *es = sbi->s_es; 139 ext4_fsblk_t start = ext4_blocks_count(es); 140 ext4_fsblk_t end = start + input->blocks_count; 141 ext4_group_t group = input->group; 142 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; 143 unsigned overhead; 144 ext4_fsblk_t metaend; 145 struct buffer_head *bh = NULL; 146 ext4_grpblk_t free_blocks_count, offset; 147 int err = -EINVAL; 148 149 if (group != sbi->s_groups_count) { 150 ext4_warning(sb, "Cannot add at group %u (only %u groups)", 151 input->group, sbi->s_groups_count); 152 return -EINVAL; 153 } 154 155 overhead = ext4_group_overhead_blocks(sb, group); 156 metaend = start + overhead; 157 input->free_clusters_count = free_blocks_count = 158 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; 159 160 if (test_opt(sb, DEBUG)) 161 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " 162 "(%d free, %u reserved)\n", 163 ext4_bg_has_super(sb, input->group) ? "normal" : 164 "no-super", input->group, input->blocks_count, 165 free_blocks_count, input->reserved_blocks); 166 167 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 168 if (offset != 0) 169 ext4_warning(sb, "Last group not full"); 170 else if (input->reserved_blocks > input->blocks_count / 5) 171 ext4_warning(sb, "Reserved blocks too high (%u)", 172 input->reserved_blocks); 173 else if (free_blocks_count < 0) 174 ext4_warning(sb, "Bad blocks count %u", 175 input->blocks_count); 176 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) { 177 err = PTR_ERR(bh); 178 bh = NULL; 179 ext4_warning(sb, "Cannot read last block (%llu)", 180 end - 1); 181 } else if (outside(input->block_bitmap, start, end)) 182 ext4_warning(sb, "Block bitmap not in group (block %llu)", 183 (unsigned long long)input->block_bitmap); 184 else if (outside(input->inode_bitmap, start, end)) 185 ext4_warning(sb, "Inode bitmap not in group (block %llu)", 186 (unsigned long long)input->inode_bitmap); 187 else if (outside(input->inode_table, start, end) || 188 outside(itend - 1, start, end)) 189 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", 190 (unsigned long long)input->inode_table, itend - 1); 191 else if (input->inode_bitmap == input->block_bitmap) 192 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", 193 (unsigned long long)input->block_bitmap); 194 else if (inside(input->block_bitmap, input->inode_table, itend)) 195 ext4_warning(sb, "Block bitmap (%llu) in inode table " 196 "(%llu-%llu)", 197 (unsigned long long)input->block_bitmap, 198 (unsigned long long)input->inode_table, itend - 1); 199 else if (inside(input->inode_bitmap, input->inode_table, itend)) 200 ext4_warning(sb, "Inode bitmap (%llu) in inode table " 201 "(%llu-%llu)", 202 (unsigned long long)input->inode_bitmap, 203 (unsigned long long)input->inode_table, itend - 1); 204 else if (inside(input->block_bitmap, start, metaend)) 205 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", 206 (unsigned long long)input->block_bitmap, 207 start, metaend - 1); 208 else if (inside(input->inode_bitmap, start, metaend)) 209 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", 210 (unsigned long long)input->inode_bitmap, 211 start, metaend - 1); 212 else if (inside(input->inode_table, start, metaend) || 213 inside(itend - 1, start, metaend)) 214 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " 215 "(%llu-%llu)", 216 (unsigned long long)input->inode_table, 217 itend - 1, start, metaend - 1); 218 else 219 err = 0; 220 brelse(bh); 221 222 return err; 223 } 224 225 /* 226 * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex 227 * group each time. 228 */ 229 struct ext4_new_flex_group_data { 230 struct ext4_new_group_data *groups; /* new_group_data for groups 231 in the flex group */ 232 __u16 *bg_flags; /* block group flags of groups 233 in @groups */ 234 ext4_group_t count; /* number of groups in @groups 235 */ 236 }; 237 238 /* 239 * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of 240 * @flexbg_size. 241 * 242 * Returns NULL on failure otherwise address of the allocated structure. 243 */ 244 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) 245 { 246 struct ext4_new_flex_group_data *flex_gd; 247 248 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); 249 if (flex_gd == NULL) 250 goto out3; 251 252 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data)) 253 goto out2; 254 flex_gd->count = flexbg_size; 255 256 flex_gd->groups = kmalloc_array(flexbg_size, 257 sizeof(struct ext4_new_group_data), 258 GFP_NOFS); 259 if (flex_gd->groups == NULL) 260 goto out2; 261 262 flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16), 263 GFP_NOFS); 264 if (flex_gd->bg_flags == NULL) 265 goto out1; 266 267 return flex_gd; 268 269 out1: 270 kfree(flex_gd->groups); 271 out2: 272 kfree(flex_gd); 273 out3: 274 return NULL; 275 } 276 277 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) 278 { 279 kfree(flex_gd->bg_flags); 280 kfree(flex_gd->groups); 281 kfree(flex_gd); 282 } 283 284 /* 285 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps 286 * and inode tables for a flex group. 287 * 288 * This function is used by 64bit-resize. Note that this function allocates 289 * group tables from the 1st group of groups contained by @flexgd, which may 290 * be a partial of a flex group. 291 * 292 * @sb: super block of fs to which the groups belongs 293 * 294 * Returns 0 on a successful allocation of the metadata blocks in the 295 * block group. 296 */ 297 static int ext4_alloc_group_tables(struct super_block *sb, 298 struct ext4_new_flex_group_data *flex_gd, 299 int flexbg_size) 300 { 301 struct ext4_new_group_data *group_data = flex_gd->groups; 302 ext4_fsblk_t start_blk; 303 ext4_fsblk_t last_blk; 304 ext4_group_t src_group; 305 ext4_group_t bb_index = 0; 306 ext4_group_t ib_index = 0; 307 ext4_group_t it_index = 0; 308 ext4_group_t group; 309 ext4_group_t last_group; 310 unsigned overhead; 311 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 312 int i; 313 314 BUG_ON(flex_gd->count == 0 || group_data == NULL); 315 316 src_group = group_data[0].group; 317 last_group = src_group + flex_gd->count - 1; 318 319 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != 320 (last_group & ~(flexbg_size - 1)))); 321 next_group: 322 group = group_data[0].group; 323 if (src_group >= group_data[0].group + flex_gd->count) 324 return -ENOSPC; 325 start_blk = ext4_group_first_block_no(sb, src_group); 326 last_blk = start_blk + group_data[src_group - group].blocks_count; 327 328 overhead = ext4_group_overhead_blocks(sb, src_group); 329 330 start_blk += overhead; 331 332 /* We collect contiguous blocks as much as possible. */ 333 src_group++; 334 for (; src_group <= last_group; src_group++) { 335 overhead = ext4_group_overhead_blocks(sb, src_group); 336 if (overhead == 0) 337 last_blk += group_data[src_group - group].blocks_count; 338 else 339 break; 340 } 341 342 /* Allocate block bitmaps */ 343 for (; bb_index < flex_gd->count; bb_index++) { 344 if (start_blk >= last_blk) 345 goto next_group; 346 group_data[bb_index].block_bitmap = start_blk++; 347 group = ext4_get_group_number(sb, start_blk - 1); 348 group -= group_data[0].group; 349 group_data[group].mdata_blocks++; 350 flex_gd->bg_flags[group] &= uninit_mask; 351 } 352 353 /* Allocate inode bitmaps */ 354 for (; ib_index < flex_gd->count; ib_index++) { 355 if (start_blk >= last_blk) 356 goto next_group; 357 group_data[ib_index].inode_bitmap = start_blk++; 358 group = ext4_get_group_number(sb, start_blk - 1); 359 group -= group_data[0].group; 360 group_data[group].mdata_blocks++; 361 flex_gd->bg_flags[group] &= uninit_mask; 362 } 363 364 /* Allocate inode tables */ 365 for (; it_index < flex_gd->count; it_index++) { 366 unsigned int itb = EXT4_SB(sb)->s_itb_per_group; 367 ext4_fsblk_t next_group_start; 368 369 if (start_blk + itb > last_blk) 370 goto next_group; 371 group_data[it_index].inode_table = start_blk; 372 group = ext4_get_group_number(sb, start_blk); 373 next_group_start = ext4_group_first_block_no(sb, group + 1); 374 group -= group_data[0].group; 375 376 if (start_blk + itb > next_group_start) { 377 flex_gd->bg_flags[group + 1] &= uninit_mask; 378 overhead = start_blk + itb - next_group_start; 379 group_data[group + 1].mdata_blocks += overhead; 380 itb -= overhead; 381 } 382 383 group_data[group].mdata_blocks += itb; 384 flex_gd->bg_flags[group] &= uninit_mask; 385 start_blk += EXT4_SB(sb)->s_itb_per_group; 386 } 387 388 /* Update free clusters count to exclude metadata blocks */ 389 for (i = 0; i < flex_gd->count; i++) { 390 group_data[i].free_clusters_count -= 391 EXT4_NUM_B2C(EXT4_SB(sb), 392 group_data[i].mdata_blocks); 393 } 394 395 if (test_opt(sb, DEBUG)) { 396 int i; 397 group = group_data[0].group; 398 399 printk(KERN_DEBUG "EXT4-fs: adding a flex group with " 400 "%d groups, flexbg size is %d:\n", flex_gd->count, 401 flexbg_size); 402 403 for (i = 0; i < flex_gd->count; i++) { 404 ext4_debug( 405 "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n", 406 ext4_bg_has_super(sb, group + i) ? "normal" : 407 "no-super", group + i, 408 group_data[i].blocks_count, 409 group_data[i].free_clusters_count, 410 group_data[i].mdata_blocks); 411 } 412 } 413 return 0; 414 } 415 416 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, 417 ext4_fsblk_t blk) 418 { 419 struct buffer_head *bh; 420 int err; 421 422 bh = sb_getblk(sb, blk); 423 if (unlikely(!bh)) 424 return ERR_PTR(-ENOMEM); 425 BUFFER_TRACE(bh, "get_write_access"); 426 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 427 if (err) { 428 brelse(bh); 429 bh = ERR_PTR(err); 430 } else { 431 memset(bh->b_data, 0, sb->s_blocksize); 432 set_buffer_uptodate(bh); 433 } 434 435 return bh; 436 } 437 438 static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits) 439 { 440 return ext4_journal_ensure_credits_fn(handle, credits, 441 EXT4_MAX_TRANS_DATA, 0, 0); 442 } 443 444 /* 445 * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used. 446 * 447 * Helper function for ext4_setup_new_group_blocks() which set . 448 * 449 * @sb: super block 450 * @handle: journal handle 451 * @flex_gd: flex group data 452 */ 453 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, 454 struct ext4_new_flex_group_data *flex_gd, 455 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster) 456 { 457 struct ext4_sb_info *sbi = EXT4_SB(sb); 458 ext4_group_t count = last_cluster - first_cluster + 1; 459 ext4_group_t count2; 460 461 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster, 462 last_cluster); 463 for (count2 = count; count > 0; 464 count -= count2, first_cluster += count2) { 465 ext4_fsblk_t start; 466 struct buffer_head *bh; 467 ext4_group_t group; 468 int err; 469 470 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster)); 471 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group)); 472 group -= flex_gd->groups[0].group; 473 474 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start); 475 if (count2 > count) 476 count2 = count; 477 478 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { 479 BUG_ON(flex_gd->count > 1); 480 continue; 481 } 482 483 err = ext4_resize_ensure_credits_batch(handle, 1); 484 if (err < 0) 485 return err; 486 487 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); 488 if (unlikely(!bh)) 489 return -ENOMEM; 490 491 BUFFER_TRACE(bh, "get_write_access"); 492 err = ext4_journal_get_write_access(handle, sb, bh, 493 EXT4_JTR_NONE); 494 if (err) { 495 brelse(bh); 496 return err; 497 } 498 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", 499 first_cluster, first_cluster - start, count2); 500 mb_set_bits(bh->b_data, first_cluster - start, count2); 501 502 err = ext4_handle_dirty_metadata(handle, NULL, bh); 503 brelse(bh); 504 if (unlikely(err)) 505 return err; 506 } 507 508 return 0; 509 } 510 511 /* 512 * Set up the block and inode bitmaps, and the inode table for the new groups. 513 * This doesn't need to be part of the main transaction, since we are only 514 * changing blocks outside the actual filesystem. We still do journaling to 515 * ensure the recovery is correct in case of a failure just after resize. 516 * If any part of this fails, we simply abort the resize. 517 * 518 * setup_new_flex_group_blocks handles a flex group as follow: 519 * 1. copy super block and GDT, and initialize group tables if necessary. 520 * In this step, we only set bits in blocks bitmaps for blocks taken by 521 * super block and GDT. 522 * 2. allocate group tables in block bitmaps, that is, set bits in block 523 * bitmap for blocks taken by group tables. 524 */ 525 static int setup_new_flex_group_blocks(struct super_block *sb, 526 struct ext4_new_flex_group_data *flex_gd) 527 { 528 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group}; 529 ext4_fsblk_t start; 530 ext4_fsblk_t block; 531 struct ext4_sb_info *sbi = EXT4_SB(sb); 532 struct ext4_super_block *es = sbi->s_es; 533 struct ext4_new_group_data *group_data = flex_gd->groups; 534 __u16 *bg_flags = flex_gd->bg_flags; 535 handle_t *handle; 536 ext4_group_t group, count; 537 struct buffer_head *bh = NULL; 538 int reserved_gdb, i, j, err = 0, err2; 539 int meta_bg; 540 541 BUG_ON(!flex_gd->count || !group_data || 542 group_data[0].group != sbi->s_groups_count); 543 544 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 545 meta_bg = ext4_has_feature_meta_bg(sb); 546 547 /* This transaction may be extended/restarted along the way */ 548 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 549 if (IS_ERR(handle)) 550 return PTR_ERR(handle); 551 552 group = group_data[0].group; 553 for (i = 0; i < flex_gd->count; i++, group++) { 554 unsigned long gdblocks; 555 ext4_grpblk_t overhead; 556 557 gdblocks = ext4_bg_num_gdb(sb, group); 558 start = ext4_group_first_block_no(sb, group); 559 560 if (meta_bg == 0 && !ext4_bg_has_super(sb, group)) 561 goto handle_itb; 562 563 if (meta_bg == 1) 564 goto handle_itb; 565 566 block = start + ext4_bg_has_super(sb, group); 567 /* Copy all of the GDT blocks into the backup in this group */ 568 for (j = 0; j < gdblocks; j++, block++) { 569 struct buffer_head *gdb; 570 571 ext4_debug("update backup group %#04llx\n", block); 572 err = ext4_resize_ensure_credits_batch(handle, 1); 573 if (err < 0) 574 goto out; 575 576 gdb = sb_getblk(sb, block); 577 if (unlikely(!gdb)) { 578 err = -ENOMEM; 579 goto out; 580 } 581 582 BUFFER_TRACE(gdb, "get_write_access"); 583 err = ext4_journal_get_write_access(handle, sb, gdb, 584 EXT4_JTR_NONE); 585 if (err) { 586 brelse(gdb); 587 goto out; 588 } 589 memcpy(gdb->b_data, sbi_array_rcu_deref(sbi, 590 s_group_desc, j)->b_data, gdb->b_size); 591 set_buffer_uptodate(gdb); 592 593 err = ext4_handle_dirty_metadata(handle, NULL, gdb); 594 if (unlikely(err)) { 595 brelse(gdb); 596 goto out; 597 } 598 brelse(gdb); 599 } 600 601 /* Zero out all of the reserved backup group descriptor 602 * table blocks 603 */ 604 if (ext4_bg_has_super(sb, group)) { 605 err = sb_issue_zeroout(sb, gdblocks + start + 1, 606 reserved_gdb, GFP_NOFS); 607 if (err) 608 goto out; 609 } 610 611 handle_itb: 612 /* Initialize group tables of the grop @group */ 613 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) 614 goto handle_bb; 615 616 /* Zero out all of the inode table blocks */ 617 block = group_data[i].inode_table; 618 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", 619 block, sbi->s_itb_per_group); 620 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, 621 GFP_NOFS); 622 if (err) 623 goto out; 624 625 handle_bb: 626 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT) 627 goto handle_ib; 628 629 /* Initialize block bitmap of the @group */ 630 block = group_data[i].block_bitmap; 631 err = ext4_resize_ensure_credits_batch(handle, 1); 632 if (err < 0) 633 goto out; 634 635 bh = bclean(handle, sb, block); 636 if (IS_ERR(bh)) { 637 err = PTR_ERR(bh); 638 goto out; 639 } 640 overhead = ext4_group_overhead_blocks(sb, group); 641 if (overhead != 0) { 642 ext4_debug("mark backup superblock %#04llx (+0)\n", 643 start); 644 mb_set_bits(bh->b_data, 0, 645 EXT4_NUM_B2C(sbi, overhead)); 646 } 647 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count), 648 sb->s_blocksize * 8, bh->b_data); 649 err = ext4_handle_dirty_metadata(handle, NULL, bh); 650 brelse(bh); 651 if (err) 652 goto out; 653 654 handle_ib: 655 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 656 continue; 657 658 /* Initialize inode bitmap of the @group */ 659 block = group_data[i].inode_bitmap; 660 err = ext4_resize_ensure_credits_batch(handle, 1); 661 if (err < 0) 662 goto out; 663 /* Mark unused entries in inode bitmap used */ 664 bh = bclean(handle, sb, block); 665 if (IS_ERR(bh)) { 666 err = PTR_ERR(bh); 667 goto out; 668 } 669 670 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 671 sb->s_blocksize * 8, bh->b_data); 672 err = ext4_handle_dirty_metadata(handle, NULL, bh); 673 brelse(bh); 674 if (err) 675 goto out; 676 } 677 678 /* Mark group tables in block bitmap */ 679 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 680 count = group_table_count[j]; 681 start = (&group_data[0].block_bitmap)[j]; 682 block = start; 683 for (i = 1; i < flex_gd->count; i++) { 684 block += group_table_count[j]; 685 if (block == (&group_data[i].block_bitmap)[j]) { 686 count += group_table_count[j]; 687 continue; 688 } 689 err = set_flexbg_block_bitmap(sb, handle, 690 flex_gd, 691 EXT4_B2C(sbi, start), 692 EXT4_B2C(sbi, 693 start + count 694 - 1)); 695 if (err) 696 goto out; 697 count = group_table_count[j]; 698 start = (&group_data[i].block_bitmap)[j]; 699 block = start; 700 } 701 702 if (count) { 703 err = set_flexbg_block_bitmap(sb, handle, 704 flex_gd, 705 EXT4_B2C(sbi, start), 706 EXT4_B2C(sbi, 707 start + count 708 - 1)); 709 if (err) 710 goto out; 711 } 712 } 713 714 out: 715 err2 = ext4_journal_stop(handle); 716 if (err2 && !err) 717 err = err2; 718 719 return err; 720 } 721 722 /* 723 * Iterate through the groups which hold BACKUP superblock/GDT copies in an 724 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before 725 * calling this for the first time. In a sparse filesystem it will be the 726 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 727 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 728 */ 729 unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three, 730 unsigned int *five, unsigned int *seven) 731 { 732 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 733 unsigned int *min = three; 734 int mult = 3; 735 unsigned int ret; 736 737 if (ext4_has_feature_sparse_super2(sb)) { 738 do { 739 if (*min > 2) 740 return UINT_MAX; 741 ret = le32_to_cpu(es->s_backup_bgs[*min - 1]); 742 *min += 1; 743 } while (!ret); 744 return ret; 745 } 746 747 if (!ext4_has_feature_sparse_super(sb)) { 748 ret = *min; 749 *min += 1; 750 return ret; 751 } 752 753 if (*five < *min) { 754 min = five; 755 mult = 5; 756 } 757 if (*seven < *min) { 758 min = seven; 759 mult = 7; 760 } 761 762 ret = *min; 763 *min *= mult; 764 765 return ret; 766 } 767 768 /* 769 * Check that all of the backup GDT blocks are held in the primary GDT block. 770 * It is assumed that they are stored in group order. Returns the number of 771 * groups in current filesystem that have BACKUPS, or -ve error code. 772 */ 773 static int verify_reserved_gdb(struct super_block *sb, 774 ext4_group_t end, 775 struct buffer_head *primary) 776 { 777 const ext4_fsblk_t blk = primary->b_blocknr; 778 unsigned three = 1; 779 unsigned five = 5; 780 unsigned seven = 7; 781 unsigned grp; 782 __le32 *p = (__le32 *)primary->b_data; 783 int gdbackups = 0; 784 785 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { 786 if (le32_to_cpu(*p++) != 787 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ 788 ext4_warning(sb, "reserved GDT %llu" 789 " missing grp %d (%llu)", 790 blk, grp, 791 grp * 792 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + 793 blk); 794 return -EINVAL; 795 } 796 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) 797 return -EFBIG; 798 } 799 800 return gdbackups; 801 } 802 803 /* 804 * Called when we need to bring a reserved group descriptor table block into 805 * use from the resize inode. The primary copy of the new GDT block currently 806 * is an indirect block (under the double indirect block in the resize inode). 807 * The new backup GDT blocks will be stored as leaf blocks in this indirect 808 * block, in group order. Even though we know all the block numbers we need, 809 * we check to ensure that the resize inode has actually reserved these blocks. 810 * 811 * Don't need to update the block bitmaps because the blocks are still in use. 812 * 813 * We get all of the error cases out of the way, so that we are sure to not 814 * fail once we start modifying the data on disk, because JBD has no rollback. 815 */ 816 static int add_new_gdb(handle_t *handle, struct inode *inode, 817 ext4_group_t group) 818 { 819 struct super_block *sb = inode->i_sb; 820 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 821 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 822 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; 823 struct buffer_head **o_group_desc, **n_group_desc = NULL; 824 struct buffer_head *dind = NULL; 825 struct buffer_head *gdb_bh = NULL; 826 int gdbackups; 827 struct ext4_iloc iloc = { .bh = NULL }; 828 __le32 *data; 829 int err; 830 831 if (test_opt(sb, DEBUG)) 832 printk(KERN_DEBUG 833 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 834 gdb_num); 835 836 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 837 if (IS_ERR(gdb_bh)) 838 return PTR_ERR(gdb_bh); 839 840 gdbackups = verify_reserved_gdb(sb, group, gdb_bh); 841 if (gdbackups < 0) { 842 err = gdbackups; 843 goto errout; 844 } 845 846 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 847 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 848 if (IS_ERR(dind)) { 849 err = PTR_ERR(dind); 850 dind = NULL; 851 goto errout; 852 } 853 854 data = (__le32 *)dind->b_data; 855 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { 856 ext4_warning(sb, "new group %u GDT block %llu not reserved", 857 group, gdblock); 858 err = -EINVAL; 859 goto errout; 860 } 861 862 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 863 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 864 EXT4_JTR_NONE); 865 if (unlikely(err)) 866 goto errout; 867 868 BUFFER_TRACE(gdb_bh, "get_write_access"); 869 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 870 if (unlikely(err)) 871 goto errout; 872 873 BUFFER_TRACE(dind, "get_write_access"); 874 err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE); 875 if (unlikely(err)) { 876 ext4_std_error(sb, err); 877 goto errout; 878 } 879 880 /* ext4_reserve_inode_write() gets a reference on the iloc */ 881 err = ext4_reserve_inode_write(handle, inode, &iloc); 882 if (unlikely(err)) 883 goto errout; 884 885 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 886 GFP_KERNEL); 887 if (!n_group_desc) { 888 err = -ENOMEM; 889 ext4_warning(sb, "not enough memory for %lu groups", 890 gdb_num + 1); 891 goto errout; 892 } 893 894 /* 895 * Finally, we have all of the possible failures behind us... 896 * 897 * Remove new GDT block from inode double-indirect block and clear out 898 * the new GDT block for use (which also "frees" the backup GDT blocks 899 * from the reserved inode). We don't need to change the bitmaps for 900 * these blocks, because they are marked as in-use from being in the 901 * reserved inode, and will become GDT blocks (primary and backup). 902 */ 903 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 904 err = ext4_handle_dirty_metadata(handle, NULL, dind); 905 if (unlikely(err)) { 906 ext4_std_error(sb, err); 907 goto errout; 908 } 909 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 910 (9 - EXT4_SB(sb)->s_cluster_bits); 911 ext4_mark_iloc_dirty(handle, inode, &iloc); 912 memset(gdb_bh->b_data, 0, sb->s_blocksize); 913 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 914 if (unlikely(err)) { 915 ext4_std_error(sb, err); 916 iloc.bh = NULL; 917 goto errout; 918 } 919 brelse(dind); 920 921 rcu_read_lock(); 922 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 923 memcpy(n_group_desc, o_group_desc, 924 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 925 rcu_read_unlock(); 926 n_group_desc[gdb_num] = gdb_bh; 927 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 928 EXT4_SB(sb)->s_gdb_count++; 929 ext4_kvfree_array_rcu(o_group_desc); 930 931 lock_buffer(EXT4_SB(sb)->s_sbh); 932 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 933 ext4_superblock_csum_set(sb); 934 unlock_buffer(EXT4_SB(sb)->s_sbh); 935 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 936 if (err) 937 ext4_std_error(sb, err); 938 return err; 939 errout: 940 kvfree(n_group_desc); 941 brelse(iloc.bh); 942 brelse(dind); 943 brelse(gdb_bh); 944 945 ext4_debug("leaving with error %d\n", err); 946 return err; 947 } 948 949 /* 950 * add_new_gdb_meta_bg is the sister of add_new_gdb. 951 */ 952 static int add_new_gdb_meta_bg(struct super_block *sb, 953 handle_t *handle, ext4_group_t group) { 954 ext4_fsblk_t gdblock; 955 struct buffer_head *gdb_bh; 956 struct buffer_head **o_group_desc, **n_group_desc; 957 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 958 int err; 959 960 gdblock = ext4_meta_bg_first_block_no(sb, group) + 961 ext4_bg_has_super(sb, group); 962 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 963 if (IS_ERR(gdb_bh)) 964 return PTR_ERR(gdb_bh); 965 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 966 GFP_KERNEL); 967 if (!n_group_desc) { 968 brelse(gdb_bh); 969 err = -ENOMEM; 970 ext4_warning(sb, "not enough memory for %lu groups", 971 gdb_num + 1); 972 return err; 973 } 974 975 rcu_read_lock(); 976 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 977 memcpy(n_group_desc, o_group_desc, 978 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 979 rcu_read_unlock(); 980 n_group_desc[gdb_num] = gdb_bh; 981 982 BUFFER_TRACE(gdb_bh, "get_write_access"); 983 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 984 if (err) { 985 kvfree(n_group_desc); 986 brelse(gdb_bh); 987 return err; 988 } 989 990 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 991 EXT4_SB(sb)->s_gdb_count++; 992 ext4_kvfree_array_rcu(o_group_desc); 993 return err; 994 } 995 996 /* 997 * Called when we are adding a new group which has a backup copy of each of 998 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 999 * We need to add these reserved backup GDT blocks to the resize inode, so 1000 * that they are kept for future resizing and not allocated to files. 1001 * 1002 * Each reserved backup GDT block will go into a different indirect block. 1003 * The indirect blocks are actually the primary reserved GDT blocks, 1004 * so we know in advance what their block numbers are. We only get the 1005 * double-indirect block to verify it is pointing to the primary reserved 1006 * GDT blocks so we don't overwrite a data block by accident. The reserved 1007 * backup GDT blocks are stored in their reserved primary GDT block. 1008 */ 1009 static int reserve_backup_gdb(handle_t *handle, struct inode *inode, 1010 ext4_group_t group) 1011 { 1012 struct super_block *sb = inode->i_sb; 1013 int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 1014 int cluster_bits = EXT4_SB(sb)->s_cluster_bits; 1015 struct buffer_head **primary; 1016 struct buffer_head *dind; 1017 struct ext4_iloc iloc; 1018 ext4_fsblk_t blk; 1019 __le32 *data, *end; 1020 int gdbackups = 0; 1021 int res, i; 1022 int err; 1023 1024 primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS); 1025 if (!primary) 1026 return -ENOMEM; 1027 1028 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 1029 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 1030 if (IS_ERR(dind)) { 1031 err = PTR_ERR(dind); 1032 dind = NULL; 1033 goto exit_free; 1034 } 1035 1036 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; 1037 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % 1038 EXT4_ADDR_PER_BLOCK(sb)); 1039 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); 1040 1041 /* Get each reserved primary GDT block and verify it holds backups */ 1042 for (res = 0; res < reserved_gdb; res++, blk++) { 1043 if (le32_to_cpu(*data) != blk) { 1044 ext4_warning(sb, "reserved block %llu" 1045 " not at offset %ld", 1046 blk, 1047 (long)(data - (__le32 *)dind->b_data)); 1048 err = -EINVAL; 1049 goto exit_bh; 1050 } 1051 primary[res] = ext4_sb_bread(sb, blk, 0); 1052 if (IS_ERR(primary[res])) { 1053 err = PTR_ERR(primary[res]); 1054 primary[res] = NULL; 1055 goto exit_bh; 1056 } 1057 gdbackups = verify_reserved_gdb(sb, group, primary[res]); 1058 if (gdbackups < 0) { 1059 brelse(primary[res]); 1060 err = gdbackups; 1061 goto exit_bh; 1062 } 1063 if (++data >= end) 1064 data = (__le32 *)dind->b_data; 1065 } 1066 1067 for (i = 0; i < reserved_gdb; i++) { 1068 BUFFER_TRACE(primary[i], "get_write_access"); 1069 if ((err = ext4_journal_get_write_access(handle, sb, primary[i], 1070 EXT4_JTR_NONE))) 1071 goto exit_bh; 1072 } 1073 1074 if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) 1075 goto exit_bh; 1076 1077 /* 1078 * Finally we can add each of the reserved backup GDT blocks from 1079 * the new group to its reserved primary GDT block. 1080 */ 1081 blk = group * EXT4_BLOCKS_PER_GROUP(sb); 1082 for (i = 0; i < reserved_gdb; i++) { 1083 int err2; 1084 data = (__le32 *)primary[i]->b_data; 1085 /* printk("reserving backup %lu[%u] = %lu\n", 1086 primary[i]->b_blocknr, gdbackups, 1087 blk + primary[i]->b_blocknr); */ 1088 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); 1089 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); 1090 if (!err) 1091 err = err2; 1092 } 1093 1094 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits); 1095 ext4_mark_iloc_dirty(handle, inode, &iloc); 1096 1097 exit_bh: 1098 while (--res >= 0) 1099 brelse(primary[res]); 1100 brelse(dind); 1101 1102 exit_free: 1103 kfree(primary); 1104 1105 return err; 1106 } 1107 1108 static inline void ext4_set_block_group_nr(struct super_block *sb, char *data, 1109 ext4_group_t group) 1110 { 1111 struct ext4_super_block *es = (struct ext4_super_block *) data; 1112 1113 es->s_block_group_nr = cpu_to_le16(group); 1114 if (ext4_has_metadata_csum(sb)) 1115 es->s_checksum = ext4_superblock_csum(sb, es); 1116 } 1117 1118 /* 1119 * Update the backup copies of the ext4 metadata. These don't need to be part 1120 * of the main resize transaction, because e2fsck will re-write them if there 1121 * is a problem (basically only OOM will cause a problem). However, we 1122 * _should_ update the backups if possible, in case the primary gets trashed 1123 * for some reason and we need to run e2fsck from a backup superblock. The 1124 * important part is that the new block and inode counts are in the backup 1125 * superblocks, and the location of the new group metadata in the GDT backups. 1126 * 1127 * We do not need take the s_resize_lock for this, because these 1128 * blocks are not otherwise touched by the filesystem code when it is 1129 * mounted. We don't need to worry about last changing from 1130 * sbi->s_groups_count, because the worst that can happen is that we 1131 * do not copy the full number of backups at this time. The resize 1132 * which changed s_groups_count will backup again. 1133 */ 1134 static void update_backups(struct super_block *sb, sector_t blk_off, char *data, 1135 int size, int meta_bg) 1136 { 1137 struct ext4_sb_info *sbi = EXT4_SB(sb); 1138 ext4_group_t last; 1139 const int bpg = EXT4_BLOCKS_PER_GROUP(sb); 1140 unsigned three = 1; 1141 unsigned five = 5; 1142 unsigned seven = 7; 1143 ext4_group_t group = 0; 1144 int rest = sb->s_blocksize - size; 1145 handle_t *handle; 1146 int err = 0, err2; 1147 1148 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 1149 if (IS_ERR(handle)) { 1150 group = 1; 1151 err = PTR_ERR(handle); 1152 goto exit_err; 1153 } 1154 1155 if (meta_bg == 0) { 1156 group = ext4_list_backups(sb, &three, &five, &seven); 1157 last = sbi->s_groups_count; 1158 } else { 1159 group = ext4_get_group_number(sb, blk_off) + 1; 1160 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); 1161 } 1162 1163 while (group < sbi->s_groups_count) { 1164 struct buffer_head *bh; 1165 ext4_fsblk_t backup_block; 1166 int has_super = ext4_bg_has_super(sb, group); 1167 ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group); 1168 1169 /* Out of journal space, and can't get more - abort - so sad */ 1170 err = ext4_resize_ensure_credits_batch(handle, 1); 1171 if (err < 0) 1172 break; 1173 1174 if (meta_bg == 0) 1175 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; 1176 else 1177 backup_block = first_block + has_super; 1178 1179 bh = sb_getblk(sb, backup_block); 1180 if (unlikely(!bh)) { 1181 err = -ENOMEM; 1182 break; 1183 } 1184 ext4_debug("update metadata backup %llu(+%llu)\n", 1185 backup_block, backup_block - 1186 ext4_group_first_block_no(sb, group)); 1187 BUFFER_TRACE(bh, "get_write_access"); 1188 if ((err = ext4_journal_get_write_access(handle, sb, bh, 1189 EXT4_JTR_NONE))) { 1190 brelse(bh); 1191 break; 1192 } 1193 lock_buffer(bh); 1194 memcpy(bh->b_data, data, size); 1195 if (rest) 1196 memset(bh->b_data + size, 0, rest); 1197 if (has_super && (backup_block == first_block)) 1198 ext4_set_block_group_nr(sb, bh->b_data, group); 1199 set_buffer_uptodate(bh); 1200 unlock_buffer(bh); 1201 err = ext4_handle_dirty_metadata(handle, NULL, bh); 1202 if (unlikely(err)) 1203 ext4_std_error(sb, err); 1204 brelse(bh); 1205 1206 if (meta_bg == 0) 1207 group = ext4_list_backups(sb, &three, &five, &seven); 1208 else if (group == last) 1209 break; 1210 else 1211 group = last; 1212 } 1213 if ((err2 = ext4_journal_stop(handle)) && !err) 1214 err = err2; 1215 1216 /* 1217 * Ugh! Need to have e2fsck write the backup copies. It is too 1218 * late to revert the resize, we shouldn't fail just because of 1219 * the backup copies (they are only needed in case of corruption). 1220 * 1221 * However, if we got here we have a journal problem too, so we 1222 * can't really start a transaction to mark the superblock. 1223 * Chicken out and just set the flag on the hope it will be written 1224 * to disk, and if not - we will simply wait until next fsck. 1225 */ 1226 exit_err: 1227 if (err) { 1228 ext4_warning(sb, "can't update backup for group %u (err %d), " 1229 "forcing fsck on next reboot", group, err); 1230 sbi->s_mount_state &= ~EXT4_VALID_FS; 1231 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 1232 mark_buffer_dirty(sbi->s_sbh); 1233 } 1234 } 1235 1236 /* 1237 * ext4_add_new_descs() adds @count group descriptor of groups 1238 * starting at @group 1239 * 1240 * @handle: journal handle 1241 * @sb: super block 1242 * @group: the group no. of the first group desc to be added 1243 * @resize_inode: the resize inode 1244 * @count: number of group descriptors to be added 1245 */ 1246 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, 1247 ext4_group_t group, struct inode *resize_inode, 1248 ext4_group_t count) 1249 { 1250 struct ext4_sb_info *sbi = EXT4_SB(sb); 1251 struct ext4_super_block *es = sbi->s_es; 1252 struct buffer_head *gdb_bh; 1253 int i, gdb_off, gdb_num, err = 0; 1254 int meta_bg; 1255 1256 meta_bg = ext4_has_feature_meta_bg(sb); 1257 for (i = 0; i < count; i++, group++) { 1258 int reserved_gdb = ext4_bg_has_super(sb, group) ? 1259 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1260 1261 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1262 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1263 1264 /* 1265 * We will only either add reserved group blocks to a backup group 1266 * or remove reserved blocks for the first group in a new group block. 1267 * Doing both would be mean more complex code, and sane people don't 1268 * use non-sparse filesystems anymore. This is already checked above. 1269 */ 1270 if (gdb_off) { 1271 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1272 gdb_num); 1273 BUFFER_TRACE(gdb_bh, "get_write_access"); 1274 err = ext4_journal_get_write_access(handle, sb, gdb_bh, 1275 EXT4_JTR_NONE); 1276 1277 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) 1278 err = reserve_backup_gdb(handle, resize_inode, group); 1279 } else if (meta_bg != 0) { 1280 err = add_new_gdb_meta_bg(sb, handle, group); 1281 } else { 1282 err = add_new_gdb(handle, resize_inode, group); 1283 } 1284 if (err) 1285 break; 1286 } 1287 return err; 1288 } 1289 1290 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) 1291 { 1292 struct buffer_head *bh = sb_getblk(sb, block); 1293 if (unlikely(!bh)) 1294 return NULL; 1295 if (!bh_uptodate_or_lock(bh)) { 1296 if (ext4_read_bh(bh, 0, NULL) < 0) { 1297 brelse(bh); 1298 return NULL; 1299 } 1300 } 1301 1302 return bh; 1303 } 1304 1305 static int ext4_set_bitmap_checksums(struct super_block *sb, 1306 struct ext4_group_desc *gdp, 1307 struct ext4_new_group_data *group_data) 1308 { 1309 struct buffer_head *bh; 1310 1311 if (!ext4_has_metadata_csum(sb)) 1312 return 0; 1313 1314 bh = ext4_get_bitmap(sb, group_data->inode_bitmap); 1315 if (!bh) 1316 return -EIO; 1317 ext4_inode_bitmap_csum_set(sb, gdp, bh, 1318 EXT4_INODES_PER_GROUP(sb) / 8); 1319 brelse(bh); 1320 1321 bh = ext4_get_bitmap(sb, group_data->block_bitmap); 1322 if (!bh) 1323 return -EIO; 1324 ext4_block_bitmap_csum_set(sb, gdp, bh); 1325 brelse(bh); 1326 1327 return 0; 1328 } 1329 1330 /* 1331 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg 1332 */ 1333 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, 1334 struct ext4_new_flex_group_data *flex_gd) 1335 { 1336 struct ext4_new_group_data *group_data = flex_gd->groups; 1337 struct ext4_group_desc *gdp; 1338 struct ext4_sb_info *sbi = EXT4_SB(sb); 1339 struct buffer_head *gdb_bh; 1340 ext4_group_t group; 1341 __u16 *bg_flags = flex_gd->bg_flags; 1342 int i, gdb_off, gdb_num, err = 0; 1343 1344 1345 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { 1346 group = group_data->group; 1347 1348 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1349 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1350 1351 /* 1352 * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). 1353 */ 1354 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num); 1355 /* Update group descriptor block for new group */ 1356 gdp = (struct ext4_group_desc *)(gdb_bh->b_data + 1357 gdb_off * EXT4_DESC_SIZE(sb)); 1358 1359 memset(gdp, 0, EXT4_DESC_SIZE(sb)); 1360 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); 1361 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); 1362 err = ext4_set_bitmap_checksums(sb, gdp, group_data); 1363 if (err) { 1364 ext4_std_error(sb, err); 1365 break; 1366 } 1367 1368 ext4_inode_table_set(sb, gdp, group_data->inode_table); 1369 ext4_free_group_clusters_set(sb, gdp, 1370 group_data->free_clusters_count); 1371 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); 1372 if (ext4_has_group_desc_csum(sb)) 1373 ext4_itable_unused_set(sb, gdp, 1374 EXT4_INODES_PER_GROUP(sb)); 1375 gdp->bg_flags = cpu_to_le16(*bg_flags); 1376 ext4_group_desc_csum_set(sb, group, gdp); 1377 1378 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 1379 if (unlikely(err)) { 1380 ext4_std_error(sb, err); 1381 break; 1382 } 1383 1384 /* 1385 * We can allocate memory for mb_alloc based on the new group 1386 * descriptor 1387 */ 1388 err = ext4_mb_add_groupinfo(sb, group, gdp); 1389 if (err) 1390 break; 1391 } 1392 return err; 1393 } 1394 1395 static void ext4_add_overhead(struct super_block *sb, 1396 const ext4_fsblk_t overhead) 1397 { 1398 struct ext4_sb_info *sbi = EXT4_SB(sb); 1399 struct ext4_super_block *es = sbi->s_es; 1400 1401 sbi->s_overhead += overhead; 1402 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1403 smp_wmb(); 1404 } 1405 1406 /* 1407 * ext4_update_super() updates the super block so that the newly added 1408 * groups can be seen by the filesystem. 1409 * 1410 * @sb: super block 1411 * @flex_gd: new added groups 1412 */ 1413 static void ext4_update_super(struct super_block *sb, 1414 struct ext4_new_flex_group_data *flex_gd) 1415 { 1416 ext4_fsblk_t blocks_count = 0; 1417 ext4_fsblk_t free_blocks = 0; 1418 ext4_fsblk_t reserved_blocks = 0; 1419 struct ext4_new_group_data *group_data = flex_gd->groups; 1420 struct ext4_sb_info *sbi = EXT4_SB(sb); 1421 struct ext4_super_block *es = sbi->s_es; 1422 int i; 1423 1424 BUG_ON(flex_gd->count == 0 || group_data == NULL); 1425 /* 1426 * Make the new blocks and inodes valid next. We do this before 1427 * increasing the group count so that once the group is enabled, 1428 * all of its blocks and inodes are already valid. 1429 * 1430 * We always allocate group-by-group, then block-by-block or 1431 * inode-by-inode within a group, so enabling these 1432 * blocks/inodes before the group is live won't actually let us 1433 * allocate the new space yet. 1434 */ 1435 for (i = 0; i < flex_gd->count; i++) { 1436 blocks_count += group_data[i].blocks_count; 1437 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count); 1438 } 1439 1440 reserved_blocks = ext4_r_blocks_count(es) * 100; 1441 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es)); 1442 reserved_blocks *= blocks_count; 1443 do_div(reserved_blocks, 100); 1444 1445 lock_buffer(sbi->s_sbh); 1446 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); 1447 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); 1448 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1449 flex_gd->count); 1450 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1451 flex_gd->count); 1452 1453 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); 1454 /* 1455 * We need to protect s_groups_count against other CPUs seeing 1456 * inconsistent state in the superblock. 1457 * 1458 * The precise rules we use are: 1459 * 1460 * * Writers must perform a smp_wmb() after updating all 1461 * dependent data and before modifying the groups count 1462 * 1463 * * Readers must perform an smp_rmb() after reading the groups 1464 * count and before reading any dependent data. 1465 * 1466 * NB. These rules can be relaxed when checking the group count 1467 * while freeing data, as we can only allocate from a block 1468 * group after serialising against the group count, and we can 1469 * only then free after serialising in turn against that 1470 * allocation. 1471 */ 1472 smp_wmb(); 1473 1474 /* Update the global fs size fields */ 1475 sbi->s_groups_count += flex_gd->count; 1476 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 1477 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 1478 1479 /* Update the reserved block counts only once the new group is 1480 * active. */ 1481 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + 1482 reserved_blocks); 1483 1484 /* Update the free space counts */ 1485 percpu_counter_add(&sbi->s_freeclusters_counter, 1486 EXT4_NUM_B2C(sbi, free_blocks)); 1487 percpu_counter_add(&sbi->s_freeinodes_counter, 1488 EXT4_INODES_PER_GROUP(sb) * flex_gd->count); 1489 1490 ext4_debug("free blocks count %llu", 1491 percpu_counter_read(&sbi->s_freeclusters_counter)); 1492 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { 1493 ext4_group_t flex_group; 1494 struct flex_groups *fg; 1495 1496 flex_group = ext4_flex_group(sbi, group_data[0].group); 1497 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 1498 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), 1499 &fg->free_clusters); 1500 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, 1501 &fg->free_inodes); 1502 } 1503 1504 /* 1505 * Update the fs overhead information. 1506 * 1507 * For bigalloc, if the superblock already has a properly calculated 1508 * overhead, update it with a value based on numbers already computed 1509 * above for the newly allocated capacity. 1510 */ 1511 if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0)) 1512 ext4_add_overhead(sb, 1513 EXT4_NUM_B2C(sbi, blocks_count - free_blocks)); 1514 else 1515 ext4_calculate_overhead(sb); 1516 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1517 1518 ext4_superblock_csum_set(sb); 1519 unlock_buffer(sbi->s_sbh); 1520 if (test_opt(sb, DEBUG)) 1521 printk(KERN_DEBUG "EXT4-fs: added group %u:" 1522 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, 1523 blocks_count, free_blocks, reserved_blocks); 1524 } 1525 1526 /* Add a flex group to an fs. Ensure we handle all possible error conditions 1527 * _before_ we start modifying the filesystem, because we cannot abort the 1528 * transaction and not have it write the data to disk. 1529 */ 1530 static int ext4_flex_group_add(struct super_block *sb, 1531 struct inode *resize_inode, 1532 struct ext4_new_flex_group_data *flex_gd) 1533 { 1534 struct ext4_sb_info *sbi = EXT4_SB(sb); 1535 struct ext4_super_block *es = sbi->s_es; 1536 ext4_fsblk_t o_blocks_count; 1537 ext4_grpblk_t last; 1538 ext4_group_t group; 1539 handle_t *handle; 1540 unsigned reserved_gdb; 1541 int err = 0, err2 = 0, credit; 1542 1543 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); 1544 1545 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 1546 o_blocks_count = ext4_blocks_count(es); 1547 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1548 BUG_ON(last); 1549 1550 err = setup_new_flex_group_blocks(sb, flex_gd); 1551 if (err) 1552 goto exit; 1553 /* 1554 * We will always be modifying at least the superblock and GDT 1555 * blocks. If we are adding a group past the last current GDT block, 1556 * we will also modify the inode and the dindirect block. If we 1557 * are adding a group with superblock/GDT backups we will also 1558 * modify each of the reserved GDT dindirect blocks. 1559 */ 1560 credit = 3; /* sb, resize inode, resize inode dindirect */ 1561 /* GDT blocks */ 1562 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); 1563 credit += reserved_gdb; /* Reserved GDT dindirect blocks */ 1564 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1565 if (IS_ERR(handle)) { 1566 err = PTR_ERR(handle); 1567 goto exit; 1568 } 1569 1570 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1571 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, 1572 EXT4_JTR_NONE); 1573 if (err) 1574 goto exit_journal; 1575 1576 group = flex_gd->groups[0].group; 1577 BUG_ON(group != sbi->s_groups_count); 1578 err = ext4_add_new_descs(handle, sb, group, 1579 resize_inode, flex_gd->count); 1580 if (err) 1581 goto exit_journal; 1582 1583 err = ext4_setup_new_descs(handle, sb, flex_gd); 1584 if (err) 1585 goto exit_journal; 1586 1587 ext4_update_super(sb, flex_gd); 1588 1589 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 1590 1591 exit_journal: 1592 err2 = ext4_journal_stop(handle); 1593 if (!err) 1594 err = err2; 1595 1596 if (!err) { 1597 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1598 int gdb_num_end = ((group + flex_gd->count - 1) / 1599 EXT4_DESC_PER_BLOCK(sb)); 1600 int meta_bg = ext4_has_feature_meta_bg(sb); 1601 sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr - 1602 ext4_group_first_block_no(sb, 0); 1603 sector_t old_gdb = 0; 1604 1605 update_backups(sb, ext4_group_first_block_no(sb, 0), 1606 (char *)es, sizeof(struct ext4_super_block), 0); 1607 for (; gdb_num <= gdb_num_end; gdb_num++) { 1608 struct buffer_head *gdb_bh; 1609 1610 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1611 gdb_num); 1612 if (old_gdb == gdb_bh->b_blocknr) 1613 continue; 1614 update_backups(sb, gdb_bh->b_blocknr - padding_blocks, 1615 gdb_bh->b_data, gdb_bh->b_size, meta_bg); 1616 old_gdb = gdb_bh->b_blocknr; 1617 } 1618 } 1619 exit: 1620 return err; 1621 } 1622 1623 static int ext4_setup_next_flex_gd(struct super_block *sb, 1624 struct ext4_new_flex_group_data *flex_gd, 1625 ext4_fsblk_t n_blocks_count, 1626 unsigned long flexbg_size) 1627 { 1628 struct ext4_sb_info *sbi = EXT4_SB(sb); 1629 struct ext4_super_block *es = sbi->s_es; 1630 struct ext4_new_group_data *group_data = flex_gd->groups; 1631 ext4_fsblk_t o_blocks_count; 1632 ext4_group_t n_group; 1633 ext4_group_t group; 1634 ext4_group_t last_group; 1635 ext4_grpblk_t last; 1636 ext4_grpblk_t clusters_per_group; 1637 unsigned long i; 1638 1639 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb); 1640 1641 o_blocks_count = ext4_blocks_count(es); 1642 1643 if (o_blocks_count == n_blocks_count) 1644 return 0; 1645 1646 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1647 BUG_ON(last); 1648 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last); 1649 1650 last_group = group | (flexbg_size - 1); 1651 if (last_group > n_group) 1652 last_group = n_group; 1653 1654 flex_gd->count = last_group - group + 1; 1655 1656 for (i = 0; i < flex_gd->count; i++) { 1657 int overhead; 1658 1659 group_data[i].group = group + i; 1660 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb); 1661 overhead = ext4_group_overhead_blocks(sb, group + i); 1662 group_data[i].mdata_blocks = overhead; 1663 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb); 1664 if (ext4_has_group_desc_csum(sb)) { 1665 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | 1666 EXT4_BG_INODE_UNINIT; 1667 if (!test_opt(sb, INIT_INODE_TABLE)) 1668 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED; 1669 } else 1670 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; 1671 } 1672 1673 if (last_group == n_group && ext4_has_group_desc_csum(sb)) 1674 /* We need to initialize block bitmap of last group. */ 1675 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; 1676 1677 if ((last_group == n_group) && (last != clusters_per_group - 1)) { 1678 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1); 1679 group_data[i - 1].free_clusters_count -= clusters_per_group - 1680 last - 1; 1681 } 1682 1683 return 1; 1684 } 1685 1686 /* Add group descriptor data to an existing or new group descriptor block. 1687 * Ensure we handle all possible error conditions _before_ we start modifying 1688 * the filesystem, because we cannot abort the transaction and not have it 1689 * write the data to disk. 1690 * 1691 * If we are on a GDT block boundary, we need to get the reserved GDT block. 1692 * Otherwise, we may need to add backup GDT blocks for a sparse group. 1693 * 1694 * We only need to hold the superblock lock while we are actually adding 1695 * in the new group's counts to the superblock. Prior to that we have 1696 * not really "added" the group at all. We re-check that we are still 1697 * adding in the last group in case things have changed since verifying. 1698 */ 1699 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) 1700 { 1701 struct ext4_new_flex_group_data flex_gd; 1702 struct ext4_sb_info *sbi = EXT4_SB(sb); 1703 struct ext4_super_block *es = sbi->s_es; 1704 int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 1705 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1706 struct inode *inode = NULL; 1707 int gdb_off; 1708 int err; 1709 __u16 bg_flags = 0; 1710 1711 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); 1712 1713 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) { 1714 ext4_warning(sb, "Can't resize non-sparse filesystem further"); 1715 return -EPERM; 1716 } 1717 1718 if (ext4_blocks_count(es) + input->blocks_count < 1719 ext4_blocks_count(es)) { 1720 ext4_warning(sb, "blocks_count overflow"); 1721 return -EINVAL; 1722 } 1723 1724 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 1725 le32_to_cpu(es->s_inodes_count)) { 1726 ext4_warning(sb, "inodes_count overflow"); 1727 return -EINVAL; 1728 } 1729 1730 if (reserved_gdb || gdb_off == 0) { 1731 if (!ext4_has_feature_resize_inode(sb) || 1732 !le16_to_cpu(es->s_reserved_gdt_blocks)) { 1733 ext4_warning(sb, 1734 "No reserved GDT blocks, can't resize"); 1735 return -EPERM; 1736 } 1737 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL); 1738 if (IS_ERR(inode)) { 1739 ext4_warning(sb, "Error opening resize inode"); 1740 return PTR_ERR(inode); 1741 } 1742 } 1743 1744 1745 err = verify_group_input(sb, input); 1746 if (err) 1747 goto out; 1748 1749 err = ext4_alloc_flex_bg_array(sb, input->group + 1); 1750 if (err) 1751 goto out; 1752 1753 err = ext4_mb_alloc_groupinfo(sb, input->group + 1); 1754 if (err) 1755 goto out; 1756 1757 flex_gd.count = 1; 1758 flex_gd.groups = input; 1759 flex_gd.bg_flags = &bg_flags; 1760 err = ext4_flex_group_add(sb, inode, &flex_gd); 1761 out: 1762 iput(inode); 1763 return err; 1764 } /* ext4_group_add */ 1765 1766 /* 1767 * extend a group without checking assuming that checking has been done. 1768 */ 1769 static int ext4_group_extend_no_check(struct super_block *sb, 1770 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) 1771 { 1772 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1773 handle_t *handle; 1774 int err = 0, err2; 1775 1776 /* We will update the superblock, one block bitmap, and 1777 * one group descriptor via ext4_group_add_blocks(). 1778 */ 1779 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3); 1780 if (IS_ERR(handle)) { 1781 err = PTR_ERR(handle); 1782 ext4_warning(sb, "error %d on journal start", err); 1783 return err; 1784 } 1785 1786 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 1787 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 1788 EXT4_JTR_NONE); 1789 if (err) { 1790 ext4_warning(sb, "error %d on journal write access", err); 1791 goto errout; 1792 } 1793 1794 lock_buffer(EXT4_SB(sb)->s_sbh); 1795 ext4_blocks_count_set(es, o_blocks_count + add); 1796 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add); 1797 ext4_superblock_csum_set(sb); 1798 unlock_buffer(EXT4_SB(sb)->s_sbh); 1799 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, 1800 o_blocks_count + add); 1801 /* We add the blocks to the bitmap and set the group need init bit */ 1802 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add); 1803 if (err) 1804 goto errout; 1805 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 1806 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, 1807 o_blocks_count + add); 1808 errout: 1809 err2 = ext4_journal_stop(handle); 1810 if (err2 && !err) 1811 err = err2; 1812 1813 if (!err) { 1814 if (test_opt(sb, DEBUG)) 1815 printk(KERN_DEBUG "EXT4-fs: extended group to %llu " 1816 "blocks\n", ext4_blocks_count(es)); 1817 update_backups(sb, ext4_group_first_block_no(sb, 0), 1818 (char *)es, sizeof(struct ext4_super_block), 0); 1819 } 1820 return err; 1821 } 1822 1823 /* 1824 * Extend the filesystem to the new number of blocks specified. This entry 1825 * point is only used to extend the current filesystem to the end of the last 1826 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" 1827 * for emergencies (because it has no dependencies on reserved blocks). 1828 * 1829 * If we _really_ wanted, we could use default values to call ext4_group_add() 1830 * allow the "remount" trick to work for arbitrary resizing, assuming enough 1831 * GDT blocks are reserved to grow to the desired size. 1832 */ 1833 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, 1834 ext4_fsblk_t n_blocks_count) 1835 { 1836 ext4_fsblk_t o_blocks_count; 1837 ext4_grpblk_t last; 1838 ext4_grpblk_t add; 1839 struct buffer_head *bh; 1840 ext4_group_t group; 1841 1842 o_blocks_count = ext4_blocks_count(es); 1843 1844 if (test_opt(sb, DEBUG)) 1845 ext4_msg(sb, KERN_DEBUG, 1846 "extending last group from %llu to %llu blocks", 1847 o_blocks_count, n_blocks_count); 1848 1849 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) 1850 return 0; 1851 1852 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { 1853 ext4_msg(sb, KERN_ERR, 1854 "filesystem too large to resize to %llu blocks safely", 1855 n_blocks_count); 1856 return -EINVAL; 1857 } 1858 1859 if (n_blocks_count < o_blocks_count) { 1860 ext4_warning(sb, "can't shrink FS - resize aborted"); 1861 return -EINVAL; 1862 } 1863 1864 /* Handle the remaining blocks in the last group only. */ 1865 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1866 1867 if (last == 0) { 1868 ext4_warning(sb, "need to use ext2online to resize further"); 1869 return -EPERM; 1870 } 1871 1872 add = EXT4_BLOCKS_PER_GROUP(sb) - last; 1873 1874 if (o_blocks_count + add < o_blocks_count) { 1875 ext4_warning(sb, "blocks_count overflow"); 1876 return -EINVAL; 1877 } 1878 1879 if (o_blocks_count + add > n_blocks_count) 1880 add = n_blocks_count - o_blocks_count; 1881 1882 if (o_blocks_count + add < n_blocks_count) 1883 ext4_warning(sb, "will only finish group (%llu blocks, %u new)", 1884 o_blocks_count + add, add); 1885 1886 /* See if the device is actually as big as what was requested */ 1887 bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0); 1888 if (IS_ERR(bh)) { 1889 ext4_warning(sb, "can't read last block, resize aborted"); 1890 return -ENOSPC; 1891 } 1892 brelse(bh); 1893 1894 return ext4_group_extend_no_check(sb, o_blocks_count, add); 1895 } /* ext4_group_extend */ 1896 1897 1898 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups) 1899 { 1900 return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); 1901 } 1902 1903 /* 1904 * Release the resize inode and drop the resize_inode feature if there 1905 * are no more reserved gdt blocks, and then convert the file system 1906 * to enable meta_bg 1907 */ 1908 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode) 1909 { 1910 handle_t *handle; 1911 struct ext4_sb_info *sbi = EXT4_SB(sb); 1912 struct ext4_super_block *es = sbi->s_es; 1913 struct ext4_inode_info *ei = EXT4_I(inode); 1914 ext4_fsblk_t nr; 1915 int i, ret, err = 0; 1916 int credits = 1; 1917 1918 ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg"); 1919 if (inode) { 1920 if (es->s_reserved_gdt_blocks) { 1921 ext4_error(sb, "Unexpected non-zero " 1922 "s_reserved_gdt_blocks"); 1923 return -EPERM; 1924 } 1925 1926 /* Do a quick sanity check of the resize inode */ 1927 if (inode->i_blocks != 1 << (inode->i_blkbits - 1928 (9 - sbi->s_cluster_bits))) 1929 goto invalid_resize_inode; 1930 for (i = 0; i < EXT4_N_BLOCKS; i++) { 1931 if (i == EXT4_DIND_BLOCK) { 1932 if (ei->i_data[i]) 1933 continue; 1934 else 1935 goto invalid_resize_inode; 1936 } 1937 if (ei->i_data[i]) 1938 goto invalid_resize_inode; 1939 } 1940 credits += 3; /* block bitmap, bg descriptor, resize inode */ 1941 } 1942 1943 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits); 1944 if (IS_ERR(handle)) 1945 return PTR_ERR(handle); 1946 1947 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1948 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, 1949 EXT4_JTR_NONE); 1950 if (err) 1951 goto errout; 1952 1953 lock_buffer(sbi->s_sbh); 1954 ext4_clear_feature_resize_inode(sb); 1955 ext4_set_feature_meta_bg(sb); 1956 sbi->s_es->s_first_meta_bg = 1957 cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count)); 1958 ext4_superblock_csum_set(sb); 1959 unlock_buffer(sbi->s_sbh); 1960 1961 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 1962 if (err) { 1963 ext4_std_error(sb, err); 1964 goto errout; 1965 } 1966 1967 if (inode) { 1968 nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]); 1969 ext4_free_blocks(handle, inode, NULL, nr, 1, 1970 EXT4_FREE_BLOCKS_METADATA | 1971 EXT4_FREE_BLOCKS_FORGET); 1972 ei->i_data[EXT4_DIND_BLOCK] = 0; 1973 inode->i_blocks = 0; 1974 1975 err = ext4_mark_inode_dirty(handle, inode); 1976 if (err) 1977 ext4_std_error(sb, err); 1978 } 1979 1980 errout: 1981 ret = ext4_journal_stop(handle); 1982 return err ? err : ret; 1983 1984 invalid_resize_inode: 1985 ext4_error(sb, "corrupted/inconsistent resize inode"); 1986 return -EINVAL; 1987 } 1988 1989 /* 1990 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count 1991 * 1992 * @sb: super block of the fs to be resized 1993 * @n_blocks_count: the number of blocks resides in the resized fs 1994 */ 1995 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) 1996 { 1997 struct ext4_new_flex_group_data *flex_gd = NULL; 1998 struct ext4_sb_info *sbi = EXT4_SB(sb); 1999 struct ext4_super_block *es = sbi->s_es; 2000 struct buffer_head *bh; 2001 struct inode *resize_inode = NULL; 2002 ext4_grpblk_t add, offset; 2003 unsigned long n_desc_blocks; 2004 unsigned long o_desc_blocks; 2005 ext4_group_t o_group; 2006 ext4_group_t n_group; 2007 ext4_fsblk_t o_blocks_count; 2008 ext4_fsblk_t n_blocks_count_retry = 0; 2009 unsigned long last_update_time = 0; 2010 int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex; 2011 int meta_bg; 2012 2013 /* See if the device is actually as big as what was requested */ 2014 bh = ext4_sb_bread(sb, n_blocks_count - 1, 0); 2015 if (IS_ERR(bh)) { 2016 ext4_warning(sb, "can't read last block, resize aborted"); 2017 return -ENOSPC; 2018 } 2019 brelse(bh); 2020 2021 /* 2022 * For bigalloc, trim the requested size to the nearest cluster 2023 * boundary to avoid creating an unusable filesystem. We do this 2024 * silently, instead of returning an error, to avoid breaking 2025 * callers that blindly resize the filesystem to the full size of 2026 * the underlying block device. 2027 */ 2028 if (ext4_has_feature_bigalloc(sb)) 2029 n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1); 2030 2031 retry: 2032 o_blocks_count = ext4_blocks_count(es); 2033 2034 ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu " 2035 "to %llu blocks", o_blocks_count, n_blocks_count); 2036 2037 if (n_blocks_count < o_blocks_count) { 2038 /* On-line shrinking not supported */ 2039 ext4_warning(sb, "can't shrink FS - resize aborted"); 2040 return -EINVAL; 2041 } 2042 2043 if (n_blocks_count == o_blocks_count) 2044 /* Nothing need to do */ 2045 return 0; 2046 2047 n_group = ext4_get_group_number(sb, n_blocks_count - 1); 2048 if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { 2049 ext4_warning(sb, "resize would cause inodes_count overflow"); 2050 return -EINVAL; 2051 } 2052 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset); 2053 2054 n_desc_blocks = num_desc_blocks(sb, n_group + 1); 2055 o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count); 2056 2057 meta_bg = ext4_has_feature_meta_bg(sb); 2058 2059 if (ext4_has_feature_resize_inode(sb)) { 2060 if (meta_bg) { 2061 ext4_error(sb, "resize_inode and meta_bg enabled " 2062 "simultaneously"); 2063 return -EINVAL; 2064 } 2065 if (n_desc_blocks > o_desc_blocks + 2066 le16_to_cpu(es->s_reserved_gdt_blocks)) { 2067 n_blocks_count_retry = n_blocks_count; 2068 n_desc_blocks = o_desc_blocks + 2069 le16_to_cpu(es->s_reserved_gdt_blocks); 2070 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); 2071 n_blocks_count = (ext4_fsblk_t)n_group * 2072 EXT4_BLOCKS_PER_GROUP(sb) + 2073 le32_to_cpu(es->s_first_data_block); 2074 n_group--; /* set to last group number */ 2075 } 2076 2077 if (!resize_inode) 2078 resize_inode = ext4_iget(sb, EXT4_RESIZE_INO, 2079 EXT4_IGET_SPECIAL); 2080 if (IS_ERR(resize_inode)) { 2081 ext4_warning(sb, "Error opening resize inode"); 2082 return PTR_ERR(resize_inode); 2083 } 2084 } 2085 2086 if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) { 2087 err = ext4_convert_meta_bg(sb, resize_inode); 2088 if (err) 2089 goto out; 2090 if (resize_inode) { 2091 iput(resize_inode); 2092 resize_inode = NULL; 2093 } 2094 if (n_blocks_count_retry) { 2095 n_blocks_count = n_blocks_count_retry; 2096 n_blocks_count_retry = 0; 2097 goto retry; 2098 } 2099 } 2100 2101 /* 2102 * Make sure the last group has enough space so that it's 2103 * guaranteed to have enough space for all metadata blocks 2104 * that it might need to hold. (We might not need to store 2105 * the inode table blocks in the last block group, but there 2106 * will be cases where this might be needed.) 2107 */ 2108 if ((ext4_group_first_block_no(sb, n_group) + 2109 ext4_group_overhead_blocks(sb, n_group) + 2 + 2110 sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) { 2111 n_blocks_count = ext4_group_first_block_no(sb, n_group); 2112 n_group--; 2113 n_blocks_count_retry = 0; 2114 if (resize_inode) { 2115 iput(resize_inode); 2116 resize_inode = NULL; 2117 } 2118 goto retry; 2119 } 2120 2121 /* extend the last group */ 2122 if (n_group == o_group) 2123 add = n_blocks_count - o_blocks_count; 2124 else 2125 add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1)); 2126 if (add > 0) { 2127 err = ext4_group_extend_no_check(sb, o_blocks_count, add); 2128 if (err) 2129 goto out; 2130 } 2131 2132 if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0) 2133 goto out; 2134 2135 err = ext4_alloc_flex_bg_array(sb, n_group + 1); 2136 if (err) 2137 goto out; 2138 2139 err = ext4_mb_alloc_groupinfo(sb, n_group + 1); 2140 if (err) 2141 goto out; 2142 2143 flex_gd = alloc_flex_gd(flexbg_size); 2144 if (flex_gd == NULL) { 2145 err = -ENOMEM; 2146 goto out; 2147 } 2148 2149 /* Add flex groups. Note that a regular group is a 2150 * flex group with 1 group. 2151 */ 2152 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count, 2153 flexbg_size)) { 2154 if (time_is_before_jiffies(last_update_time + HZ * 10)) { 2155 if (last_update_time) 2156 ext4_msg(sb, KERN_INFO, 2157 "resized to %llu blocks", 2158 ext4_blocks_count(es)); 2159 last_update_time = jiffies; 2160 } 2161 if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0) 2162 break; 2163 err = ext4_flex_group_add(sb, resize_inode, flex_gd); 2164 if (unlikely(err)) 2165 break; 2166 } 2167 2168 if (!err && n_blocks_count_retry) { 2169 n_blocks_count = n_blocks_count_retry; 2170 n_blocks_count_retry = 0; 2171 free_flex_gd(flex_gd); 2172 flex_gd = NULL; 2173 if (resize_inode) { 2174 iput(resize_inode); 2175 resize_inode = NULL; 2176 } 2177 goto retry; 2178 } 2179 2180 out: 2181 if (flex_gd) 2182 free_flex_gd(flex_gd); 2183 if (resize_inode != NULL) 2184 iput(resize_inode); 2185 if (err) 2186 ext4_warning(sb, "error (%d) occurred during " 2187 "file system resize", err); 2188 ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", 2189 ext4_blocks_count(es)); 2190 return err; 2191 } 2192