1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/ext4/resize.c 4 * 5 * Support for resizing an ext4 filesystem while it is mounted. 6 * 7 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> 8 * 9 * This could probably be made into a module, because it is not often in use. 10 */ 11 12 13 #define EXT4FS_DEBUG 14 15 #include <linux/errno.h> 16 #include <linux/slab.h> 17 18 #include "ext4_jbd2.h" 19 20 struct ext4_rcu_ptr { 21 struct rcu_head rcu; 22 void *ptr; 23 }; 24 25 static void ext4_rcu_ptr_callback(struct rcu_head *head) 26 { 27 struct ext4_rcu_ptr *ptr; 28 29 ptr = container_of(head, struct ext4_rcu_ptr, rcu); 30 kvfree(ptr->ptr); 31 kfree(ptr); 32 } 33 34 void ext4_kvfree_array_rcu(void *to_free) 35 { 36 struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); 37 38 if (ptr) { 39 ptr->ptr = to_free; 40 call_rcu(&ptr->rcu, ext4_rcu_ptr_callback); 41 return; 42 } 43 synchronize_rcu(); 44 kvfree(to_free); 45 } 46 47 int ext4_resize_begin(struct super_block *sb) 48 { 49 struct ext4_sb_info *sbi = EXT4_SB(sb); 50 int ret = 0; 51 52 if (!capable(CAP_SYS_RESOURCE)) 53 return -EPERM; 54 55 /* 56 * If the reserved GDT blocks is non-zero, the resize_inode feature 57 * should always be set. 58 */ 59 if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks && 60 !ext4_has_feature_resize_inode(sb)) { 61 ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); 62 return -EFSCORRUPTED; 63 } 64 65 /* 66 * If we are not using the primary superblock/GDT copy don't resize, 67 * because the user tools have no way of handling this. Probably a 68 * bad time to do it anyways. 69 */ 70 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != 71 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 72 ext4_warning(sb, "won't resize using backup superblock at %llu", 73 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 74 return -EPERM; 75 } 76 77 /* 78 * We are not allowed to do online-resizing on a filesystem mounted 79 * with error, because it can destroy the filesystem easily. 80 */ 81 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 82 ext4_warning(sb, "There are errors in the filesystem, " 83 "so online resizing is not allowed"); 84 return -EPERM; 85 } 86 87 if (ext4_has_feature_sparse_super2(sb)) { 88 ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); 89 return -EOPNOTSUPP; 90 } 91 92 if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, 93 &EXT4_SB(sb)->s_ext4_flags)) 94 ret = -EBUSY; 95 96 return ret; 97 } 98 99 void ext4_resize_end(struct super_block *sb) 100 { 101 clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags); 102 smp_mb__after_atomic(); 103 } 104 105 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb, 106 ext4_group_t group) { 107 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) << 108 EXT4_DESC_PER_BLOCK_BITS(sb); 109 } 110 111 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb, 112 ext4_group_t group) { 113 group = ext4_meta_bg_first_group(sb, group); 114 return ext4_group_first_block_no(sb, group); 115 } 116 117 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb, 118 ext4_group_t group) { 119 ext4_grpblk_t overhead; 120 overhead = ext4_bg_num_gdb(sb, group); 121 if (ext4_bg_has_super(sb, group)) 122 overhead += 1 + 123 le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 124 return overhead; 125 } 126 127 #define outside(b, first, last) ((b) < (first) || (b) >= (last)) 128 #define inside(b, first, last) ((b) >= (first) && (b) < (last)) 129 130 static int verify_group_input(struct super_block *sb, 131 struct ext4_new_group_data *input) 132 { 133 struct ext4_sb_info *sbi = EXT4_SB(sb); 134 struct ext4_super_block *es = sbi->s_es; 135 ext4_fsblk_t start = ext4_blocks_count(es); 136 ext4_fsblk_t end = start + input->blocks_count; 137 ext4_group_t group = input->group; 138 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; 139 unsigned overhead; 140 ext4_fsblk_t metaend; 141 struct buffer_head *bh = NULL; 142 ext4_grpblk_t free_blocks_count, offset; 143 int err = -EINVAL; 144 145 if (group != sbi->s_groups_count) { 146 ext4_warning(sb, "Cannot add at group %u (only %u groups)", 147 input->group, sbi->s_groups_count); 148 return -EINVAL; 149 } 150 151 overhead = ext4_group_overhead_blocks(sb, group); 152 metaend = start + overhead; 153 input->free_clusters_count = free_blocks_count = 154 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; 155 156 if (test_opt(sb, DEBUG)) 157 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " 158 "(%d free, %u reserved)\n", 159 ext4_bg_has_super(sb, input->group) ? "normal" : 160 "no-super", input->group, input->blocks_count, 161 free_blocks_count, input->reserved_blocks); 162 163 ext4_get_group_no_and_offset(sb, start, NULL, &offset); 164 if (offset != 0) 165 ext4_warning(sb, "Last group not full"); 166 else if (input->reserved_blocks > input->blocks_count / 5) 167 ext4_warning(sb, "Reserved blocks too high (%u)", 168 input->reserved_blocks); 169 else if (free_blocks_count < 0) 170 ext4_warning(sb, "Bad blocks count %u", 171 input->blocks_count); 172 else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) { 173 err = PTR_ERR(bh); 174 bh = NULL; 175 ext4_warning(sb, "Cannot read last block (%llu)", 176 end - 1); 177 } else if (outside(input->block_bitmap, start, end)) 178 ext4_warning(sb, "Block bitmap not in group (block %llu)", 179 (unsigned long long)input->block_bitmap); 180 else if (outside(input->inode_bitmap, start, end)) 181 ext4_warning(sb, "Inode bitmap not in group (block %llu)", 182 (unsigned long long)input->inode_bitmap); 183 else if (outside(input->inode_table, start, end) || 184 outside(itend - 1, start, end)) 185 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", 186 (unsigned long long)input->inode_table, itend - 1); 187 else if (input->inode_bitmap == input->block_bitmap) 188 ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", 189 (unsigned long long)input->block_bitmap); 190 else if (inside(input->block_bitmap, input->inode_table, itend)) 191 ext4_warning(sb, "Block bitmap (%llu) in inode table " 192 "(%llu-%llu)", 193 (unsigned long long)input->block_bitmap, 194 (unsigned long long)input->inode_table, itend - 1); 195 else if (inside(input->inode_bitmap, input->inode_table, itend)) 196 ext4_warning(sb, "Inode bitmap (%llu) in inode table " 197 "(%llu-%llu)", 198 (unsigned long long)input->inode_bitmap, 199 (unsigned long long)input->inode_table, itend - 1); 200 else if (inside(input->block_bitmap, start, metaend)) 201 ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", 202 (unsigned long long)input->block_bitmap, 203 start, metaend - 1); 204 else if (inside(input->inode_bitmap, start, metaend)) 205 ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", 206 (unsigned long long)input->inode_bitmap, 207 start, metaend - 1); 208 else if (inside(input->inode_table, start, metaend) || 209 inside(itend - 1, start, metaend)) 210 ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " 211 "(%llu-%llu)", 212 (unsigned long long)input->inode_table, 213 itend - 1, start, metaend - 1); 214 else 215 err = 0; 216 brelse(bh); 217 218 return err; 219 } 220 221 /* 222 * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex 223 * group each time. 224 */ 225 struct ext4_new_flex_group_data { 226 struct ext4_new_group_data *groups; /* new_group_data for groups 227 in the flex group */ 228 __u16 *bg_flags; /* block group flags of groups 229 in @groups */ 230 ext4_group_t count; /* number of groups in @groups 231 */ 232 }; 233 234 /* 235 * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of 236 * @flexbg_size. 237 * 238 * Returns NULL on failure otherwise address of the allocated structure. 239 */ 240 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) 241 { 242 struct ext4_new_flex_group_data *flex_gd; 243 244 flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); 245 if (flex_gd == NULL) 246 goto out3; 247 248 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data)) 249 goto out2; 250 flex_gd->count = flexbg_size; 251 252 flex_gd->groups = kmalloc_array(flexbg_size, 253 sizeof(struct ext4_new_group_data), 254 GFP_NOFS); 255 if (flex_gd->groups == NULL) 256 goto out2; 257 258 flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16), 259 GFP_NOFS); 260 if (flex_gd->bg_flags == NULL) 261 goto out1; 262 263 return flex_gd; 264 265 out1: 266 kfree(flex_gd->groups); 267 out2: 268 kfree(flex_gd); 269 out3: 270 return NULL; 271 } 272 273 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) 274 { 275 kfree(flex_gd->bg_flags); 276 kfree(flex_gd->groups); 277 kfree(flex_gd); 278 } 279 280 /* 281 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps 282 * and inode tables for a flex group. 283 * 284 * This function is used by 64bit-resize. Note that this function allocates 285 * group tables from the 1st group of groups contained by @flexgd, which may 286 * be a partial of a flex group. 287 * 288 * @sb: super block of fs to which the groups belongs 289 * 290 * Returns 0 on a successful allocation of the metadata blocks in the 291 * block group. 292 */ 293 static int ext4_alloc_group_tables(struct super_block *sb, 294 struct ext4_new_flex_group_data *flex_gd, 295 int flexbg_size) 296 { 297 struct ext4_new_group_data *group_data = flex_gd->groups; 298 ext4_fsblk_t start_blk; 299 ext4_fsblk_t last_blk; 300 ext4_group_t src_group; 301 ext4_group_t bb_index = 0; 302 ext4_group_t ib_index = 0; 303 ext4_group_t it_index = 0; 304 ext4_group_t group; 305 ext4_group_t last_group; 306 unsigned overhead; 307 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 308 int i; 309 310 BUG_ON(flex_gd->count == 0 || group_data == NULL); 311 312 src_group = group_data[0].group; 313 last_group = src_group + flex_gd->count - 1; 314 315 BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != 316 (last_group & ~(flexbg_size - 1)))); 317 next_group: 318 group = group_data[0].group; 319 if (src_group >= group_data[0].group + flex_gd->count) 320 return -ENOSPC; 321 start_blk = ext4_group_first_block_no(sb, src_group); 322 last_blk = start_blk + group_data[src_group - group].blocks_count; 323 324 overhead = ext4_group_overhead_blocks(sb, src_group); 325 326 start_blk += overhead; 327 328 /* We collect contiguous blocks as much as possible. */ 329 src_group++; 330 for (; src_group <= last_group; src_group++) { 331 overhead = ext4_group_overhead_blocks(sb, src_group); 332 if (overhead == 0) 333 last_blk += group_data[src_group - group].blocks_count; 334 else 335 break; 336 } 337 338 /* Allocate block bitmaps */ 339 for (; bb_index < flex_gd->count; bb_index++) { 340 if (start_blk >= last_blk) 341 goto next_group; 342 group_data[bb_index].block_bitmap = start_blk++; 343 group = ext4_get_group_number(sb, start_blk - 1); 344 group -= group_data[0].group; 345 group_data[group].mdata_blocks++; 346 flex_gd->bg_flags[group] &= uninit_mask; 347 } 348 349 /* Allocate inode bitmaps */ 350 for (; ib_index < flex_gd->count; ib_index++) { 351 if (start_blk >= last_blk) 352 goto next_group; 353 group_data[ib_index].inode_bitmap = start_blk++; 354 group = ext4_get_group_number(sb, start_blk - 1); 355 group -= group_data[0].group; 356 group_data[group].mdata_blocks++; 357 flex_gd->bg_flags[group] &= uninit_mask; 358 } 359 360 /* Allocate inode tables */ 361 for (; it_index < flex_gd->count; it_index++) { 362 unsigned int itb = EXT4_SB(sb)->s_itb_per_group; 363 ext4_fsblk_t next_group_start; 364 365 if (start_blk + itb > last_blk) 366 goto next_group; 367 group_data[it_index].inode_table = start_blk; 368 group = ext4_get_group_number(sb, start_blk); 369 next_group_start = ext4_group_first_block_no(sb, group + 1); 370 group -= group_data[0].group; 371 372 if (start_blk + itb > next_group_start) { 373 flex_gd->bg_flags[group + 1] &= uninit_mask; 374 overhead = start_blk + itb - next_group_start; 375 group_data[group + 1].mdata_blocks += overhead; 376 itb -= overhead; 377 } 378 379 group_data[group].mdata_blocks += itb; 380 flex_gd->bg_flags[group] &= uninit_mask; 381 start_blk += EXT4_SB(sb)->s_itb_per_group; 382 } 383 384 /* Update free clusters count to exclude metadata blocks */ 385 for (i = 0; i < flex_gd->count; i++) { 386 group_data[i].free_clusters_count -= 387 EXT4_NUM_B2C(EXT4_SB(sb), 388 group_data[i].mdata_blocks); 389 } 390 391 if (test_opt(sb, DEBUG)) { 392 int i; 393 group = group_data[0].group; 394 395 printk(KERN_DEBUG "EXT4-fs: adding a flex group with " 396 "%d groups, flexbg size is %d:\n", flex_gd->count, 397 flexbg_size); 398 399 for (i = 0; i < flex_gd->count; i++) { 400 ext4_debug( 401 "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n", 402 ext4_bg_has_super(sb, group + i) ? "normal" : 403 "no-super", group + i, 404 group_data[i].blocks_count, 405 group_data[i].free_clusters_count, 406 group_data[i].mdata_blocks); 407 } 408 } 409 return 0; 410 } 411 412 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, 413 ext4_fsblk_t blk) 414 { 415 struct buffer_head *bh; 416 int err; 417 418 bh = sb_getblk(sb, blk); 419 if (unlikely(!bh)) 420 return ERR_PTR(-ENOMEM); 421 BUFFER_TRACE(bh, "get_write_access"); 422 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 423 if (err) { 424 brelse(bh); 425 bh = ERR_PTR(err); 426 } else { 427 memset(bh->b_data, 0, sb->s_blocksize); 428 set_buffer_uptodate(bh); 429 } 430 431 return bh; 432 } 433 434 static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits) 435 { 436 return ext4_journal_ensure_credits_fn(handle, credits, 437 EXT4_MAX_TRANS_DATA, 0, 0); 438 } 439 440 /* 441 * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used. 442 * 443 * Helper function for ext4_setup_new_group_blocks() which set . 444 * 445 * @sb: super block 446 * @handle: journal handle 447 * @flex_gd: flex group data 448 */ 449 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, 450 struct ext4_new_flex_group_data *flex_gd, 451 ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster) 452 { 453 struct ext4_sb_info *sbi = EXT4_SB(sb); 454 ext4_group_t count = last_cluster - first_cluster + 1; 455 ext4_group_t count2; 456 457 ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster, 458 last_cluster); 459 for (count2 = count; count > 0; 460 count -= count2, first_cluster += count2) { 461 ext4_fsblk_t start; 462 struct buffer_head *bh; 463 ext4_group_t group; 464 int err; 465 466 group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster)); 467 start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group)); 468 group -= flex_gd->groups[0].group; 469 470 count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start); 471 if (count2 > count) 472 count2 = count; 473 474 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { 475 BUG_ON(flex_gd->count > 1); 476 continue; 477 } 478 479 err = ext4_resize_ensure_credits_batch(handle, 1); 480 if (err < 0) 481 return err; 482 483 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); 484 if (unlikely(!bh)) 485 return -ENOMEM; 486 487 BUFFER_TRACE(bh, "get_write_access"); 488 err = ext4_journal_get_write_access(handle, sb, bh, 489 EXT4_JTR_NONE); 490 if (err) { 491 brelse(bh); 492 return err; 493 } 494 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", 495 first_cluster, first_cluster - start, count2); 496 ext4_set_bits(bh->b_data, first_cluster - start, count2); 497 498 err = ext4_handle_dirty_metadata(handle, NULL, bh); 499 brelse(bh); 500 if (unlikely(err)) 501 return err; 502 } 503 504 return 0; 505 } 506 507 /* 508 * Set up the block and inode bitmaps, and the inode table for the new groups. 509 * This doesn't need to be part of the main transaction, since we are only 510 * changing blocks outside the actual filesystem. We still do journaling to 511 * ensure the recovery is correct in case of a failure just after resize. 512 * If any part of this fails, we simply abort the resize. 513 * 514 * setup_new_flex_group_blocks handles a flex group as follow: 515 * 1. copy super block and GDT, and initialize group tables if necessary. 516 * In this step, we only set bits in blocks bitmaps for blocks taken by 517 * super block and GDT. 518 * 2. allocate group tables in block bitmaps, that is, set bits in block 519 * bitmap for blocks taken by group tables. 520 */ 521 static int setup_new_flex_group_blocks(struct super_block *sb, 522 struct ext4_new_flex_group_data *flex_gd) 523 { 524 int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group}; 525 ext4_fsblk_t start; 526 ext4_fsblk_t block; 527 struct ext4_sb_info *sbi = EXT4_SB(sb); 528 struct ext4_super_block *es = sbi->s_es; 529 struct ext4_new_group_data *group_data = flex_gd->groups; 530 __u16 *bg_flags = flex_gd->bg_flags; 531 handle_t *handle; 532 ext4_group_t group, count; 533 struct buffer_head *bh = NULL; 534 int reserved_gdb, i, j, err = 0, err2; 535 int meta_bg; 536 537 BUG_ON(!flex_gd->count || !group_data || 538 group_data[0].group != sbi->s_groups_count); 539 540 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 541 meta_bg = ext4_has_feature_meta_bg(sb); 542 543 /* This transaction may be extended/restarted along the way */ 544 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 545 if (IS_ERR(handle)) 546 return PTR_ERR(handle); 547 548 group = group_data[0].group; 549 for (i = 0; i < flex_gd->count; i++, group++) { 550 unsigned long gdblocks; 551 ext4_grpblk_t overhead; 552 553 gdblocks = ext4_bg_num_gdb(sb, group); 554 start = ext4_group_first_block_no(sb, group); 555 556 if (meta_bg == 0 && !ext4_bg_has_super(sb, group)) 557 goto handle_itb; 558 559 if (meta_bg == 1) { 560 ext4_group_t first_group; 561 first_group = ext4_meta_bg_first_group(sb, group); 562 if (first_group != group + 1 && 563 first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1) 564 goto handle_itb; 565 } 566 567 block = start + ext4_bg_has_super(sb, group); 568 /* Copy all of the GDT blocks into the backup in this group */ 569 for (j = 0; j < gdblocks; j++, block++) { 570 struct buffer_head *gdb; 571 572 ext4_debug("update backup group %#04llx\n", block); 573 err = ext4_resize_ensure_credits_batch(handle, 1); 574 if (err < 0) 575 goto out; 576 577 gdb = sb_getblk(sb, block); 578 if (unlikely(!gdb)) { 579 err = -ENOMEM; 580 goto out; 581 } 582 583 BUFFER_TRACE(gdb, "get_write_access"); 584 err = ext4_journal_get_write_access(handle, sb, gdb, 585 EXT4_JTR_NONE); 586 if (err) { 587 brelse(gdb); 588 goto out; 589 } 590 memcpy(gdb->b_data, sbi_array_rcu_deref(sbi, 591 s_group_desc, j)->b_data, gdb->b_size); 592 set_buffer_uptodate(gdb); 593 594 err = ext4_handle_dirty_metadata(handle, NULL, gdb); 595 if (unlikely(err)) { 596 brelse(gdb); 597 goto out; 598 } 599 brelse(gdb); 600 } 601 602 /* Zero out all of the reserved backup group descriptor 603 * table blocks 604 */ 605 if (ext4_bg_has_super(sb, group)) { 606 err = sb_issue_zeroout(sb, gdblocks + start + 1, 607 reserved_gdb, GFP_NOFS); 608 if (err) 609 goto out; 610 } 611 612 handle_itb: 613 /* Initialize group tables of the grop @group */ 614 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) 615 goto handle_bb; 616 617 /* Zero out all of the inode table blocks */ 618 block = group_data[i].inode_table; 619 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", 620 block, sbi->s_itb_per_group); 621 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, 622 GFP_NOFS); 623 if (err) 624 goto out; 625 626 handle_bb: 627 if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT) 628 goto handle_ib; 629 630 /* Initialize block bitmap of the @group */ 631 block = group_data[i].block_bitmap; 632 err = ext4_resize_ensure_credits_batch(handle, 1); 633 if (err < 0) 634 goto out; 635 636 bh = bclean(handle, sb, block); 637 if (IS_ERR(bh)) { 638 err = PTR_ERR(bh); 639 goto out; 640 } 641 overhead = ext4_group_overhead_blocks(sb, group); 642 if (overhead != 0) { 643 ext4_debug("mark backup superblock %#04llx (+0)\n", 644 start); 645 ext4_set_bits(bh->b_data, 0, 646 EXT4_NUM_B2C(sbi, overhead)); 647 } 648 ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count), 649 sb->s_blocksize * 8, bh->b_data); 650 err = ext4_handle_dirty_metadata(handle, NULL, bh); 651 brelse(bh); 652 if (err) 653 goto out; 654 655 handle_ib: 656 if (bg_flags[i] & EXT4_BG_INODE_UNINIT) 657 continue; 658 659 /* Initialize inode bitmap of the @group */ 660 block = group_data[i].inode_bitmap; 661 err = ext4_resize_ensure_credits_batch(handle, 1); 662 if (err < 0) 663 goto out; 664 /* Mark unused entries in inode bitmap used */ 665 bh = bclean(handle, sb, block); 666 if (IS_ERR(bh)) { 667 err = PTR_ERR(bh); 668 goto out; 669 } 670 671 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), 672 sb->s_blocksize * 8, bh->b_data); 673 err = ext4_handle_dirty_metadata(handle, NULL, bh); 674 brelse(bh); 675 if (err) 676 goto out; 677 } 678 679 /* Mark group tables in block bitmap */ 680 for (j = 0; j < GROUP_TABLE_COUNT; j++) { 681 count = group_table_count[j]; 682 start = (&group_data[0].block_bitmap)[j]; 683 block = start; 684 for (i = 1; i < flex_gd->count; i++) { 685 block += group_table_count[j]; 686 if (block == (&group_data[i].block_bitmap)[j]) { 687 count += group_table_count[j]; 688 continue; 689 } 690 err = set_flexbg_block_bitmap(sb, handle, 691 flex_gd, 692 EXT4_B2C(sbi, start), 693 EXT4_B2C(sbi, 694 start + count 695 - 1)); 696 if (err) 697 goto out; 698 count = group_table_count[j]; 699 start = (&group_data[i].block_bitmap)[j]; 700 block = start; 701 } 702 703 if (count) { 704 err = set_flexbg_block_bitmap(sb, handle, 705 flex_gd, 706 EXT4_B2C(sbi, start), 707 EXT4_B2C(sbi, 708 start + count 709 - 1)); 710 if (err) 711 goto out; 712 } 713 } 714 715 out: 716 err2 = ext4_journal_stop(handle); 717 if (err2 && !err) 718 err = err2; 719 720 return err; 721 } 722 723 /* 724 * Iterate through the groups which hold BACKUP superblock/GDT copies in an 725 * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before 726 * calling this for the first time. In a sparse filesystem it will be the 727 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 728 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 729 */ 730 static unsigned ext4_list_backups(struct super_block *sb, unsigned *three, 731 unsigned *five, unsigned *seven) 732 { 733 unsigned *min = three; 734 int mult = 3; 735 unsigned ret; 736 737 if (!ext4_has_feature_sparse_super(sb)) { 738 ret = *min; 739 *min += 1; 740 return ret; 741 } 742 743 if (*five < *min) { 744 min = five; 745 mult = 5; 746 } 747 if (*seven < *min) { 748 min = seven; 749 mult = 7; 750 } 751 752 ret = *min; 753 *min *= mult; 754 755 return ret; 756 } 757 758 /* 759 * Check that all of the backup GDT blocks are held in the primary GDT block. 760 * It is assumed that they are stored in group order. Returns the number of 761 * groups in current filesystem that have BACKUPS, or -ve error code. 762 */ 763 static int verify_reserved_gdb(struct super_block *sb, 764 ext4_group_t end, 765 struct buffer_head *primary) 766 { 767 const ext4_fsblk_t blk = primary->b_blocknr; 768 unsigned three = 1; 769 unsigned five = 5; 770 unsigned seven = 7; 771 unsigned grp; 772 __le32 *p = (__le32 *)primary->b_data; 773 int gdbackups = 0; 774 775 while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { 776 if (le32_to_cpu(*p++) != 777 grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ 778 ext4_warning(sb, "reserved GDT %llu" 779 " missing grp %d (%llu)", 780 blk, grp, 781 grp * 782 (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + 783 blk); 784 return -EINVAL; 785 } 786 if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) 787 return -EFBIG; 788 } 789 790 return gdbackups; 791 } 792 793 /* 794 * Called when we need to bring a reserved group descriptor table block into 795 * use from the resize inode. The primary copy of the new GDT block currently 796 * is an indirect block (under the double indirect block in the resize inode). 797 * The new backup GDT blocks will be stored as leaf blocks in this indirect 798 * block, in group order. Even though we know all the block numbers we need, 799 * we check to ensure that the resize inode has actually reserved these blocks. 800 * 801 * Don't need to update the block bitmaps because the blocks are still in use. 802 * 803 * We get all of the error cases out of the way, so that we are sure to not 804 * fail once we start modifying the data on disk, because JBD has no rollback. 805 */ 806 static int add_new_gdb(handle_t *handle, struct inode *inode, 807 ext4_group_t group) 808 { 809 struct super_block *sb = inode->i_sb; 810 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 811 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 812 ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; 813 struct buffer_head **o_group_desc, **n_group_desc = NULL; 814 struct buffer_head *dind = NULL; 815 struct buffer_head *gdb_bh = NULL; 816 int gdbackups; 817 struct ext4_iloc iloc = { .bh = NULL }; 818 __le32 *data; 819 int err; 820 821 if (test_opt(sb, DEBUG)) 822 printk(KERN_DEBUG 823 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 824 gdb_num); 825 826 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 827 if (IS_ERR(gdb_bh)) 828 return PTR_ERR(gdb_bh); 829 830 gdbackups = verify_reserved_gdb(sb, group, gdb_bh); 831 if (gdbackups < 0) { 832 err = gdbackups; 833 goto errout; 834 } 835 836 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 837 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 838 if (IS_ERR(dind)) { 839 err = PTR_ERR(dind); 840 dind = NULL; 841 goto errout; 842 } 843 844 data = (__le32 *)dind->b_data; 845 if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { 846 ext4_warning(sb, "new group %u GDT block %llu not reserved", 847 group, gdblock); 848 err = -EINVAL; 849 goto errout; 850 } 851 852 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 853 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 854 EXT4_JTR_NONE); 855 if (unlikely(err)) 856 goto errout; 857 858 BUFFER_TRACE(gdb_bh, "get_write_access"); 859 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 860 if (unlikely(err)) 861 goto errout; 862 863 BUFFER_TRACE(dind, "get_write_access"); 864 err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE); 865 if (unlikely(err)) { 866 ext4_std_error(sb, err); 867 goto errout; 868 } 869 870 /* ext4_reserve_inode_write() gets a reference on the iloc */ 871 err = ext4_reserve_inode_write(handle, inode, &iloc); 872 if (unlikely(err)) 873 goto errout; 874 875 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 876 GFP_KERNEL); 877 if (!n_group_desc) { 878 err = -ENOMEM; 879 ext4_warning(sb, "not enough memory for %lu groups", 880 gdb_num + 1); 881 goto errout; 882 } 883 884 /* 885 * Finally, we have all of the possible failures behind us... 886 * 887 * Remove new GDT block from inode double-indirect block and clear out 888 * the new GDT block for use (which also "frees" the backup GDT blocks 889 * from the reserved inode). We don't need to change the bitmaps for 890 * these blocks, because they are marked as in-use from being in the 891 * reserved inode, and will become GDT blocks (primary and backup). 892 */ 893 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 894 err = ext4_handle_dirty_metadata(handle, NULL, dind); 895 if (unlikely(err)) { 896 ext4_std_error(sb, err); 897 goto errout; 898 } 899 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 900 (9 - EXT4_SB(sb)->s_cluster_bits); 901 ext4_mark_iloc_dirty(handle, inode, &iloc); 902 memset(gdb_bh->b_data, 0, sb->s_blocksize); 903 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 904 if (unlikely(err)) { 905 ext4_std_error(sb, err); 906 iloc.bh = NULL; 907 goto errout; 908 } 909 brelse(dind); 910 911 rcu_read_lock(); 912 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 913 memcpy(n_group_desc, o_group_desc, 914 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 915 rcu_read_unlock(); 916 n_group_desc[gdb_num] = gdb_bh; 917 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 918 EXT4_SB(sb)->s_gdb_count++; 919 ext4_kvfree_array_rcu(o_group_desc); 920 921 lock_buffer(EXT4_SB(sb)->s_sbh); 922 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 923 ext4_superblock_csum_set(sb); 924 unlock_buffer(EXT4_SB(sb)->s_sbh); 925 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 926 if (err) 927 ext4_std_error(sb, err); 928 return err; 929 errout: 930 kvfree(n_group_desc); 931 brelse(iloc.bh); 932 brelse(dind); 933 brelse(gdb_bh); 934 935 ext4_debug("leaving with error %d\n", err); 936 return err; 937 } 938 939 /* 940 * add_new_gdb_meta_bg is the sister of add_new_gdb. 941 */ 942 static int add_new_gdb_meta_bg(struct super_block *sb, 943 handle_t *handle, ext4_group_t group) { 944 ext4_fsblk_t gdblock; 945 struct buffer_head *gdb_bh; 946 struct buffer_head **o_group_desc, **n_group_desc; 947 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 948 int err; 949 950 gdblock = ext4_meta_bg_first_block_no(sb, group) + 951 ext4_bg_has_super(sb, group); 952 gdb_bh = ext4_sb_bread(sb, gdblock, 0); 953 if (IS_ERR(gdb_bh)) 954 return PTR_ERR(gdb_bh); 955 n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 956 GFP_KERNEL); 957 if (!n_group_desc) { 958 brelse(gdb_bh); 959 err = -ENOMEM; 960 ext4_warning(sb, "not enough memory for %lu groups", 961 gdb_num + 1); 962 return err; 963 } 964 965 rcu_read_lock(); 966 o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc); 967 memcpy(n_group_desc, o_group_desc, 968 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 969 rcu_read_unlock(); 970 n_group_desc[gdb_num] = gdb_bh; 971 972 BUFFER_TRACE(gdb_bh, "get_write_access"); 973 err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE); 974 if (err) { 975 kvfree(n_group_desc); 976 brelse(gdb_bh); 977 return err; 978 } 979 980 rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc); 981 EXT4_SB(sb)->s_gdb_count++; 982 ext4_kvfree_array_rcu(o_group_desc); 983 return err; 984 } 985 986 /* 987 * Called when we are adding a new group which has a backup copy of each of 988 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 989 * We need to add these reserved backup GDT blocks to the resize inode, so 990 * that they are kept for future resizing and not allocated to files. 991 * 992 * Each reserved backup GDT block will go into a different indirect block. 993 * The indirect blocks are actually the primary reserved GDT blocks, 994 * so we know in advance what their block numbers are. We only get the 995 * double-indirect block to verify it is pointing to the primary reserved 996 * GDT blocks so we don't overwrite a data block by accident. The reserved 997 * backup GDT blocks are stored in their reserved primary GDT block. 998 */ 999 static int reserve_backup_gdb(handle_t *handle, struct inode *inode, 1000 ext4_group_t group) 1001 { 1002 struct super_block *sb = inode->i_sb; 1003 int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); 1004 int cluster_bits = EXT4_SB(sb)->s_cluster_bits; 1005 struct buffer_head **primary; 1006 struct buffer_head *dind; 1007 struct ext4_iloc iloc; 1008 ext4_fsblk_t blk; 1009 __le32 *data, *end; 1010 int gdbackups = 0; 1011 int res, i; 1012 int err; 1013 1014 primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS); 1015 if (!primary) 1016 return -ENOMEM; 1017 1018 data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; 1019 dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0); 1020 if (IS_ERR(dind)) { 1021 err = PTR_ERR(dind); 1022 dind = NULL; 1023 goto exit_free; 1024 } 1025 1026 blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; 1027 data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % 1028 EXT4_ADDR_PER_BLOCK(sb)); 1029 end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); 1030 1031 /* Get each reserved primary GDT block and verify it holds backups */ 1032 for (res = 0; res < reserved_gdb; res++, blk++) { 1033 if (le32_to_cpu(*data) != blk) { 1034 ext4_warning(sb, "reserved block %llu" 1035 " not at offset %ld", 1036 blk, 1037 (long)(data - (__le32 *)dind->b_data)); 1038 err = -EINVAL; 1039 goto exit_bh; 1040 } 1041 primary[res] = ext4_sb_bread(sb, blk, 0); 1042 if (IS_ERR(primary[res])) { 1043 err = PTR_ERR(primary[res]); 1044 primary[res] = NULL; 1045 goto exit_bh; 1046 } 1047 gdbackups = verify_reserved_gdb(sb, group, primary[res]); 1048 if (gdbackups < 0) { 1049 brelse(primary[res]); 1050 err = gdbackups; 1051 goto exit_bh; 1052 } 1053 if (++data >= end) 1054 data = (__le32 *)dind->b_data; 1055 } 1056 1057 for (i = 0; i < reserved_gdb; i++) { 1058 BUFFER_TRACE(primary[i], "get_write_access"); 1059 if ((err = ext4_journal_get_write_access(handle, sb, primary[i], 1060 EXT4_JTR_NONE))) 1061 goto exit_bh; 1062 } 1063 1064 if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) 1065 goto exit_bh; 1066 1067 /* 1068 * Finally we can add each of the reserved backup GDT blocks from 1069 * the new group to its reserved primary GDT block. 1070 */ 1071 blk = group * EXT4_BLOCKS_PER_GROUP(sb); 1072 for (i = 0; i < reserved_gdb; i++) { 1073 int err2; 1074 data = (__le32 *)primary[i]->b_data; 1075 /* printk("reserving backup %lu[%u] = %lu\n", 1076 primary[i]->b_blocknr, gdbackups, 1077 blk + primary[i]->b_blocknr); */ 1078 data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); 1079 err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); 1080 if (!err) 1081 err = err2; 1082 } 1083 1084 inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits); 1085 ext4_mark_iloc_dirty(handle, inode, &iloc); 1086 1087 exit_bh: 1088 while (--res >= 0) 1089 brelse(primary[res]); 1090 brelse(dind); 1091 1092 exit_free: 1093 kfree(primary); 1094 1095 return err; 1096 } 1097 1098 /* 1099 * Update the backup copies of the ext4 metadata. These don't need to be part 1100 * of the main resize transaction, because e2fsck will re-write them if there 1101 * is a problem (basically only OOM will cause a problem). However, we 1102 * _should_ update the backups if possible, in case the primary gets trashed 1103 * for some reason and we need to run e2fsck from a backup superblock. The 1104 * important part is that the new block and inode counts are in the backup 1105 * superblocks, and the location of the new group metadata in the GDT backups. 1106 * 1107 * We do not need take the s_resize_lock for this, because these 1108 * blocks are not otherwise touched by the filesystem code when it is 1109 * mounted. We don't need to worry about last changing from 1110 * sbi->s_groups_count, because the worst that can happen is that we 1111 * do not copy the full number of backups at this time. The resize 1112 * which changed s_groups_count will backup again. 1113 */ 1114 static void update_backups(struct super_block *sb, sector_t blk_off, char *data, 1115 int size, int meta_bg) 1116 { 1117 struct ext4_sb_info *sbi = EXT4_SB(sb); 1118 ext4_group_t last; 1119 const int bpg = EXT4_BLOCKS_PER_GROUP(sb); 1120 unsigned three = 1; 1121 unsigned five = 5; 1122 unsigned seven = 7; 1123 ext4_group_t group = 0; 1124 int rest = sb->s_blocksize - size; 1125 handle_t *handle; 1126 int err = 0, err2; 1127 1128 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA); 1129 if (IS_ERR(handle)) { 1130 group = 1; 1131 err = PTR_ERR(handle); 1132 goto exit_err; 1133 } 1134 1135 if (meta_bg == 0) { 1136 group = ext4_list_backups(sb, &three, &five, &seven); 1137 last = sbi->s_groups_count; 1138 } else { 1139 group = ext4_get_group_number(sb, blk_off) + 1; 1140 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); 1141 } 1142 1143 while (group < sbi->s_groups_count) { 1144 struct buffer_head *bh; 1145 ext4_fsblk_t backup_block; 1146 1147 /* Out of journal space, and can't get more - abort - so sad */ 1148 err = ext4_resize_ensure_credits_batch(handle, 1); 1149 if (err < 0) 1150 break; 1151 1152 if (meta_bg == 0) 1153 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; 1154 else 1155 backup_block = (ext4_group_first_block_no(sb, group) + 1156 ext4_bg_has_super(sb, group)); 1157 1158 bh = sb_getblk(sb, backup_block); 1159 if (unlikely(!bh)) { 1160 err = -ENOMEM; 1161 break; 1162 } 1163 ext4_debug("update metadata backup %llu(+%llu)\n", 1164 backup_block, backup_block - 1165 ext4_group_first_block_no(sb, group)); 1166 BUFFER_TRACE(bh, "get_write_access"); 1167 if ((err = ext4_journal_get_write_access(handle, sb, bh, 1168 EXT4_JTR_NONE))) 1169 break; 1170 lock_buffer(bh); 1171 memcpy(bh->b_data, data, size); 1172 if (rest) 1173 memset(bh->b_data + size, 0, rest); 1174 set_buffer_uptodate(bh); 1175 unlock_buffer(bh); 1176 err = ext4_handle_dirty_metadata(handle, NULL, bh); 1177 if (unlikely(err)) 1178 ext4_std_error(sb, err); 1179 brelse(bh); 1180 1181 if (meta_bg == 0) 1182 group = ext4_list_backups(sb, &three, &five, &seven); 1183 else if (group == last) 1184 break; 1185 else 1186 group = last; 1187 } 1188 if ((err2 = ext4_journal_stop(handle)) && !err) 1189 err = err2; 1190 1191 /* 1192 * Ugh! Need to have e2fsck write the backup copies. It is too 1193 * late to revert the resize, we shouldn't fail just because of 1194 * the backup copies (they are only needed in case of corruption). 1195 * 1196 * However, if we got here we have a journal problem too, so we 1197 * can't really start a transaction to mark the superblock. 1198 * Chicken out and just set the flag on the hope it will be written 1199 * to disk, and if not - we will simply wait until next fsck. 1200 */ 1201 exit_err: 1202 if (err) { 1203 ext4_warning(sb, "can't update backup for group %u (err %d), " 1204 "forcing fsck on next reboot", group, err); 1205 sbi->s_mount_state &= ~EXT4_VALID_FS; 1206 sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 1207 mark_buffer_dirty(sbi->s_sbh); 1208 } 1209 } 1210 1211 /* 1212 * ext4_add_new_descs() adds @count group descriptor of groups 1213 * starting at @group 1214 * 1215 * @handle: journal handle 1216 * @sb: super block 1217 * @group: the group no. of the first group desc to be added 1218 * @resize_inode: the resize inode 1219 * @count: number of group descriptors to be added 1220 */ 1221 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, 1222 ext4_group_t group, struct inode *resize_inode, 1223 ext4_group_t count) 1224 { 1225 struct ext4_sb_info *sbi = EXT4_SB(sb); 1226 struct ext4_super_block *es = sbi->s_es; 1227 struct buffer_head *gdb_bh; 1228 int i, gdb_off, gdb_num, err = 0; 1229 int meta_bg; 1230 1231 meta_bg = ext4_has_feature_meta_bg(sb); 1232 for (i = 0; i < count; i++, group++) { 1233 int reserved_gdb = ext4_bg_has_super(sb, group) ? 1234 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1235 1236 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1237 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1238 1239 /* 1240 * We will only either add reserved group blocks to a backup group 1241 * or remove reserved blocks for the first group in a new group block. 1242 * Doing both would be mean more complex code, and sane people don't 1243 * use non-sparse filesystems anymore. This is already checked above. 1244 */ 1245 if (gdb_off) { 1246 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1247 gdb_num); 1248 BUFFER_TRACE(gdb_bh, "get_write_access"); 1249 err = ext4_journal_get_write_access(handle, sb, gdb_bh, 1250 EXT4_JTR_NONE); 1251 1252 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) 1253 err = reserve_backup_gdb(handle, resize_inode, group); 1254 } else if (meta_bg != 0) { 1255 err = add_new_gdb_meta_bg(sb, handle, group); 1256 } else { 1257 err = add_new_gdb(handle, resize_inode, group); 1258 } 1259 if (err) 1260 break; 1261 } 1262 return err; 1263 } 1264 1265 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) 1266 { 1267 struct buffer_head *bh = sb_getblk(sb, block); 1268 if (unlikely(!bh)) 1269 return NULL; 1270 if (!bh_uptodate_or_lock(bh)) { 1271 if (ext4_read_bh(bh, 0, NULL) < 0) { 1272 brelse(bh); 1273 return NULL; 1274 } 1275 } 1276 1277 return bh; 1278 } 1279 1280 static int ext4_set_bitmap_checksums(struct super_block *sb, 1281 ext4_group_t group, 1282 struct ext4_group_desc *gdp, 1283 struct ext4_new_group_data *group_data) 1284 { 1285 struct buffer_head *bh; 1286 1287 if (!ext4_has_metadata_csum(sb)) 1288 return 0; 1289 1290 bh = ext4_get_bitmap(sb, group_data->inode_bitmap); 1291 if (!bh) 1292 return -EIO; 1293 ext4_inode_bitmap_csum_set(sb, group, gdp, bh, 1294 EXT4_INODES_PER_GROUP(sb) / 8); 1295 brelse(bh); 1296 1297 bh = ext4_get_bitmap(sb, group_data->block_bitmap); 1298 if (!bh) 1299 return -EIO; 1300 ext4_block_bitmap_csum_set(sb, group, gdp, bh); 1301 brelse(bh); 1302 1303 return 0; 1304 } 1305 1306 /* 1307 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg 1308 */ 1309 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, 1310 struct ext4_new_flex_group_data *flex_gd) 1311 { 1312 struct ext4_new_group_data *group_data = flex_gd->groups; 1313 struct ext4_group_desc *gdp; 1314 struct ext4_sb_info *sbi = EXT4_SB(sb); 1315 struct buffer_head *gdb_bh; 1316 ext4_group_t group; 1317 __u16 *bg_flags = flex_gd->bg_flags; 1318 int i, gdb_off, gdb_num, err = 0; 1319 1320 1321 for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { 1322 group = group_data->group; 1323 1324 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); 1325 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1326 1327 /* 1328 * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). 1329 */ 1330 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num); 1331 /* Update group descriptor block for new group */ 1332 gdp = (struct ext4_group_desc *)(gdb_bh->b_data + 1333 gdb_off * EXT4_DESC_SIZE(sb)); 1334 1335 memset(gdp, 0, EXT4_DESC_SIZE(sb)); 1336 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); 1337 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); 1338 err = ext4_set_bitmap_checksums(sb, group, gdp, group_data); 1339 if (err) { 1340 ext4_std_error(sb, err); 1341 break; 1342 } 1343 1344 ext4_inode_table_set(sb, gdp, group_data->inode_table); 1345 ext4_free_group_clusters_set(sb, gdp, 1346 group_data->free_clusters_count); 1347 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); 1348 if (ext4_has_group_desc_csum(sb)) 1349 ext4_itable_unused_set(sb, gdp, 1350 EXT4_INODES_PER_GROUP(sb)); 1351 gdp->bg_flags = cpu_to_le16(*bg_flags); 1352 ext4_group_desc_csum_set(sb, group, gdp); 1353 1354 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 1355 if (unlikely(err)) { 1356 ext4_std_error(sb, err); 1357 break; 1358 } 1359 1360 /* 1361 * We can allocate memory for mb_alloc based on the new group 1362 * descriptor 1363 */ 1364 err = ext4_mb_add_groupinfo(sb, group, gdp); 1365 if (err) 1366 break; 1367 } 1368 return err; 1369 } 1370 1371 /* 1372 * ext4_update_super() updates the super block so that the newly added 1373 * groups can be seen by the filesystem. 1374 * 1375 * @sb: super block 1376 * @flex_gd: new added groups 1377 */ 1378 static void ext4_update_super(struct super_block *sb, 1379 struct ext4_new_flex_group_data *flex_gd) 1380 { 1381 ext4_fsblk_t blocks_count = 0; 1382 ext4_fsblk_t free_blocks = 0; 1383 ext4_fsblk_t reserved_blocks = 0; 1384 struct ext4_new_group_data *group_data = flex_gd->groups; 1385 struct ext4_sb_info *sbi = EXT4_SB(sb); 1386 struct ext4_super_block *es = sbi->s_es; 1387 int i; 1388 1389 BUG_ON(flex_gd->count == 0 || group_data == NULL); 1390 /* 1391 * Make the new blocks and inodes valid next. We do this before 1392 * increasing the group count so that once the group is enabled, 1393 * all of its blocks and inodes are already valid. 1394 * 1395 * We always allocate group-by-group, then block-by-block or 1396 * inode-by-inode within a group, so enabling these 1397 * blocks/inodes before the group is live won't actually let us 1398 * allocate the new space yet. 1399 */ 1400 for (i = 0; i < flex_gd->count; i++) { 1401 blocks_count += group_data[i].blocks_count; 1402 free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count); 1403 } 1404 1405 reserved_blocks = ext4_r_blocks_count(es) * 100; 1406 reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es)); 1407 reserved_blocks *= blocks_count; 1408 do_div(reserved_blocks, 100); 1409 1410 lock_buffer(sbi->s_sbh); 1411 ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); 1412 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); 1413 le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1414 flex_gd->count); 1415 le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * 1416 flex_gd->count); 1417 1418 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); 1419 /* 1420 * We need to protect s_groups_count against other CPUs seeing 1421 * inconsistent state in the superblock. 1422 * 1423 * The precise rules we use are: 1424 * 1425 * * Writers must perform a smp_wmb() after updating all 1426 * dependent data and before modifying the groups count 1427 * 1428 * * Readers must perform an smp_rmb() after reading the groups 1429 * count and before reading any dependent data. 1430 * 1431 * NB. These rules can be relaxed when checking the group count 1432 * while freeing data, as we can only allocate from a block 1433 * group after serialising against the group count, and we can 1434 * only then free after serialising in turn against that 1435 * allocation. 1436 */ 1437 smp_wmb(); 1438 1439 /* Update the global fs size fields */ 1440 sbi->s_groups_count += flex_gd->count; 1441 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 1442 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 1443 1444 /* Update the reserved block counts only once the new group is 1445 * active. */ 1446 ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + 1447 reserved_blocks); 1448 ext4_superblock_csum_set(sb); 1449 unlock_buffer(sbi->s_sbh); 1450 1451 /* Update the free space counts */ 1452 percpu_counter_add(&sbi->s_freeclusters_counter, 1453 EXT4_NUM_B2C(sbi, free_blocks)); 1454 percpu_counter_add(&sbi->s_freeinodes_counter, 1455 EXT4_INODES_PER_GROUP(sb) * flex_gd->count); 1456 1457 ext4_debug("free blocks count %llu", 1458 percpu_counter_read(&sbi->s_freeclusters_counter)); 1459 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) { 1460 ext4_group_t flex_group; 1461 struct flex_groups *fg; 1462 1463 flex_group = ext4_flex_group(sbi, group_data[0].group); 1464 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 1465 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), 1466 &fg->free_clusters); 1467 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, 1468 &fg->free_inodes); 1469 } 1470 1471 /* 1472 * Update the fs overhead information 1473 */ 1474 ext4_calculate_overhead(sb); 1475 es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); 1476 1477 if (test_opt(sb, DEBUG)) 1478 printk(KERN_DEBUG "EXT4-fs: added group %u:" 1479 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, 1480 blocks_count, free_blocks, reserved_blocks); 1481 } 1482 1483 /* Add a flex group to an fs. Ensure we handle all possible error conditions 1484 * _before_ we start modifying the filesystem, because we cannot abort the 1485 * transaction and not have it write the data to disk. 1486 */ 1487 static int ext4_flex_group_add(struct super_block *sb, 1488 struct inode *resize_inode, 1489 struct ext4_new_flex_group_data *flex_gd) 1490 { 1491 struct ext4_sb_info *sbi = EXT4_SB(sb); 1492 struct ext4_super_block *es = sbi->s_es; 1493 ext4_fsblk_t o_blocks_count; 1494 ext4_grpblk_t last; 1495 ext4_group_t group; 1496 handle_t *handle; 1497 unsigned reserved_gdb; 1498 int err = 0, err2 = 0, credit; 1499 1500 BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); 1501 1502 reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); 1503 o_blocks_count = ext4_blocks_count(es); 1504 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1505 BUG_ON(last); 1506 1507 err = setup_new_flex_group_blocks(sb, flex_gd); 1508 if (err) 1509 goto exit; 1510 /* 1511 * We will always be modifying at least the superblock and GDT 1512 * blocks. If we are adding a group past the last current GDT block, 1513 * we will also modify the inode and the dindirect block. If we 1514 * are adding a group with superblock/GDT backups we will also 1515 * modify each of the reserved GDT dindirect blocks. 1516 */ 1517 credit = 3; /* sb, resize inode, resize inode dindirect */ 1518 /* GDT blocks */ 1519 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); 1520 credit += reserved_gdb; /* Reserved GDT dindirect blocks */ 1521 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1522 if (IS_ERR(handle)) { 1523 err = PTR_ERR(handle); 1524 goto exit; 1525 } 1526 1527 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1528 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, 1529 EXT4_JTR_NONE); 1530 if (err) 1531 goto exit_journal; 1532 1533 group = flex_gd->groups[0].group; 1534 BUG_ON(group != sbi->s_groups_count); 1535 err = ext4_add_new_descs(handle, sb, group, 1536 resize_inode, flex_gd->count); 1537 if (err) 1538 goto exit_journal; 1539 1540 err = ext4_setup_new_descs(handle, sb, flex_gd); 1541 if (err) 1542 goto exit_journal; 1543 1544 ext4_update_super(sb, flex_gd); 1545 1546 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 1547 1548 exit_journal: 1549 err2 = ext4_journal_stop(handle); 1550 if (!err) 1551 err = err2; 1552 1553 if (!err) { 1554 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); 1555 int gdb_num_end = ((group + flex_gd->count - 1) / 1556 EXT4_DESC_PER_BLOCK(sb)); 1557 int meta_bg = ext4_has_feature_meta_bg(sb); 1558 sector_t old_gdb = 0; 1559 1560 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, 1561 sizeof(struct ext4_super_block), 0); 1562 for (; gdb_num <= gdb_num_end; gdb_num++) { 1563 struct buffer_head *gdb_bh; 1564 1565 gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, 1566 gdb_num); 1567 if (old_gdb == gdb_bh->b_blocknr) 1568 continue; 1569 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, 1570 gdb_bh->b_size, meta_bg); 1571 old_gdb = gdb_bh->b_blocknr; 1572 } 1573 } 1574 exit: 1575 return err; 1576 } 1577 1578 static int ext4_setup_next_flex_gd(struct super_block *sb, 1579 struct ext4_new_flex_group_data *flex_gd, 1580 ext4_fsblk_t n_blocks_count, 1581 unsigned long flexbg_size) 1582 { 1583 struct ext4_sb_info *sbi = EXT4_SB(sb); 1584 struct ext4_super_block *es = sbi->s_es; 1585 struct ext4_new_group_data *group_data = flex_gd->groups; 1586 ext4_fsblk_t o_blocks_count; 1587 ext4_group_t n_group; 1588 ext4_group_t group; 1589 ext4_group_t last_group; 1590 ext4_grpblk_t last; 1591 ext4_grpblk_t clusters_per_group; 1592 unsigned long i; 1593 1594 clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb); 1595 1596 o_blocks_count = ext4_blocks_count(es); 1597 1598 if (o_blocks_count == n_blocks_count) 1599 return 0; 1600 1601 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1602 BUG_ON(last); 1603 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last); 1604 1605 last_group = group | (flexbg_size - 1); 1606 if (last_group > n_group) 1607 last_group = n_group; 1608 1609 flex_gd->count = last_group - group + 1; 1610 1611 for (i = 0; i < flex_gd->count; i++) { 1612 int overhead; 1613 1614 group_data[i].group = group + i; 1615 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb); 1616 overhead = ext4_group_overhead_blocks(sb, group + i); 1617 group_data[i].mdata_blocks = overhead; 1618 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb); 1619 if (ext4_has_group_desc_csum(sb)) { 1620 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | 1621 EXT4_BG_INODE_UNINIT; 1622 if (!test_opt(sb, INIT_INODE_TABLE)) 1623 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED; 1624 } else 1625 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; 1626 } 1627 1628 if (last_group == n_group && ext4_has_group_desc_csum(sb)) 1629 /* We need to initialize block bitmap of last group. */ 1630 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; 1631 1632 if ((last_group == n_group) && (last != clusters_per_group - 1)) { 1633 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1); 1634 group_data[i - 1].free_clusters_count -= clusters_per_group - 1635 last - 1; 1636 } 1637 1638 return 1; 1639 } 1640 1641 /* Add group descriptor data to an existing or new group descriptor block. 1642 * Ensure we handle all possible error conditions _before_ we start modifying 1643 * the filesystem, because we cannot abort the transaction and not have it 1644 * write the data to disk. 1645 * 1646 * If we are on a GDT block boundary, we need to get the reserved GDT block. 1647 * Otherwise, we may need to add backup GDT blocks for a sparse group. 1648 * 1649 * We only need to hold the superblock lock while we are actually adding 1650 * in the new group's counts to the superblock. Prior to that we have 1651 * not really "added" the group at all. We re-check that we are still 1652 * adding in the last group in case things have changed since verifying. 1653 */ 1654 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) 1655 { 1656 struct ext4_new_flex_group_data flex_gd; 1657 struct ext4_sb_info *sbi = EXT4_SB(sb); 1658 struct ext4_super_block *es = sbi->s_es; 1659 int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 1660 le16_to_cpu(es->s_reserved_gdt_blocks) : 0; 1661 struct inode *inode = NULL; 1662 int gdb_off; 1663 int err; 1664 __u16 bg_flags = 0; 1665 1666 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); 1667 1668 if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) { 1669 ext4_warning(sb, "Can't resize non-sparse filesystem further"); 1670 return -EPERM; 1671 } 1672 1673 if (ext4_blocks_count(es) + input->blocks_count < 1674 ext4_blocks_count(es)) { 1675 ext4_warning(sb, "blocks_count overflow"); 1676 return -EINVAL; 1677 } 1678 1679 if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < 1680 le32_to_cpu(es->s_inodes_count)) { 1681 ext4_warning(sb, "inodes_count overflow"); 1682 return -EINVAL; 1683 } 1684 1685 if (reserved_gdb || gdb_off == 0) { 1686 if (!ext4_has_feature_resize_inode(sb) || 1687 !le16_to_cpu(es->s_reserved_gdt_blocks)) { 1688 ext4_warning(sb, 1689 "No reserved GDT blocks, can't resize"); 1690 return -EPERM; 1691 } 1692 inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL); 1693 if (IS_ERR(inode)) { 1694 ext4_warning(sb, "Error opening resize inode"); 1695 return PTR_ERR(inode); 1696 } 1697 } 1698 1699 1700 err = verify_group_input(sb, input); 1701 if (err) 1702 goto out; 1703 1704 err = ext4_alloc_flex_bg_array(sb, input->group + 1); 1705 if (err) 1706 goto out; 1707 1708 err = ext4_mb_alloc_groupinfo(sb, input->group + 1); 1709 if (err) 1710 goto out; 1711 1712 flex_gd.count = 1; 1713 flex_gd.groups = input; 1714 flex_gd.bg_flags = &bg_flags; 1715 err = ext4_flex_group_add(sb, inode, &flex_gd); 1716 out: 1717 iput(inode); 1718 return err; 1719 } /* ext4_group_add */ 1720 1721 /* 1722 * extend a group without checking assuming that checking has been done. 1723 */ 1724 static int ext4_group_extend_no_check(struct super_block *sb, 1725 ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) 1726 { 1727 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1728 handle_t *handle; 1729 int err = 0, err2; 1730 1731 /* We will update the superblock, one block bitmap, and 1732 * one group descriptor via ext4_group_add_blocks(). 1733 */ 1734 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3); 1735 if (IS_ERR(handle)) { 1736 err = PTR_ERR(handle); 1737 ext4_warning(sb, "error %d on journal start", err); 1738 return err; 1739 } 1740 1741 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); 1742 err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh, 1743 EXT4_JTR_NONE); 1744 if (err) { 1745 ext4_warning(sb, "error %d on journal write access", err); 1746 goto errout; 1747 } 1748 1749 lock_buffer(EXT4_SB(sb)->s_sbh); 1750 ext4_blocks_count_set(es, o_blocks_count + add); 1751 ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add); 1752 ext4_superblock_csum_set(sb); 1753 unlock_buffer(EXT4_SB(sb)->s_sbh); 1754 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, 1755 o_blocks_count + add); 1756 /* We add the blocks to the bitmap and set the group need init bit */ 1757 err = ext4_group_add_blocks(handle, sb, o_blocks_count, add); 1758 if (err) 1759 goto errout; 1760 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 1761 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, 1762 o_blocks_count + add); 1763 errout: 1764 err2 = ext4_journal_stop(handle); 1765 if (err2 && !err) 1766 err = err2; 1767 1768 if (!err) { 1769 if (test_opt(sb, DEBUG)) 1770 printk(KERN_DEBUG "EXT4-fs: extended group to %llu " 1771 "blocks\n", ext4_blocks_count(es)); 1772 update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, 1773 (char *)es, sizeof(struct ext4_super_block), 0); 1774 } 1775 return err; 1776 } 1777 1778 /* 1779 * Extend the filesystem to the new number of blocks specified. This entry 1780 * point is only used to extend the current filesystem to the end of the last 1781 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" 1782 * for emergencies (because it has no dependencies on reserved blocks). 1783 * 1784 * If we _really_ wanted, we could use default values to call ext4_group_add() 1785 * allow the "remount" trick to work for arbitrary resizing, assuming enough 1786 * GDT blocks are reserved to grow to the desired size. 1787 */ 1788 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, 1789 ext4_fsblk_t n_blocks_count) 1790 { 1791 ext4_fsblk_t o_blocks_count; 1792 ext4_grpblk_t last; 1793 ext4_grpblk_t add; 1794 struct buffer_head *bh; 1795 int err; 1796 ext4_group_t group; 1797 1798 o_blocks_count = ext4_blocks_count(es); 1799 1800 if (test_opt(sb, DEBUG)) 1801 ext4_msg(sb, KERN_DEBUG, 1802 "extending last group from %llu to %llu blocks", 1803 o_blocks_count, n_blocks_count); 1804 1805 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) 1806 return 0; 1807 1808 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { 1809 ext4_msg(sb, KERN_ERR, 1810 "filesystem too large to resize to %llu blocks safely", 1811 n_blocks_count); 1812 return -EINVAL; 1813 } 1814 1815 if (n_blocks_count < o_blocks_count) { 1816 ext4_warning(sb, "can't shrink FS - resize aborted"); 1817 return -EINVAL; 1818 } 1819 1820 /* Handle the remaining blocks in the last group only. */ 1821 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); 1822 1823 if (last == 0) { 1824 ext4_warning(sb, "need to use ext2online to resize further"); 1825 return -EPERM; 1826 } 1827 1828 add = EXT4_BLOCKS_PER_GROUP(sb) - last; 1829 1830 if (o_blocks_count + add < o_blocks_count) { 1831 ext4_warning(sb, "blocks_count overflow"); 1832 return -EINVAL; 1833 } 1834 1835 if (o_blocks_count + add > n_blocks_count) 1836 add = n_blocks_count - o_blocks_count; 1837 1838 if (o_blocks_count + add < n_blocks_count) 1839 ext4_warning(sb, "will only finish group (%llu blocks, %u new)", 1840 o_blocks_count + add, add); 1841 1842 /* See if the device is actually as big as what was requested */ 1843 bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0); 1844 if (IS_ERR(bh)) { 1845 ext4_warning(sb, "can't read last block, resize aborted"); 1846 return -ENOSPC; 1847 } 1848 brelse(bh); 1849 1850 err = ext4_group_extend_no_check(sb, o_blocks_count, add); 1851 return err; 1852 } /* ext4_group_extend */ 1853 1854 1855 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups) 1856 { 1857 return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); 1858 } 1859 1860 /* 1861 * Release the resize inode and drop the resize_inode feature if there 1862 * are no more reserved gdt blocks, and then convert the file system 1863 * to enable meta_bg 1864 */ 1865 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode) 1866 { 1867 handle_t *handle; 1868 struct ext4_sb_info *sbi = EXT4_SB(sb); 1869 struct ext4_super_block *es = sbi->s_es; 1870 struct ext4_inode_info *ei = EXT4_I(inode); 1871 ext4_fsblk_t nr; 1872 int i, ret, err = 0; 1873 int credits = 1; 1874 1875 ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg"); 1876 if (inode) { 1877 if (es->s_reserved_gdt_blocks) { 1878 ext4_error(sb, "Unexpected non-zero " 1879 "s_reserved_gdt_blocks"); 1880 return -EPERM; 1881 } 1882 1883 /* Do a quick sanity check of the resize inode */ 1884 if (inode->i_blocks != 1 << (inode->i_blkbits - 1885 (9 - sbi->s_cluster_bits))) 1886 goto invalid_resize_inode; 1887 for (i = 0; i < EXT4_N_BLOCKS; i++) { 1888 if (i == EXT4_DIND_BLOCK) { 1889 if (ei->i_data[i]) 1890 continue; 1891 else 1892 goto invalid_resize_inode; 1893 } 1894 if (ei->i_data[i]) 1895 goto invalid_resize_inode; 1896 } 1897 credits += 3; /* block bitmap, bg descriptor, resize inode */ 1898 } 1899 1900 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits); 1901 if (IS_ERR(handle)) 1902 return PTR_ERR(handle); 1903 1904 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); 1905 err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, 1906 EXT4_JTR_NONE); 1907 if (err) 1908 goto errout; 1909 1910 lock_buffer(sbi->s_sbh); 1911 ext4_clear_feature_resize_inode(sb); 1912 ext4_set_feature_meta_bg(sb); 1913 sbi->s_es->s_first_meta_bg = 1914 cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count)); 1915 ext4_superblock_csum_set(sb); 1916 unlock_buffer(sbi->s_sbh); 1917 1918 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 1919 if (err) { 1920 ext4_std_error(sb, err); 1921 goto errout; 1922 } 1923 1924 if (inode) { 1925 nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]); 1926 ext4_free_blocks(handle, inode, NULL, nr, 1, 1927 EXT4_FREE_BLOCKS_METADATA | 1928 EXT4_FREE_BLOCKS_FORGET); 1929 ei->i_data[EXT4_DIND_BLOCK] = 0; 1930 inode->i_blocks = 0; 1931 1932 err = ext4_mark_inode_dirty(handle, inode); 1933 if (err) 1934 ext4_std_error(sb, err); 1935 } 1936 1937 errout: 1938 ret = ext4_journal_stop(handle); 1939 if (!err) 1940 err = ret; 1941 return ret; 1942 1943 invalid_resize_inode: 1944 ext4_error(sb, "corrupted/inconsistent resize inode"); 1945 return -EINVAL; 1946 } 1947 1948 /* 1949 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count 1950 * 1951 * @sb: super block of the fs to be resized 1952 * @n_blocks_count: the number of blocks resides in the resized fs 1953 */ 1954 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) 1955 { 1956 struct ext4_new_flex_group_data *flex_gd = NULL; 1957 struct ext4_sb_info *sbi = EXT4_SB(sb); 1958 struct ext4_super_block *es = sbi->s_es; 1959 struct buffer_head *bh; 1960 struct inode *resize_inode = NULL; 1961 ext4_grpblk_t add, offset; 1962 unsigned long n_desc_blocks; 1963 unsigned long o_desc_blocks; 1964 ext4_group_t o_group; 1965 ext4_group_t n_group; 1966 ext4_fsblk_t o_blocks_count; 1967 ext4_fsblk_t n_blocks_count_retry = 0; 1968 unsigned long last_update_time = 0; 1969 int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex; 1970 int meta_bg; 1971 1972 /* See if the device is actually as big as what was requested */ 1973 bh = ext4_sb_bread(sb, n_blocks_count - 1, 0); 1974 if (IS_ERR(bh)) { 1975 ext4_warning(sb, "can't read last block, resize aborted"); 1976 return -ENOSPC; 1977 } 1978 brelse(bh); 1979 1980 retry: 1981 o_blocks_count = ext4_blocks_count(es); 1982 1983 ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu " 1984 "to %llu blocks", o_blocks_count, n_blocks_count); 1985 1986 if (n_blocks_count < o_blocks_count) { 1987 /* On-line shrinking not supported */ 1988 ext4_warning(sb, "can't shrink FS - resize aborted"); 1989 return -EINVAL; 1990 } 1991 1992 if (n_blocks_count == o_blocks_count) 1993 /* Nothing need to do */ 1994 return 0; 1995 1996 n_group = ext4_get_group_number(sb, n_blocks_count - 1); 1997 if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { 1998 ext4_warning(sb, "resize would cause inodes_count overflow"); 1999 return -EINVAL; 2000 } 2001 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset); 2002 2003 n_desc_blocks = num_desc_blocks(sb, n_group + 1); 2004 o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count); 2005 2006 meta_bg = ext4_has_feature_meta_bg(sb); 2007 2008 if (ext4_has_feature_resize_inode(sb)) { 2009 if (meta_bg) { 2010 ext4_error(sb, "resize_inode and meta_bg enabled " 2011 "simultaneously"); 2012 return -EINVAL; 2013 } 2014 if (n_desc_blocks > o_desc_blocks + 2015 le16_to_cpu(es->s_reserved_gdt_blocks)) { 2016 n_blocks_count_retry = n_blocks_count; 2017 n_desc_blocks = o_desc_blocks + 2018 le16_to_cpu(es->s_reserved_gdt_blocks); 2019 n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); 2020 n_blocks_count = (ext4_fsblk_t)n_group * 2021 EXT4_BLOCKS_PER_GROUP(sb) + 2022 le32_to_cpu(es->s_first_data_block); 2023 n_group--; /* set to last group number */ 2024 } 2025 2026 if (!resize_inode) 2027 resize_inode = ext4_iget(sb, EXT4_RESIZE_INO, 2028 EXT4_IGET_SPECIAL); 2029 if (IS_ERR(resize_inode)) { 2030 ext4_warning(sb, "Error opening resize inode"); 2031 return PTR_ERR(resize_inode); 2032 } 2033 } 2034 2035 if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) { 2036 err = ext4_convert_meta_bg(sb, resize_inode); 2037 if (err) 2038 goto out; 2039 if (resize_inode) { 2040 iput(resize_inode); 2041 resize_inode = NULL; 2042 } 2043 if (n_blocks_count_retry) { 2044 n_blocks_count = n_blocks_count_retry; 2045 n_blocks_count_retry = 0; 2046 goto retry; 2047 } 2048 } 2049 2050 /* 2051 * Make sure the last group has enough space so that it's 2052 * guaranteed to have enough space for all metadata blocks 2053 * that it might need to hold. (We might not need to store 2054 * the inode table blocks in the last block group, but there 2055 * will be cases where this might be needed.) 2056 */ 2057 if ((ext4_group_first_block_no(sb, n_group) + 2058 ext4_group_overhead_blocks(sb, n_group) + 2 + 2059 sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) { 2060 n_blocks_count = ext4_group_first_block_no(sb, n_group); 2061 n_group--; 2062 n_blocks_count_retry = 0; 2063 if (resize_inode) { 2064 iput(resize_inode); 2065 resize_inode = NULL; 2066 } 2067 goto retry; 2068 } 2069 2070 /* extend the last group */ 2071 if (n_group == o_group) 2072 add = n_blocks_count - o_blocks_count; 2073 else 2074 add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1)); 2075 if (add > 0) { 2076 err = ext4_group_extend_no_check(sb, o_blocks_count, add); 2077 if (err) 2078 goto out; 2079 } 2080 2081 if (ext4_blocks_count(es) == n_blocks_count) 2082 goto out; 2083 2084 err = ext4_alloc_flex_bg_array(sb, n_group + 1); 2085 if (err) 2086 goto out; 2087 2088 err = ext4_mb_alloc_groupinfo(sb, n_group + 1); 2089 if (err) 2090 goto out; 2091 2092 flex_gd = alloc_flex_gd(flexbg_size); 2093 if (flex_gd == NULL) { 2094 err = -ENOMEM; 2095 goto out; 2096 } 2097 2098 /* Add flex groups. Note that a regular group is a 2099 * flex group with 1 group. 2100 */ 2101 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count, 2102 flexbg_size)) { 2103 if (jiffies - last_update_time > HZ * 10) { 2104 if (last_update_time) 2105 ext4_msg(sb, KERN_INFO, 2106 "resized to %llu blocks", 2107 ext4_blocks_count(es)); 2108 last_update_time = jiffies; 2109 } 2110 if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0) 2111 break; 2112 err = ext4_flex_group_add(sb, resize_inode, flex_gd); 2113 if (unlikely(err)) 2114 break; 2115 } 2116 2117 if (!err && n_blocks_count_retry) { 2118 n_blocks_count = n_blocks_count_retry; 2119 n_blocks_count_retry = 0; 2120 free_flex_gd(flex_gd); 2121 flex_gd = NULL; 2122 if (resize_inode) { 2123 iput(resize_inode); 2124 resize_inode = NULL; 2125 } 2126 goto retry; 2127 } 2128 2129 out: 2130 if (flex_gd) 2131 free_flex_gd(flex_gd); 2132 if (resize_inode != NULL) 2133 iput(resize_inode); 2134 if (err) 2135 ext4_warning(sb, "error (%d) occurred during " 2136 "file system resize", err); 2137 ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", 2138 ext4_blocks_count(es)); 2139 return err; 2140 } 2141