// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

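/*
 * Free the kvmalloc'ed array @to_free only after an RCU grace period has
 * elapsed, so lockless readers still traversing the old array never see it
 * freed under them.  If allocating the bookkeeping node fails, fall back to
 * synchronize_rcu(), which blocks but needs no memory.
 */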
void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}
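
/*
 * Typical usage, as in add_new_gdb() below: publish the replacement array
 * with rcu_assign_pointer() first, and only then pass the old array to
 * ext4_kvfree_array_rcu().
 */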

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyway.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with errors, because it can destroy the filesystem easily.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
}

static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group) {
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
					     ext4_group_t group) {
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group) {
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}
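
/*
 * Worked example (illustrative numbers only): if a group carries a
 * superblock backup, has two GDT blocks and s_reserved_gdt_blocks == 128,
 * its overhead is 2 + 1 + 128 = 131 blocks.  A group without a superblock
 * backup counts only the GDT blocks returned by ext4_bg_num_gdb(),
 * possibly zero.
 */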

#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))
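
/*
 * For example, verify_group_input() below requires a proposed block bitmap
 * at block b to satisfy inside(b, start, end), where [start, end) are the
 * new group's first block and one past its last block.
 */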

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add
 * one flex group at a time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t count;			/* number of groups in @groups */
};

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
 * @flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated
 * structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
		goto out2;
	flex_gd->count = flexbg_size;

	flex_gd->groups = kmalloc_array(flexbg_size,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the first group contained in @flex_gd, which may be
 * only part of a flex group.
 *
 * @sb: super block of the fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%d groups, flexbg size is %d:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

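/*
 * Ensure @handle has at least @credits journal credits left; if not, the
 * transaction is extended or restarted with EXT4_MAX_TRANS_DATA credits.
 * Returns 0 if the credits were already available, a positive value if the
 * transaction had to be restarted, and a negative errno on failure.
 */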
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
		EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * as used.
 *
 * Helper function for setup_new_flex_group_blocks() which marks the blocks
 * occupied by the group tables as in use in the new block bitmaps.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (count2 = count; count > 0;
	     count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		ext4_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 *  1. copy the super block and GDT, and initialize group tables if necessary.
 *     In this step, we only set bits in block bitmaps for blocks taken by
 *     the super block and GDT.
 *  2. allocate group tables in block bitmaps, that is, set bits in the block
 *     bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1) {
			ext4_group_t first_group;
			first_group = ext4_meta_bg_first_group(sb, group);
			if (first_group != group + 1 &&
			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
				goto handle_itb;
		}

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			ext4_set_bits(bh->b_data, 0,
				      EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
		}
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}
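
/*
 * Illustrative sketch (hypothetical helper, not part of the resize code):
 * walking every backup group on a sparse_super filesystem.
 */
static __maybe_unused void ext4_list_backups_example(struct super_block *sb)
{
	/* Counters must start at 1, 5 and 7, as documented above. */
	unsigned three = 1, five = 5, seven = 7;
	unsigned grp;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) <
	       EXT4_SB(sb)->s_groups_count)
		ext4_debug("backup super/GDT copy in group %u\n", grp);
}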

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have backups, or a negative error
 * code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * We don't need to update the block bitmaps because the blocks are still in
 * use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}
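
/*
 * Primary-copy layout assumed above (sketch; the actual numbers come from
 * the on-disk superblock):
 *
 *	s_sbh->b_blocknr:			superblock
 *	+1 .. +s_gdb_count:			live group descriptor blocks
 *	next s_reserved_gdt_blocks blocks:	reserved GDT blocks, owned by
 *						the resize inode's DIND block
 *
 * add_new_gdb() promotes the next reserved GDT block into the live GDT.
 */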

/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group) {
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, primary[i])))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = (ext4_group_first_block_no(sb, group) +
					ext4_bg_has_super(sb, group));

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, bh))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds @count group descriptors for the groups
 * starting at @group.
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup
		 * group or remove reserved blocks for the first group in a
		 * new group block.  Doing both would mean more complex code,
		 * and sane people don't use non-sparse filesystems anymore.
		 * This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb_bh);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     ext4_group_t group,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg.
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data	*group_data = flex_gd->groups;
	struct ext4_group_desc		*gdp;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	struct buffer_head		*gdb_bh;
	ext4_group_t			group;
	__u16				*bg_flags = flex_gd->bg_flags;
	int				i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: newly added groups
 */
static void ext4_update_super(struct super_block *sb,
			     struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	lock_buffer(sbi->s_sbh);
	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();
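
	/*
	 * Reader-side sketch pairing with the smp_wmb() above (illustrative,
	 * not code from this file):
	 *
	 *	ngroups = sbi->s_groups_count;
	 *	smp_rmb();
	 *	... read group descriptors of groups < ngroups ...
	 */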

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added %u groups: "
		       "%llu blocks (%llu free, %llu reserved)\n",
		       flex_gd->count, blocks_count, free_blocks,
		       reserved_blocks);
}

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb);
		sector_t old_gdb = 0;

		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
				       gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

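/*
 * ext4_setup_next_flex_gd() fills @flex_gd with the next batch of new
 * groups: from the first group after the current end of the filesystem up
 * to the end of its flex group or @n_blocks_count, whichever comes first.
 * Returns 1 if there is a batch to add, 0 once the filesystem already has
 * @n_blocks_count blocks.  For example, with flexbg_size = 16 and the
 * first new group being 40, the batch covers groups 40..47 (40 | 15 = 47)
 * unless the new size ends sooner.
 */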
static int ext4_setup_next_flex_gd(struct super_block *sb,
				    struct ext4_new_flex_group_data *flex_gd,
				    ext4_fsblk_t n_blocks_count,
				    unsigned long flexbg_size)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

	last_group = group | (flexbg_size - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
						       last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
1632 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1633 {
1634 	struct ext4_new_flex_group_data flex_gd;
1635 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1636 	struct ext4_super_block *es = sbi->s_es;
1637 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1638 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1639 	struct inode *inode = NULL;
1640 	int gdb_off;
1641 	int err;
1642 	__u16 bg_flags = 0;
1643 
1644 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1645 
1646 	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1647 		ext4_warning(sb, "Can't resize non-sparse filesystem further");
1648 		return -EPERM;
1649 	}
1650 
1651 	if (ext4_blocks_count(es) + input->blocks_count <
1652 	    ext4_blocks_count(es)) {
1653 		ext4_warning(sb, "blocks_count overflow");
1654 		return -EINVAL;
1655 	}
1656 
1657 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1658 	    le32_to_cpu(es->s_inodes_count)) {
1659 		ext4_warning(sb, "inodes_count overflow");
1660 		return -EINVAL;
1661 	}
1662 
1663 	if (reserved_gdb || gdb_off == 0) {
1664 		if (!ext4_has_feature_resize_inode(sb) ||
1665 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1666 			ext4_warning(sb,
1667 				     "No reserved GDT blocks, can't resize");
1668 			return -EPERM;
1669 		}
1670 		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1671 		if (IS_ERR(inode)) {
1672 			ext4_warning(sb, "Error opening resize inode");
1673 			return PTR_ERR(inode);
1674 		}
1675 	}
1676 
1678 	err = verify_group_input(sb, input);
1679 	if (err)
1680 		goto out;
1681 
1682 	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1683 	if (err)
1684 		goto out;
1685 
1686 	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1687 	if (err)
1688 		goto out;
1689 
1690 	flex_gd.count = 1;
1691 	flex_gd.groups = input;
1692 	flex_gd.bg_flags = &bg_flags;
1693 	err = ext4_flex_group_add(sb, inode, &flex_gd);
1694 out:
1695 	iput(inode);
1696 	return err;
1697 } /* ext4_group_add */
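
/*
 * Usage note: this entry point adds exactly one group per call.  It is
 * reached from the EXT4_IOC_GROUP_ADD ioctl, whose handler copies a
 * struct ext4_new_group_input from userspace and converts it to the
 * struct ext4_new_group_data consumed here.
 */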
1698 
1699 /*
1700  * Extend a group without checking, assuming checking has already been done.
1701  */
1702 static int ext4_group_extend_no_check(struct super_block *sb,
1703 				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1704 {
1705 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1706 	handle_t *handle;
1707 	int err = 0, err2;
1708 
1709 	/* We will update the superblock, one block bitmap, and
1710 	 * one group descriptor via ext4_group_add_blocks().
1711 	 */
1712 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1713 	if (IS_ERR(handle)) {
1714 		err = PTR_ERR(handle);
1715 		ext4_warning(sb, "error %d on journal start", err);
1716 		return err;
1717 	}
1718 
1719 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1720 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
1721 	if (err) {
1722 		ext4_warning(sb, "error %d on journal write access", err);
1723 		goto errout;
1724 	}
1725 
1726 	lock_buffer(EXT4_SB(sb)->s_sbh);
1727 	ext4_blocks_count_set(es, o_blocks_count + add);
1728 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1729 	ext4_superblock_csum_set(sb);
1730 	unlock_buffer(EXT4_SB(sb)->s_sbh);
1731 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1732 		   o_blocks_count + add);
1733 	/* We add the blocks to the bitmap and set the group's need-init bit */
1734 	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1735 	if (err)
1736 		goto errout;
1737 	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1738 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1739 		   o_blocks_count + add);
1740 errout:
1741 	err2 = ext4_journal_stop(handle);
1742 	if (err2 && !err)
1743 		err = err2;
1744 
1745 	if (!err) {
1746 		if (test_opt(sb, DEBUG))
1747 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1748 			       "blocks\n", ext4_blocks_count(es));
1749 		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1750 			       (char *)es, sizeof(struct ext4_super_block), 0);
1751 	}
1752 	return err;
1753 }
1754 
1755 /*
1756  * Extend the filesystem to the new number of blocks specified.  This entry
1757  * point is only used to extend the current filesystem to the end of the last
1758  * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
1759  * for emergencies (because it has no dependencies on reserved blocks).
1760  *
1761  * If we _really_ wanted, we could use default values to call ext4_group_add(),
1762  * allowing the "remount" trick to work for arbitrary resizing, assuming enough
1763  * GDT blocks are reserved to grow to the desired size.
1764  */
1765 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1766 		      ext4_fsblk_t n_blocks_count)
1767 {
1768 	ext4_fsblk_t o_blocks_count;
1769 	ext4_grpblk_t last;
1770 	ext4_grpblk_t add;
1771 	struct buffer_head *bh;
1772 	int err;
1773 	ext4_group_t group;
1774 
1775 	o_blocks_count = ext4_blocks_count(es);
1776 
1777 	if (test_opt(sb, DEBUG))
1778 		ext4_msg(sb, KERN_DEBUG,
1779 			 "extending last group from %llu to %llu blocks",
1780 			 o_blocks_count, n_blocks_count);
1781 
1782 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1783 		return 0;
1784 
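	/*
	 * Example of the limit enforced below: with 4KiB blocks
	 * (s_blocksize_bits == 12), n_blocks_count is capped at
	 * ~0ULL >> 3, roughly 2^61 blocks, so that the device size
	 * expressed in 512-byte sectors still fits in a 64-bit sector_t.
	 */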
1785 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1786 		ext4_msg(sb, KERN_ERR,
1787 			 "filesystem too large to resize to %llu blocks safely",
1788 			 n_blocks_count);
1789 		return -EINVAL;
1790 	}
1791 
1792 	if (n_blocks_count < o_blocks_count) {
1793 		ext4_warning(sb, "can't shrink FS - resize aborted");
1794 		return -EINVAL;
1795 	}
1796 
1797 	/* Handle the remaining blocks in the last group only. */
1798 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1799 
1800 	if (last == 0) {
1801 		ext4_warning(sb, "need to use ext2online to resize further");
1802 		return -EPERM;
1803 	}
1804 
1805 	add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1806 
1807 	if (o_blocks_count + add < o_blocks_count) {
1808 		ext4_warning(sb, "blocks_count overflow");
1809 		return -EINVAL;
1810 	}
1811 
1812 	if (o_blocks_count + add > n_blocks_count)
1813 		add = n_blocks_count - o_blocks_count;
1814 
1815 	if (o_blocks_count + add < n_blocks_count)
1816 		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1817 			     o_blocks_count + add, add);
1818 
1819 	/* See if the device is actually as big as what was requested */
1820 	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1821 	if (IS_ERR(bh)) {
1822 		ext4_warning(sb, "can't read last block, resize aborted");
1823 		return -ENOSPC;
1824 	}
1825 	brelse(bh);
1826 
1827 	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1828 	return err;
1829 } /* ext4_group_extend */
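
/*
 * Usage note: ext4_group_extend() is driven by the EXT4_IOC_GROUP_EXTEND
 * ioctl, which reads the desired total block count from userspace and,
 * after ext4_resize_begin() has taken the resize bit, calls in here.
 */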
1830 
1832 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1833 {
1834 	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1835 }
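
/*
 * Example: with 4KiB blocks and 64-byte group descriptors,
 * EXT4_DESC_PER_BLOCK(sb) is 4096 / 64 = 64, so 100 groups round up to
 * (100 + 63) / 64 = 2 descriptor blocks.
 */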
1836 
1837 /*
1838  * Release the resize inode and drop the resize_inode feature if there
1839  * are no more reserved gdt blocks, and then convert the file system
1840  * to enable meta_bg
1841  */
1842 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1843 {
1844 	handle_t *handle;
1845 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1846 	struct ext4_super_block *es = sbi->s_es;
1847 	struct ext4_inode_info *ei = EXT4_I(inode);
1848 	ext4_fsblk_t nr;
1849 	int i, ret, err = 0;
1850 	int credits = 1;
1851 
1852 	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1853 	if (inode) {
1854 		if (es->s_reserved_gdt_blocks) {
1855 			ext4_error(sb, "Unexpected non-zero "
1856 				   "s_reserved_gdt_blocks");
1857 			return -EPERM;
1858 		}
1859 
1860 		/* Do a quick sanity check of the resize inode */
1861 		if (inode->i_blocks != 1 << (inode->i_blkbits -
1862 					     (9 - sbi->s_cluster_bits)))
1863 			goto invalid_resize_inode;
1864 		for (i = 0; i < EXT4_N_BLOCKS; i++) {
1865 			if (i == EXT4_DIND_BLOCK) {
1866 				if (ei->i_data[i])
1867 					continue;
1868 				else
1869 					goto invalid_resize_inode;
1870 			}
1871 			if (ei->i_data[i])
1872 				goto invalid_resize_inode;
1873 		}
1874 		credits += 3;	/* block bitmap, bg descriptor, resize inode */
1875 	}
1876 
1877 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1878 	if (IS_ERR(handle))
1879 		return PTR_ERR(handle);
1880 
1881 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1882 	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1883 	if (err)
1884 		goto errout;
1885 
1886 	lock_buffer(sbi->s_sbh);
1887 	ext4_clear_feature_resize_inode(sb);
1888 	ext4_set_feature_meta_bg(sb);
1889 	sbi->s_es->s_first_meta_bg =
1890 		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1891 	ext4_superblock_csum_set(sb);
1892 	unlock_buffer(sbi->s_sbh);
1893 
1894 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1895 	if (err) {
1896 		ext4_std_error(sb, err);
1897 		goto errout;
1898 	}
1899 
1900 	if (inode) {
1901 		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1902 		ext4_free_blocks(handle, inode, NULL, nr, 1,
1903 				 EXT4_FREE_BLOCKS_METADATA |
1904 				 EXT4_FREE_BLOCKS_FORGET);
1905 		ei->i_data[EXT4_DIND_BLOCK] = 0;
1906 		inode->i_blocks = 0;
1907 
1908 		err = ext4_mark_inode_dirty(handle, inode);
1909 		if (err)
1910 			ext4_std_error(sb, err);
1911 	}
1912 
1913 errout:
1914 	ret = ext4_journal_stop(handle);
1915 	if (!err)
1916 		err = ret;
1917 	return err;
1918 
1919 invalid_resize_inode:
1920 	ext4_error(sb, "corrupted/inconsistent resize inode");
1921 	return -EINVAL;
1922 }
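
/*
 * Note: s_first_meta_bg above is set to the number of descriptor blocks
 * already in use, so the existing descriptor blocks keep the traditional
 * layout and only descriptor blocks added beyond that point use the
 * meta_bg scheme.  E.g. 100 existing groups with 64 descriptors per
 * block give s_first_meta_bg = 2.
 */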
1923 
1924 /*
1925  * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1926  *
1927  * @sb: super block of the fs to be resized
1928  * @n_blocks_count: the number of blocks in the resized fs
1929  */
1930 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1931 {
1932 	struct ext4_new_flex_group_data *flex_gd = NULL;
1933 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1934 	struct ext4_super_block *es = sbi->s_es;
1935 	struct buffer_head *bh;
1936 	struct inode *resize_inode = NULL;
1937 	ext4_grpblk_t add, offset;
1938 	unsigned long n_desc_blocks;
1939 	unsigned long o_desc_blocks;
1940 	ext4_group_t o_group;
1941 	ext4_group_t n_group;
1942 	ext4_fsblk_t o_blocks_count;
1943 	ext4_fsblk_t n_blocks_count_retry = 0;
1944 	unsigned long last_update_time = 0;
1945 	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
1946 	int meta_bg;
1947 
1948 	/* See if the device is actually as big as what was requested */
1949 	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
1950 	if (IS_ERR(bh)) {
1951 		ext4_warning(sb, "can't read last block, resize aborted");
1952 		return -ENOSPC;
1953 	}
1954 	brelse(bh);
1955 
1956 retry:
1957 	o_blocks_count = ext4_blocks_count(es);
1958 
1959 	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
1960 		 "to %llu blocks", o_blocks_count, n_blocks_count);
1961 
1962 	if (n_blocks_count < o_blocks_count) {
1963 		/* On-line shrinking not supported */
1964 		ext4_warning(sb, "can't shrink FS - resize aborted");
1965 		return -EINVAL;
1966 	}
1967 
1968 	if (n_blocks_count == o_blocks_count)
1969 		/* Nothing to do */
1970 		return 0;
1971 
1972 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
1973 	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1974 		ext4_warning(sb, "resize would cause inodes_count overflow");
1975 		return -EINVAL;
1976 	}
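	/*
	 * Example: with 8192 inodes per group the check above requires
	 * n_group < 0xFFFFFFFF / 8192 = 524287, since s_inodes_count is
	 * a 32-bit field.
	 */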
1977 	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
1978 
1979 	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
1980 	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
1981 
1982 	meta_bg = ext4_has_feature_meta_bg(sb);
1983 
1984 	if (ext4_has_feature_resize_inode(sb)) {
1985 		if (meta_bg) {
1986 			ext4_error(sb, "resize_inode and meta_bg enabled "
1987 				   "simultaneously");
1988 			return -EINVAL;
1989 		}
1990 		if (n_desc_blocks > o_desc_blocks +
1991 		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
1992 			n_blocks_count_retry = n_blocks_count;
1993 			n_desc_blocks = o_desc_blocks +
1994 				le16_to_cpu(es->s_reserved_gdt_blocks);
1995 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
1996 			n_blocks_count = (ext4_fsblk_t)n_group *
1997 				EXT4_BLOCKS_PER_GROUP(sb) +
1998 				le32_to_cpu(es->s_first_data_block);
1999 			n_group--; /* set to last group number */
2000 		}
2001 
2002 		if (!resize_inode)
2003 			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2004 						 EXT4_IGET_SPECIAL);
2005 		if (IS_ERR(resize_inode)) {
2006 			ext4_warning(sb, "Error opening resize inode");
2007 			return PTR_ERR(resize_inode);
2008 		}
2009 	}
2010 
2011 	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
2012 		err = ext4_convert_meta_bg(sb, resize_inode);
2013 		if (err)
2014 			goto out;
2015 		if (resize_inode) {
2016 			iput(resize_inode);
2017 			resize_inode = NULL;
2018 		}
2019 		if (n_blocks_count_retry) {
2020 			n_blocks_count = n_blocks_count_retry;
2021 			n_blocks_count_retry = 0;
2022 			goto retry;
2023 		}
2024 	}
2025 
2026 	/*
2027 	 * Make sure the last group has enough space so that it's
2028 	 * guaranteed to have enough space for all metadata blocks
2029 	 * that it might need to hold.  (We might not need to store
2030 	 * the inode table blocks in the last block group, but there
2031 	 * will be cases where this might be needed.)
2032 	 */
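	/*
	 * The "+ 2" below covers the block and inode bitmaps; together
	 * with the superblock/GDT overhead, the inode table, and one
	 * cluster of slack, this is the minimum space the last group must
	 * have.  If fewer blocks remain, end the resize at the previous
	 * group boundary and retry.
	 */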
2033 	if ((ext4_group_first_block_no(sb, n_group) +
2034 	     ext4_group_overhead_blocks(sb, n_group) + 2 +
2035 	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2036 		n_blocks_count = ext4_group_first_block_no(sb, n_group);
2037 		n_group--;
2038 		n_blocks_count_retry = 0;
2039 		if (resize_inode) {
2040 			iput(resize_inode);
2041 			resize_inode = NULL;
2042 		}
2043 		goto retry;
2044 	}
2045 
2046 	/* extend the last group */
2047 	if (n_group == o_group)
2048 		add = n_blocks_count - o_blocks_count;
2049 	else
2050 		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
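	/*
	 * In the multi-group case "offset" is the cluster offset of the
	 * last pre-resize block within o_group, so "add" fills o_group out
	 * to a full group before any brand-new groups are laid down.
	 */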
2051 	if (add > 0) {
2052 		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2053 		if (err)
2054 			goto out;
2055 	}
2056 
2057 	if (ext4_blocks_count(es) == n_blocks_count)
2058 		goto out;
2059 
2060 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2061 	if (err)
2062 		goto out;
2063 
2064 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2065 	if (err)
2066 		goto out;
2067 
2068 	flex_gd = alloc_flex_gd(flexbg_size);
2069 	if (flex_gd == NULL) {
2070 		err = -ENOMEM;
2071 		goto out;
2072 	}
2073 
2074 	/* Add flex groups.  Note that a regular group is simply a flex
2075 	 * group containing a single group.
2076 	 */
2077 	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2078 					      flexbg_size)) {
2079 		if (jiffies - last_update_time > HZ * 10) {
2080 			if (last_update_time)
2081 				ext4_msg(sb, KERN_INFO,
2082 					 "resized to %llu blocks",
2083 					 ext4_blocks_count(es));
2084 			last_update_time = jiffies;
2085 		}
2086 		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2087 			break;
2088 		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2089 		if (unlikely(err))
2090 			break;
2091 	}
2092 
2093 	if (!err && n_blocks_count_retry) {
2094 		n_blocks_count = n_blocks_count_retry;
2095 		n_blocks_count_retry = 0;
2096 		free_flex_gd(flex_gd);
2097 		flex_gd = NULL;
2098 		if (resize_inode) {
2099 			iput(resize_inode);
2100 			resize_inode = NULL;
2101 		}
2102 		goto retry;
2103 	}
2104 
2105 out:
2106 	if (flex_gd)
2107 		free_flex_gd(flex_gd);
2108 	if (resize_inode != NULL)
2109 		iput(resize_inode);
2110 	if (err)
2111 		ext4_warning(sb, "error (%d) occurred during "
2112 			     "file system resize", err);
2113 	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2114 		 ext4_blocks_count(es));
2115 	return err;
2116 }
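
/*
 * Usage sketch (userspace side, illustrative only): an online grow such
 * as the one resize2fs performs reduces to
 *
 *	__u64 n_blocks_count = desired_size_in_fs_blocks;
 *	ioctl(fd, EXT4_IOC_RESIZE_FS, &n_blocks_count);
 *
 * which reaches ext4_resize_fs() once ext4_resize_begin() has taken the
 * resize bit.
 */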
2117