xref: /openbmc/linux/fs/ext4/resize.c (revision bbc605cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/resize.c
4  *
5  * Support for resizing an ext4 filesystem while it is mounted.
6  *
7  * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
8  *
9  * This could probably be made into a module, because it is not often in use.
10  */
11 
12 
13 #define EXT4FS_DEBUG
14 
15 #include <linux/errno.h>
16 #include <linux/slab.h>
17 
18 #include "ext4_jbd2.h"
19 
20 struct ext4_rcu_ptr {
21 	struct rcu_head rcu;
22 	void *ptr;
23 };
24 
25 static void ext4_rcu_ptr_callback(struct rcu_head *head)
26 {
27 	struct ext4_rcu_ptr *ptr;
28 
29 	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
30 	kvfree(ptr->ptr);
31 	kfree(ptr);
32 }
33 
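/*
 * Free a kvmalloc'd array once any RCU readers that may still hold a
 * reference to it have finished.  Illustrative usage (a sketch of the
 * pattern used by add_new_gdb() below): publish the replacement array
 * first, then hand the old one to this helper so that lockless readers
 * never see freed memory:
 *
 *	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
 *	ext4_kvfree_array_rcu(o_group_desc);
 */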
34 void ext4_kvfree_array_rcu(void *to_free)
35 {
36 	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
37 
38 	if (ptr) {
39 		ptr->ptr = to_free;
40 		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
41 		return;
42 	}
43 	synchronize_rcu();
44 	kvfree(to_free);
45 }
46 
47 int ext4_resize_begin(struct super_block *sb)
48 {
49 	struct ext4_sb_info *sbi = EXT4_SB(sb);
50 	int ret = 0;
51 
52 	if (!capable(CAP_SYS_RESOURCE))
53 		return -EPERM;
54 
55 	/*
56 	 * If we are not using the primary superblock/GDT copy don't resize,
57 	 * because the user tools have no way of handling this.  Probably a
58 	 * bad time to do it anyway.
59 	 */
60 	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
61 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
62 		ext4_warning(sb, "won't resize using backup superblock at %llu",
63 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
64 		return -EPERM;
65 	}
66 
67 	/*
68 	 * We are not allowed to do online resizing of a filesystem mounted
69 	 * with errors, because it can destroy the filesystem easily.
70 	 */
71 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
72 		ext4_warning(sb, "There are errors in the filesystem, "
73 			     "so online resizing is not allowed");
74 		return -EPERM;
75 	}
76 
77 	if (ext4_has_feature_sparse_super2(sb)) {
78 		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
79 		return -EOPNOTSUPP;
80 	}
81 
82 	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
83 				  &EXT4_SB(sb)->s_ext4_flags))
84 		ret = -EBUSY;
85 
86 	return ret;
87 }
88 
89 void ext4_resize_end(struct super_block *sb)
90 {
91 	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
92 	smp_mb__after_atomic();
93 }
94 
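/*
 * Return the first group of the meta_bg group that @group belongs to.
 * For example, assuming 4 KiB blocks and 64-byte descriptors (so
 * EXT4_DESC_PER_BLOCK == 64), groups 64..127 would all map to group 64.
 */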
95 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
96 					     ext4_group_t group) {
97 	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
98 	       EXT4_DESC_PER_BLOCK_BITS(sb);
99 }
100 
101 static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
102 					     ext4_group_t group) {
103 	group = ext4_meta_bg_first_group(sb, group);
104 	return ext4_group_first_block_no(sb, group);
105 }
106 
107 static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
108 						ext4_group_t group) {
109 	ext4_grpblk_t overhead;
110 	overhead = ext4_bg_num_gdb(sb, group);
111 	if (ext4_bg_has_super(sb, group))
112 		overhead += 1 +
113 			  le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
114 	return overhead;
115 }
116 
117 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
118 #define inside(b, first, last)	((b) >= (first) && (b) < (last))
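/*
 * Both macros treat [first, last) as a half-open interval: for example,
 * inside(10, 10, 20) is true while inside(20, 10, 20) is false.
 */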
119 
120 static int verify_group_input(struct super_block *sb,
121 			      struct ext4_new_group_data *input)
122 {
123 	struct ext4_sb_info *sbi = EXT4_SB(sb);
124 	struct ext4_super_block *es = sbi->s_es;
125 	ext4_fsblk_t start = ext4_blocks_count(es);
126 	ext4_fsblk_t end = start + input->blocks_count;
127 	ext4_group_t group = input->group;
128 	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
129 	unsigned overhead;
130 	ext4_fsblk_t metaend;
131 	struct buffer_head *bh = NULL;
132 	ext4_grpblk_t free_blocks_count, offset;
133 	int err = -EINVAL;
134 
135 	if (group != sbi->s_groups_count) {
136 		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
137 			     input->group, sbi->s_groups_count);
138 		return -EINVAL;
139 	}
140 
141 	overhead = ext4_group_overhead_blocks(sb, group);
142 	metaend = start + overhead;
143 	input->free_clusters_count = free_blocks_count =
144 		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
145 
146 	if (test_opt(sb, DEBUG))
147 		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
148 		       "(%d free, %u reserved)\n",
149 		       ext4_bg_has_super(sb, input->group) ? "normal" :
150 		       "no-super", input->group, input->blocks_count,
151 		       free_blocks_count, input->reserved_blocks);
152 
153 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
154 	if (offset != 0)
155 		ext4_warning(sb, "Last group not full");
156 	else if (input->reserved_blocks > input->blocks_count / 5)
157 		ext4_warning(sb, "Reserved blocks too high (%u)",
158 			     input->reserved_blocks);
159 	else if (free_blocks_count < 0)
160 		ext4_warning(sb, "Bad blocks count %u",
161 			     input->blocks_count);
162 	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
163 		err = PTR_ERR(bh);
164 		bh = NULL;
165 		ext4_warning(sb, "Cannot read last block (%llu)",
166 			     end - 1);
167 	} else if (outside(input->block_bitmap, start, end))
168 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
169 			     (unsigned long long)input->block_bitmap);
170 	else if (outside(input->inode_bitmap, start, end))
171 		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
172 			     (unsigned long long)input->inode_bitmap);
173 	else if (outside(input->inode_table, start, end) ||
174 		 outside(itend - 1, start, end))
175 		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
176 			     (unsigned long long)input->inode_table, itend - 1);
177 	else if (input->inode_bitmap == input->block_bitmap)
178 		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
179 			     (unsigned long long)input->block_bitmap);
180 	else if (inside(input->block_bitmap, input->inode_table, itend))
181 		ext4_warning(sb, "Block bitmap (%llu) in inode table "
182 			     "(%llu-%llu)",
183 			     (unsigned long long)input->block_bitmap,
184 			     (unsigned long long)input->inode_table, itend - 1);
185 	else if (inside(input->inode_bitmap, input->inode_table, itend))
186 		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
187 			     "(%llu-%llu)",
188 			     (unsigned long long)input->inode_bitmap,
189 			     (unsigned long long)input->inode_table, itend - 1);
190 	else if (inside(input->block_bitmap, start, metaend))
191 		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
192 			     (unsigned long long)input->block_bitmap,
193 			     start, metaend - 1);
194 	else if (inside(input->inode_bitmap, start, metaend))
195 		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
196 			     (unsigned long long)input->inode_bitmap,
197 			     start, metaend - 1);
198 	else if (inside(input->inode_table, start, metaend) ||
199 		 inside(itend - 1, start, metaend))
200 		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
201 			     "(%llu-%llu)",
202 			     (unsigned long long)input->inode_table,
203 			     itend - 1, start, metaend - 1);
204 	else
205 		err = 0;
206 	brelse(bh);
207 
208 	return err;
209 }
210 
211 /*
212  * ext4_new_flex_group_data is used by the 64-bit resize interface to add
213  * one flex group at a time.
214  */
215 struct ext4_new_flex_group_data {
216 	struct ext4_new_group_data *groups;	/* new_group_data for groups
217 						   in the flex group */
218 	__u16 *bg_flags;			/* block group flags of groups
219 						   in @groups */
220 	ext4_group_t count;			/* number of groups in @groups
221 						 */
222 };
223 
224 /*
225  * alloc_flex_gd() allocates an ext4_new_flex_group_data structure sized
226  * for @flexbg_size groups.
227  *
228  * Returns NULL on failure, otherwise the address of the allocated structure.
229  */
230 static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
231 {
232 	struct ext4_new_flex_group_data *flex_gd;
233 
234 	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
235 	if (flex_gd == NULL)
236 		goto out3;
237 
238 	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
239 		goto out2;
240 	flex_gd->count = flexbg_size;
241 
242 	flex_gd->groups = kmalloc_array(flexbg_size,
243 					sizeof(struct ext4_new_group_data),
244 					GFP_NOFS);
245 	if (flex_gd->groups == NULL)
246 		goto out2;
247 
248 	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
249 					  GFP_NOFS);
250 	if (flex_gd->bg_flags == NULL)
251 		goto out1;
252 
253 	return flex_gd;
254 
255 out1:
256 	kfree(flex_gd->groups);
257 out2:
258 	kfree(flex_gd);
259 out3:
260 	return NULL;
261 }
262 
263 static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
264 {
265 	kfree(flex_gd->bg_flags);
266 	kfree(flex_gd->groups);
267 	kfree(flex_gd);
268 }
269 
270 /*
271  * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
272  * and inode tables for a flex group.
273  *
274  * This function is used by the 64-bit resize path.  Note that it allocates
275  * group tables starting from the first group contained in @flex_gd, which
276  * may cover only part of a flex group.
277  *
278  * @sb: super block of fs to which the groups belongs
279  *
280  * Returns 0 on a successful allocation of the metadata blocks in the
281  * block group.
282  */
283 static int ext4_alloc_group_tables(struct super_block *sb,
284 				struct ext4_new_flex_group_data *flex_gd,
285 				int flexbg_size)
286 {
287 	struct ext4_new_group_data *group_data = flex_gd->groups;
288 	ext4_fsblk_t start_blk;
289 	ext4_fsblk_t last_blk;
290 	ext4_group_t src_group;
291 	ext4_group_t bb_index = 0;
292 	ext4_group_t ib_index = 0;
293 	ext4_group_t it_index = 0;
294 	ext4_group_t group;
295 	ext4_group_t last_group;
296 	unsigned overhead;
297 	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
298 	int i;
299 
300 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
301 
302 	src_group = group_data[0].group;
303 	last_group  = src_group + flex_gd->count - 1;
304 
305 	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
306 	       (last_group & ~(flexbg_size - 1))));
307 next_group:
308 	group = group_data[0].group;
309 	if (src_group >= group_data[0].group + flex_gd->count)
310 		return -ENOSPC;
311 	start_blk = ext4_group_first_block_no(sb, src_group);
312 	last_blk = start_blk + group_data[src_group - group].blocks_count;
313 
314 	overhead = ext4_group_overhead_blocks(sb, src_group);
315 
316 	start_blk += overhead;
317 
318 	/* We collect as many contiguous blocks as possible. */
319 	src_group++;
320 	for (; src_group <= last_group; src_group++) {
321 		overhead = ext4_group_overhead_blocks(sb, src_group);
322 		if (overhead == 0)
323 			last_blk += group_data[src_group - group].blocks_count;
324 		else
325 			break;
326 	}
327 
328 	/* Allocate block bitmaps */
329 	for (; bb_index < flex_gd->count; bb_index++) {
330 		if (start_blk >= last_blk)
331 			goto next_group;
332 		group_data[bb_index].block_bitmap = start_blk++;
333 		group = ext4_get_group_number(sb, start_blk - 1);
334 		group -= group_data[0].group;
335 		group_data[group].mdata_blocks++;
336 		flex_gd->bg_flags[group] &= uninit_mask;
337 	}
338 
339 	/* Allocate inode bitmaps */
340 	for (; ib_index < flex_gd->count; ib_index++) {
341 		if (start_blk >= last_blk)
342 			goto next_group;
343 		group_data[ib_index].inode_bitmap = start_blk++;
344 		group = ext4_get_group_number(sb, start_blk - 1);
345 		group -= group_data[0].group;
346 		group_data[group].mdata_blocks++;
347 		flex_gd->bg_flags[group] &= uninit_mask;
348 	}
349 
350 	/* Allocate inode tables */
351 	for (; it_index < flex_gd->count; it_index++) {
352 		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
353 		ext4_fsblk_t next_group_start;
354 
355 		if (start_blk + itb > last_blk)
356 			goto next_group;
357 		group_data[it_index].inode_table = start_blk;
358 		group = ext4_get_group_number(sb, start_blk);
359 		next_group_start = ext4_group_first_block_no(sb, group + 1);
360 		group -= group_data[0].group;
361 
362 		if (start_blk + itb > next_group_start) {
363 			flex_gd->bg_flags[group + 1] &= uninit_mask;
364 			overhead = start_blk + itb - next_group_start;
365 			group_data[group + 1].mdata_blocks += overhead;
366 			itb -= overhead;
367 		}
368 
369 		group_data[group].mdata_blocks += itb;
370 		flex_gd->bg_flags[group] &= uninit_mask;
371 		start_blk += EXT4_SB(sb)->s_itb_per_group;
372 	}
373 
374 	/* Update free clusters count to exclude metadata blocks */
375 	for (i = 0; i < flex_gd->count; i++) {
376 		group_data[i].free_clusters_count -=
377 				EXT4_NUM_B2C(EXT4_SB(sb),
378 					     group_data[i].mdata_blocks);
379 	}
380 
381 	if (test_opt(sb, DEBUG)) {
382 		int i;
383 		group = group_data[0].group;
384 
385 		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
386 		       "%d groups, flexbg size is %d:\n", flex_gd->count,
387 		       flexbg_size);
388 
389 		for (i = 0; i < flex_gd->count; i++) {
390 			ext4_debug(
391 			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
392 			       ext4_bg_has_super(sb, group + i) ? "normal" :
393 			       "no-super", group + i,
394 			       group_data[i].blocks_count,
395 			       group_data[i].free_clusters_count,
396 			       group_data[i].mdata_blocks);
397 		}
398 	}
399 	return 0;
400 }
401 
402 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
403 				  ext4_fsblk_t blk)
404 {
405 	struct buffer_head *bh;
406 	int err;
407 
408 	bh = sb_getblk(sb, blk);
409 	if (unlikely(!bh))
410 		return ERR_PTR(-ENOMEM);
411 	BUFFER_TRACE(bh, "get_write_access");
412 	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
413 	if (err) {
414 		brelse(bh);
415 		bh = ERR_PTR(err);
416 	} else {
417 		memset(bh->b_data, 0, sb->s_blocksize);
418 		set_buffer_uptodate(bh);
419 	}
420 
421 	return bh;
422 }
423 
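/*
 * Make sure @handle has at least @credits journal credits, extending the
 * transaction (by up to EXT4_MAX_TRANS_DATA credits) or restarting it if
 * necessary.  As with ext4_journal_ensure_credits_fn(), a return of 0
 * should mean the handle was untouched, a positive value that the
 * transaction was restarted, and a negative value an error.
 */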
424 static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
425 {
426 	return ext4_journal_ensure_credits_fn(handle, credits,
427 		EXT4_MAX_TRANS_DATA, 0, 0);
428 }
429 
430 /*
431  * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
432  * used.  It is a helper for setup_new_flex_group_blocks(), which uses it to
433  * mark the blocks occupied by the new group tables in the block bitmaps.
434  *
435  * @sb: super block
436  * @handle: journal handle
437  * @flex_gd: flex group data
438  */
439 static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
440 			struct ext4_new_flex_group_data *flex_gd,
441 			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
442 {
443 	struct ext4_sb_info *sbi = EXT4_SB(sb);
444 	ext4_group_t count = last_cluster - first_cluster + 1;
445 	ext4_group_t count2;
446 
447 	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
448 		   last_cluster);
449 	for (count2 = count; count > 0;
450 	     count -= count2, first_cluster += count2) {
451 		ext4_fsblk_t start;
452 		struct buffer_head *bh;
453 		ext4_group_t group;
454 		int err;
455 
456 		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
457 		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
458 		group -= flex_gd->groups[0].group;
459 
460 		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
461 		if (count2 > count)
462 			count2 = count;
463 
464 		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
465 			BUG_ON(flex_gd->count > 1);
466 			continue;
467 		}
468 
469 		err = ext4_resize_ensure_credits_batch(handle, 1);
470 		if (err < 0)
471 			return err;
472 
473 		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
474 		if (unlikely(!bh))
475 			return -ENOMEM;
476 
477 		BUFFER_TRACE(bh, "get_write_access");
478 		err = ext4_journal_get_write_access(handle, sb, bh,
479 						    EXT4_JTR_NONE);
480 		if (err) {
481 			brelse(bh);
482 			return err;
483 		}
484 		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
485 			   first_cluster, first_cluster - start, count2);
486 		ext4_set_bits(bh->b_data, first_cluster - start, count2);
487 
488 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
489 		brelse(bh);
490 		if (unlikely(err))
491 			return err;
492 	}
493 
494 	return 0;
495 }
496 
497 /*
498  * Set up the block and inode bitmaps, and the inode table for the new groups.
499  * This doesn't need to be part of the main transaction, since we are only
500  * changing blocks outside the actual filesystem.  We still do journaling to
501  * ensure the recovery is correct in case of a failure just after resize.
502  * If any part of this fails, we simply abort the resize.
503  *
504  * setup_new_flex_group_blocks handles a flex group as follows:
505  *  1. copy super block and GDT, and initialize group tables if necessary.
506  *     In this step, we only set bits in block bitmaps for blocks taken by
507  *     the super block and GDT.
508  *  2. allocate group tables in block bitmaps, that is, set bits in the
509  *     block bitmaps for blocks taken by group tables.
510  */
511 static int setup_new_flex_group_blocks(struct super_block *sb,
512 				struct ext4_new_flex_group_data *flex_gd)
513 {
514 	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
515 	ext4_fsblk_t start;
516 	ext4_fsblk_t block;
517 	struct ext4_sb_info *sbi = EXT4_SB(sb);
518 	struct ext4_super_block *es = sbi->s_es;
519 	struct ext4_new_group_data *group_data = flex_gd->groups;
520 	__u16 *bg_flags = flex_gd->bg_flags;
521 	handle_t *handle;
522 	ext4_group_t group, count;
523 	struct buffer_head *bh = NULL;
524 	int reserved_gdb, i, j, err = 0, err2;
525 	int meta_bg;
526 
527 	BUG_ON(!flex_gd->count || !group_data ||
528 	       group_data[0].group != sbi->s_groups_count);
529 
530 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
531 	meta_bg = ext4_has_feature_meta_bg(sb);
532 
533 	/* This transaction may be extended/restarted along the way */
534 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
535 	if (IS_ERR(handle))
536 		return PTR_ERR(handle);
537 
538 	group = group_data[0].group;
539 	for (i = 0; i < flex_gd->count; i++, group++) {
540 		unsigned long gdblocks;
541 		ext4_grpblk_t overhead;
542 
543 		gdblocks = ext4_bg_num_gdb(sb, group);
544 		start = ext4_group_first_block_no(sb, group);
545 
546 		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
547 			goto handle_itb;
548 
549 		if (meta_bg == 1) {
550 			ext4_group_t first_group;
551 			first_group = ext4_meta_bg_first_group(sb, group);
552 			if (first_group != group + 1 &&
553 			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
554 				goto handle_itb;
555 		}
556 
557 		block = start + ext4_bg_has_super(sb, group);
558 		/* Copy all of the GDT blocks into the backup in this group */
559 		for (j = 0; j < gdblocks; j++, block++) {
560 			struct buffer_head *gdb;
561 
562 			ext4_debug("update backup group %#04llx\n", block);
563 			err = ext4_resize_ensure_credits_batch(handle, 1);
564 			if (err < 0)
565 				goto out;
566 
567 			gdb = sb_getblk(sb, block);
568 			if (unlikely(!gdb)) {
569 				err = -ENOMEM;
570 				goto out;
571 			}
572 
573 			BUFFER_TRACE(gdb, "get_write_access");
574 			err = ext4_journal_get_write_access(handle, sb, gdb,
575 							    EXT4_JTR_NONE);
576 			if (err) {
577 				brelse(gdb);
578 				goto out;
579 			}
580 			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
581 				s_group_desc, j)->b_data, gdb->b_size);
582 			set_buffer_uptodate(gdb);
583 
584 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
585 			if (unlikely(err)) {
586 				brelse(gdb);
587 				goto out;
588 			}
589 			brelse(gdb);
590 		}
591 
592 		/* Zero out all of the reserved backup group descriptor
593 		 * table blocks
594 		 */
595 		if (ext4_bg_has_super(sb, group)) {
596 			err = sb_issue_zeroout(sb, gdblocks + start + 1,
597 					reserved_gdb, GFP_NOFS);
598 			if (err)
599 				goto out;
600 		}
601 
602 handle_itb:
603 		/* Initialize the group tables of group @group */
604 		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
605 			goto handle_bb;
606 
607 		/* Zero out all of the inode table blocks */
608 		block = group_data[i].inode_table;
609 		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
610 			   block, sbi->s_itb_per_group);
611 		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
612 				       GFP_NOFS);
613 		if (err)
614 			goto out;
615 
616 handle_bb:
617 		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
618 			goto handle_ib;
619 
620 		/* Initialize block bitmap of the @group */
621 		block = group_data[i].block_bitmap;
622 		err = ext4_resize_ensure_credits_batch(handle, 1);
623 		if (err < 0)
624 			goto out;
625 
626 		bh = bclean(handle, sb, block);
627 		if (IS_ERR(bh)) {
628 			err = PTR_ERR(bh);
629 			goto out;
630 		}
631 		overhead = ext4_group_overhead_blocks(sb, group);
632 		if (overhead != 0) {
633 			ext4_debug("mark backup superblock %#04llx (+0)\n",
634 				   start);
635 			ext4_set_bits(bh->b_data, 0,
636 				      EXT4_NUM_B2C(sbi, overhead));
637 		}
638 		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
639 				     sb->s_blocksize * 8, bh->b_data);
640 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
641 		brelse(bh);
642 		if (err)
643 			goto out;
644 
645 handle_ib:
646 		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
647 			continue;
648 
649 		/* Initialize inode bitmap of the @group */
650 		block = group_data[i].inode_bitmap;
651 		err = ext4_resize_ensure_credits_batch(handle, 1);
652 		if (err < 0)
653 			goto out;
654 		/* Mark unused entries in inode bitmap used */
655 		bh = bclean(handle, sb, block);
656 		if (IS_ERR(bh)) {
657 			err = PTR_ERR(bh);
658 			goto out;
659 		}
660 
661 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
662 				     sb->s_blocksize * 8, bh->b_data);
663 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
664 		brelse(bh);
665 		if (err)
666 			goto out;
667 	}
668 
669 	/* Mark group tables in block bitmap */
670 	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
671 		count = group_table_count[j];
672 		start = (&group_data[0].block_bitmap)[j];
673 		block = start;
674 		for (i = 1; i < flex_gd->count; i++) {
675 			block += group_table_count[j];
676 			if (block == (&group_data[i].block_bitmap)[j]) {
677 				count += group_table_count[j];
678 				continue;
679 			}
680 			err = set_flexbg_block_bitmap(sb, handle,
681 						      flex_gd,
682 						      EXT4_B2C(sbi, start),
683 						      EXT4_B2C(sbi,
684 							       start + count
685 							       - 1));
686 			if (err)
687 				goto out;
688 			count = group_table_count[j];
689 			start = (&group_data[i].block_bitmap)[j];
690 			block = start;
691 		}
692 
693 		if (count) {
694 			err = set_flexbg_block_bitmap(sb, handle,
695 						      flex_gd,
696 						      EXT4_B2C(sbi, start),
697 						      EXT4_B2C(sbi,
698 							       start + count
699 							       - 1));
700 			if (err)
701 				goto out;
702 		}
703 	}
704 
705 out:
706 	err2 = ext4_journal_stop(handle);
707 	if (err2 && !err)
708 		err = err2;
709 
710 	return err;
711 }
712 
713 /*
714  * Iterate through the groups which hold BACKUP superblock/GDT copies in an
715  * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
716  * calling this for the first time.  In a sparse filesystem it will be the
717  * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
718  * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
719  */
720 unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
721 			       unsigned int *five, unsigned int *seven)
722 {
723 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
724 	unsigned int *min = three;
725 	int mult = 3;
726 	unsigned int ret;
727 
728 	if (ext4_has_feature_sparse_super2(sb)) {
729 		do {
730 			if (*min > 2)
731 				return UINT_MAX;
732 			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
733 			*min += 1;
734 		} while (!ret);
735 		return ret;
736 	}
737 
738 	if (!ext4_has_feature_sparse_super(sb)) {
739 		ret = *min;
740 		*min += 1;
741 		return ret;
742 	}
743 
744 	if (*five < *min) {
745 		min = five;
746 		mult = 5;
747 	}
748 	if (*seven < *min) {
749 		min = seven;
750 		mult = 7;
751 	}
752 
753 	ret = *min;
754 	*min *= mult;
755 
756 	return ret;
757 }
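/*
 * Illustrative sketch: on a sparse_super filesystem with fewer than 100
 * groups, enumerating the backups like this visits groups 1, 3, 5, 7, 9,
 * 25, 27, 49 and 81, in that order:
 *
 *	unsigned three = 1, five = 5, seven = 7;
 *	unsigned grp;
 *
 *	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < 100)
 *		ext4_debug("backup metadata in group %u\n", grp);
 */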
758 
759 /*
760  * Check that all of the backup GDT blocks are held in the primary GDT block.
761  * It is assumed that they are stored in group order.  Returns the number of
762  * groups in the current filesystem that have backups, or a negative error code.
763  */
764 static int verify_reserved_gdb(struct super_block *sb,
765 			       ext4_group_t end,
766 			       struct buffer_head *primary)
767 {
768 	const ext4_fsblk_t blk = primary->b_blocknr;
769 	unsigned three = 1;
770 	unsigned five = 5;
771 	unsigned seven = 7;
772 	unsigned grp;
773 	__le32 *p = (__le32 *)primary->b_data;
774 	int gdbackups = 0;
775 
776 	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
777 		if (le32_to_cpu(*p++) !=
778 		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
779 			ext4_warning(sb, "reserved GDT %llu"
780 				     " missing grp %d (%llu)",
781 				     blk, grp,
782 				     grp *
783 				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
784 				     blk);
785 			return -EINVAL;
786 		}
787 		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
788 			return -EFBIG;
789 	}
790 
791 	return gdbackups;
792 }
793 
794 /*
795  * Called when we need to bring a reserved group descriptor table block into
796  * use from the resize inode.  The primary copy of the new GDT block currently
797  * is an indirect block (under the double indirect block in the resize inode).
798  * The new backup GDT blocks will be stored as leaf blocks in this indirect
799  * block, in group order.  Even though we know all the block numbers we need,
800  * we check to ensure that the resize inode has actually reserved these blocks.
801  *
802  * Don't need to update the block bitmaps because the blocks are still in use.
803  *
804  * We get all of the error cases out of the way, so that we are sure to not
805  * fail once we start modifying the data on disk, because JBD has no rollback.
806  */
807 static int add_new_gdb(handle_t *handle, struct inode *inode,
808 		       ext4_group_t group)
809 {
810 	struct super_block *sb = inode->i_sb;
811 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
812 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
813 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
814 	struct buffer_head **o_group_desc, **n_group_desc = NULL;
815 	struct buffer_head *dind = NULL;
816 	struct buffer_head *gdb_bh = NULL;
817 	int gdbackups;
818 	struct ext4_iloc iloc = { .bh = NULL };
819 	__le32 *data;
820 	int err;
821 
822 	if (test_opt(sb, DEBUG))
823 		printk(KERN_DEBUG
824 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
825 		       gdb_num);
826 
827 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
828 	if (IS_ERR(gdb_bh))
829 		return PTR_ERR(gdb_bh);
830 
831 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
832 	if (gdbackups < 0) {
833 		err = gdbackups;
834 		goto errout;
835 	}
836 
837 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
838 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
839 	if (IS_ERR(dind)) {
840 		err = PTR_ERR(dind);
841 		dind = NULL;
842 		goto errout;
843 	}
844 
845 	data = (__le32 *)dind->b_data;
846 	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
847 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
848 			     group, gdblock);
849 		err = -EINVAL;
850 		goto errout;
851 	}
852 
853 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
854 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
855 					    EXT4_JTR_NONE);
856 	if (unlikely(err))
857 		goto errout;
858 
859 	BUFFER_TRACE(gdb_bh, "get_write_access");
860 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
861 	if (unlikely(err))
862 		goto errout;
863 
864 	BUFFER_TRACE(dind, "get_write_access");
865 	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
866 	if (unlikely(err)) {
867 		ext4_std_error(sb, err);
868 		goto errout;
869 	}
870 
871 	/* ext4_reserve_inode_write() gets a reference on the iloc */
872 	err = ext4_reserve_inode_write(handle, inode, &iloc);
873 	if (unlikely(err))
874 		goto errout;
875 
876 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
877 				GFP_KERNEL);
878 	if (!n_group_desc) {
879 		err = -ENOMEM;
880 		ext4_warning(sb, "not enough memory for %lu groups",
881 			     gdb_num + 1);
882 		goto errout;
883 	}
884 
885 	/*
886 	 * Finally, we have all of the possible failures behind us...
887 	 *
888 	 * Remove new GDT block from inode double-indirect block and clear out
889 	 * the new GDT block for use (which also "frees" the backup GDT blocks
890 	 * from the reserved inode).  We don't need to change the bitmaps for
891 	 * these blocks, because they are marked as in-use from being in the
892 	 * reserved inode, and will become GDT blocks (primary and backup).
893 	 */
894 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
895 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
896 	if (unlikely(err)) {
897 		ext4_std_error(sb, err);
898 		goto errout;
899 	}
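	/*
	 * i_blocks is kept in 512-byte units; on bigalloc filesystems the
	 * blocks released here appear to be charged at cluster granularity,
	 * hence the shift by (9 - s_cluster_bits) rather than a plain 9.
	 */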
900 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
901 			   (9 - EXT4_SB(sb)->s_cluster_bits);
902 	ext4_mark_iloc_dirty(handle, inode, &iloc);
903 	memset(gdb_bh->b_data, 0, sb->s_blocksize);
904 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
905 	if (unlikely(err)) {
906 		ext4_std_error(sb, err);
907 		iloc.bh = NULL;
908 		goto errout;
909 	}
910 	brelse(dind);
911 
912 	rcu_read_lock();
913 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
914 	memcpy(n_group_desc, o_group_desc,
915 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
916 	rcu_read_unlock();
917 	n_group_desc[gdb_num] = gdb_bh;
918 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
919 	EXT4_SB(sb)->s_gdb_count++;
920 	ext4_kvfree_array_rcu(o_group_desc);
921 
922 	lock_buffer(EXT4_SB(sb)->s_sbh);
923 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
924 	ext4_superblock_csum_set(sb);
925 	unlock_buffer(EXT4_SB(sb)->s_sbh);
926 	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
927 	if (err)
928 		ext4_std_error(sb, err);
929 	return err;
930 errout:
931 	kvfree(n_group_desc);
932 	brelse(iloc.bh);
933 	brelse(dind);
934 	brelse(gdb_bh);
935 
936 	ext4_debug("leaving with error %d\n", err);
937 	return err;
938 }
939 
940 /*
941  * add_new_gdb_meta_bg is the sister of add_new_gdb.
942  */
943 static int add_new_gdb_meta_bg(struct super_block *sb,
944 			       handle_t *handle, ext4_group_t group) {
945 	ext4_fsblk_t gdblock;
946 	struct buffer_head *gdb_bh;
947 	struct buffer_head **o_group_desc, **n_group_desc;
948 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
949 	int err;
950 
951 	gdblock = ext4_meta_bg_first_block_no(sb, group) +
952 		   ext4_bg_has_super(sb, group);
953 	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
954 	if (IS_ERR(gdb_bh))
955 		return PTR_ERR(gdb_bh);
956 	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
957 				GFP_KERNEL);
958 	if (!n_group_desc) {
959 		brelse(gdb_bh);
960 		err = -ENOMEM;
961 		ext4_warning(sb, "not enough memory for %lu groups",
962 			     gdb_num + 1);
963 		return err;
964 	}
965 
966 	rcu_read_lock();
967 	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
968 	memcpy(n_group_desc, o_group_desc,
969 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
970 	rcu_read_unlock();
971 	n_group_desc[gdb_num] = gdb_bh;
972 
973 	BUFFER_TRACE(gdb_bh, "get_write_access");
974 	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
975 	if (err) {
976 		kvfree(n_group_desc);
977 		brelse(gdb_bh);
978 		return err;
979 	}
980 
981 	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
982 	EXT4_SB(sb)->s_gdb_count++;
983 	ext4_kvfree_array_rcu(o_group_desc);
984 	return err;
985 }
986 
987 /*
988  * Called when we are adding a new group which has a backup copy of each of
989  * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
990  * We need to add these reserved backup GDT blocks to the resize inode, so
991  * that they are kept for future resizing and not allocated to files.
992  *
993  * Each reserved backup GDT block will go into a different indirect block.
994  * The indirect blocks are actually the primary reserved GDT blocks,
995  * so we know in advance what their block numbers are.  We only get the
996  * double-indirect block to verify it is pointing to the primary reserved
997  * GDT blocks so we don't overwrite a data block by accident.  The reserved
998  * backup GDT blocks are stored in their reserved primary GDT block.
999  */
1000 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
1001 			      ext4_group_t group)
1002 {
1003 	struct super_block *sb = inode->i_sb;
1004 	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
1005 	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
1006 	struct buffer_head **primary;
1007 	struct buffer_head *dind;
1008 	struct ext4_iloc iloc;
1009 	ext4_fsblk_t blk;
1010 	__le32 *data, *end;
1011 	int gdbackups = 0;
1012 	int res, i;
1013 	int err;
1014 
1015 	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
1016 	if (!primary)
1017 		return -ENOMEM;
1018 
1019 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
1020 	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
1021 	if (IS_ERR(dind)) {
1022 		err = PTR_ERR(dind);
1023 		dind = NULL;
1024 		goto exit_free;
1025 	}
1026 
1027 	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
1028 	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
1029 					 EXT4_ADDR_PER_BLOCK(sb));
1030 	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
1031 
1032 	/* Get each reserved primary GDT block and verify it holds backups */
1033 	for (res = 0; res < reserved_gdb; res++, blk++) {
1034 		if (le32_to_cpu(*data) != blk) {
1035 			ext4_warning(sb, "reserved block %llu"
1036 				     " not at offset %ld",
1037 				     blk,
1038 				     (long)(data - (__le32 *)dind->b_data));
1039 			err = -EINVAL;
1040 			goto exit_bh;
1041 		}
1042 		primary[res] = ext4_sb_bread(sb, blk, 0);
1043 		if (IS_ERR(primary[res])) {
1044 			err = PTR_ERR(primary[res]);
1045 			primary[res] = NULL;
1046 			goto exit_bh;
1047 		}
1048 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
1049 		if (gdbackups < 0) {
1050 			brelse(primary[res]);
1051 			err = gdbackups;
1052 			goto exit_bh;
1053 		}
1054 		if (++data >= end)
1055 			data = (__le32 *)dind->b_data;
1056 	}
1057 
1058 	for (i = 0; i < reserved_gdb; i++) {
1059 		BUFFER_TRACE(primary[i], "get_write_access");
1060 		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
1061 							 EXT4_JTR_NONE)))
1062 			goto exit_bh;
1063 	}
1064 
1065 	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
1066 		goto exit_bh;
1067 
1068 	/*
1069 	 * Finally we can add each of the reserved backup GDT blocks from
1070 	 * the new group to its reserved primary GDT block.
1071 	 */
1072 	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
1073 	for (i = 0; i < reserved_gdb; i++) {
1074 		int err2;
1075 		data = (__le32 *)primary[i]->b_data;
1076 		/* printk("reserving backup %lu[%u] = %lu\n",
1077 		       primary[i]->b_blocknr, gdbackups,
1078 		       blk + primary[i]->b_blocknr); */
1079 		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
1080 		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
1081 		if (!err)
1082 			err = err2;
1083 	}
1084 
1085 	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
1086 	ext4_mark_iloc_dirty(handle, inode, &iloc);
1087 
1088 exit_bh:
1089 	while (--res >= 0)
1090 		brelse(primary[res]);
1091 	brelse(dind);
1092 
1093 exit_free:
1094 	kfree(primary);
1095 
1096 	return err;
1097 }
1098 
1099 /*
1100  * Update the backup copies of the ext4 metadata.  These don't need to be part
1101  * of the main resize transaction, because e2fsck will re-write them if there
1102  * is a problem (basically only OOM will cause a problem).  However, we
1103  * _should_ update the backups if possible, in case the primary gets trashed
1104  * for some reason and we need to run e2fsck from a backup superblock.  The
1105  * important part is that the new block and inode counts are in the backup
1106  * superblocks, and the location of the new group metadata in the GDT backups.
1107  *
1108  * We do not need to take the s_resize_lock for this, because these
1109  * blocks are not otherwise touched by the filesystem code when it is
1110  * mounted.  We don't need to worry about last changing from
1111  * sbi->s_groups_count, because the worst that can happen is that we
1112  * do not copy the full number of backups at this time.  The resize
1113  * which changed s_groups_count will backup again.
1114  */
1115 static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1116 			   int size, int meta_bg)
1117 {
1118 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1119 	ext4_group_t last;
1120 	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
1121 	unsigned three = 1;
1122 	unsigned five = 5;
1123 	unsigned seven = 7;
1124 	ext4_group_t group = 0;
1125 	int rest = sb->s_blocksize - size;
1126 	handle_t *handle;
1127 	int err = 0, err2;
1128 
1129 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
1130 	if (IS_ERR(handle)) {
1131 		group = 1;
1132 		err = PTR_ERR(handle);
1133 		goto exit_err;
1134 	}
1135 
1136 	if (meta_bg == 0) {
1137 		group = ext4_list_backups(sb, &three, &five, &seven);
1138 		last = sbi->s_groups_count;
1139 	} else {
1140 		group = ext4_get_group_number(sb, blk_off) + 1;
1141 		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
1142 	}
1143 
1144 	while (group < sbi->s_groups_count) {
1145 		struct buffer_head *bh;
1146 		ext4_fsblk_t backup_block;
1147 
1148 		/* Out of journal space, and can't get more - abort - so sad */
1149 		err = ext4_resize_ensure_credits_batch(handle, 1);
1150 		if (err < 0)
1151 			break;
1152 
1153 		if (meta_bg == 0)
1154 			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1155 		else
1156 			backup_block = (ext4_group_first_block_no(sb, group) +
1157 					ext4_bg_has_super(sb, group));
1158 
1159 		bh = sb_getblk(sb, backup_block);
1160 		if (unlikely(!bh)) {
1161 			err = -ENOMEM;
1162 			break;
1163 		}
1164 		ext4_debug("update metadata backup %llu(+%llu)\n",
1165 			   backup_block, backup_block -
1166 			   ext4_group_first_block_no(sb, group));
1167 		BUFFER_TRACE(bh, "get_write_access");
1168 		if ((err = ext4_journal_get_write_access(handle, sb, bh,
1169 							 EXT4_JTR_NONE)))
1170 			break;
1171 		lock_buffer(bh);
1172 		memcpy(bh->b_data, data, size);
1173 		if (rest)
1174 			memset(bh->b_data + size, 0, rest);
1175 		set_buffer_uptodate(bh);
1176 		unlock_buffer(bh);
1177 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
1178 		if (unlikely(err))
1179 			ext4_std_error(sb, err);
1180 		brelse(bh);
1181 
1182 		if (meta_bg == 0)
1183 			group = ext4_list_backups(sb, &three, &five, &seven);
1184 		else if (group == last)
1185 			break;
1186 		else
1187 			group = last;
1188 	}
1189 	if ((err2 = ext4_journal_stop(handle)) && !err)
1190 		err = err2;
1191 
1192 	/*
1193 	 * Ugh! Need to have e2fsck write the backup copies.  It is too
1194 	 * late to revert the resize, we shouldn't fail just because of
1195 	 * the backup copies (they are only needed in case of corruption).
1196 	 *
1197 	 * However, if we got here we have a journal problem too, so we
1198 	 * can't really start a transaction to mark the superblock.
1199 	 * Chicken out and just set the flag in the hope it will be written
1200 	 * to disk, and if not - we will simply wait until next fsck.
1201 	 */
1202 exit_err:
1203 	if (err) {
1204 		ext4_warning(sb, "can't update backup for group %u (err %d), "
1205 			     "forcing fsck on next reboot", group, err);
1206 		sbi->s_mount_state &= ~EXT4_VALID_FS;
1207 		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1208 		mark_buffer_dirty(sbi->s_sbh);
1209 	}
1210 }
1211 
1212 /*
1213  * ext4_add_new_descs() adds @count group descriptors for the groups
1214  * starting at @group.
1215  *
1216  * @handle: journal handle
1217  * @sb: super block
1218  * @group: the group no. of the first group desc to be added
1219  * @resize_inode: the resize inode
1220  * @count: number of group descriptors to be added
1221  */
1222 static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1223 			      ext4_group_t group, struct inode *resize_inode,
1224 			      ext4_group_t count)
1225 {
1226 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1227 	struct ext4_super_block *es = sbi->s_es;
1228 	struct buffer_head *gdb_bh;
1229 	int i, gdb_off, gdb_num, err = 0;
1230 	int meta_bg;
1231 
1232 	meta_bg = ext4_has_feature_meta_bg(sb);
1233 	for (i = 0; i < count; i++, group++) {
1234 		int reserved_gdb = ext4_bg_has_super(sb, group) ?
1235 			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1236 
1237 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1238 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1239 
1240 		/*
1241 		 * We will only either add reserved group blocks to a backup group
1242 		 * or remove reserved blocks for the first group in a new group block.
1243 		 * Doing both would mean more complex code, and sane people don't
1244 		 * use non-sparse filesystems anymore.  This is already checked above.
1245 		 */
1246 		if (gdb_off) {
1247 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1248 						     gdb_num);
1249 			BUFFER_TRACE(gdb_bh, "get_write_access");
1250 			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
1251 							    EXT4_JTR_NONE);
1252 
1253 			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
1254 				err = reserve_backup_gdb(handle, resize_inode, group);
1255 		} else if (meta_bg != 0) {
1256 			err = add_new_gdb_meta_bg(sb, handle, group);
1257 		} else {
1258 			err = add_new_gdb(handle, resize_inode, group);
1259 		}
1260 		if (err)
1261 			break;
1262 	}
1263 	return err;
1264 }
1265 
1266 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1267 {
1268 	struct buffer_head *bh = sb_getblk(sb, block);
1269 	if (unlikely(!bh))
1270 		return NULL;
1271 	if (!bh_uptodate_or_lock(bh)) {
1272 		if (ext4_read_bh(bh, 0, NULL) < 0) {
1273 			brelse(bh);
1274 			return NULL;
1275 		}
1276 	}
1277 
1278 	return bh;
1279 }
1280 
1281 static int ext4_set_bitmap_checksums(struct super_block *sb,
1282 				     ext4_group_t group,
1283 				     struct ext4_group_desc *gdp,
1284 				     struct ext4_new_group_data *group_data)
1285 {
1286 	struct buffer_head *bh;
1287 
1288 	if (!ext4_has_metadata_csum(sb))
1289 		return 0;
1290 
1291 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1292 	if (!bh)
1293 		return -EIO;
1294 	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1295 				   EXT4_INODES_PER_GROUP(sb) / 8);
1296 	brelse(bh);
1297 
1298 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1299 	if (!bh)
1300 		return -EIO;
1301 	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
1302 	brelse(bh);
1303 
1304 	return 0;
1305 }
1306 
1307 /*
1308  * ext4_setup_new_descs() will set up the group descriptors of a flex bg
1309  */
1310 static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1311 				struct ext4_new_flex_group_data *flex_gd)
1312 {
1313 	struct ext4_new_group_data	*group_data = flex_gd->groups;
1314 	struct ext4_group_desc		*gdp;
1315 	struct ext4_sb_info		*sbi = EXT4_SB(sb);
1316 	struct buffer_head		*gdb_bh;
1317 	ext4_group_t			group;
1318 	__u16				*bg_flags = flex_gd->bg_flags;
1319 	int				i, gdb_off, gdb_num, err = 0;
1320 
1321 
1322 	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
1323 		group = group_data->group;
1324 
1325 		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
1326 		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1327 
1328 		/*
1329 		 * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
1330 		 */
1331 		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1332 		/* Update group descriptor block for new group */
1333 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1334 						 gdb_off * EXT4_DESC_SIZE(sb));
1335 
1336 		memset(gdp, 0, EXT4_DESC_SIZE(sb));
1337 		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1338 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1339 		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1340 		if (err) {
1341 			ext4_std_error(sb, err);
1342 			break;
1343 		}
1344 
1345 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
1346 		ext4_free_group_clusters_set(sb, gdp,
1347 					     group_data->free_clusters_count);
1348 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1349 		if (ext4_has_group_desc_csum(sb))
1350 			ext4_itable_unused_set(sb, gdp,
1351 					       EXT4_INODES_PER_GROUP(sb));
1352 		gdp->bg_flags = cpu_to_le16(*bg_flags);
1353 		ext4_group_desc_csum_set(sb, group, gdp);
1354 
1355 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1356 		if (unlikely(err)) {
1357 			ext4_std_error(sb, err);
1358 			break;
1359 		}
1360 
1361 		/*
1362 		 * We can allocate memory for mb_alloc based on the new group
1363 		 * descriptor
1364 		 */
1365 		err = ext4_mb_add_groupinfo(sb, group, gdp);
1366 		if (err)
1367 			break;
1368 	}
1369 	return err;
1370 }
1371 
1372 /*
1373  * ext4_update_super() updates the super block so that the newly added
1374  * groups can be seen by the filesystem.
1375  *
1376  * @sb: super block
1377  * @flex_gd: new added groups
1378  */
1379 static void ext4_update_super(struct super_block *sb,
1380 			     struct ext4_new_flex_group_data *flex_gd)
1381 {
1382 	ext4_fsblk_t blocks_count = 0;
1383 	ext4_fsblk_t free_blocks = 0;
1384 	ext4_fsblk_t reserved_blocks = 0;
1385 	struct ext4_new_group_data *group_data = flex_gd->groups;
1386 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1387 	struct ext4_super_block *es = sbi->s_es;
1388 	int i;
1389 
1390 	BUG_ON(flex_gd->count == 0 || group_data == NULL);
1391 	/*
1392 	 * Make the new blocks and inodes valid next.  We do this before
1393 	 * increasing the group count so that once the group is enabled,
1394 	 * all of its blocks and inodes are already valid.
1395 	 *
1396 	 * We always allocate group-by-group, then block-by-block or
1397 	 * inode-by-inode within a group, so enabling these
1398 	 * blocks/inodes before the group is live won't actually let us
1399 	 * allocate the new space yet.
1400 	 */
1401 	for (i = 0; i < flex_gd->count; i++) {
1402 		blocks_count += group_data[i].blocks_count;
1403 		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
1404 	}
1405 
1406 	reserved_blocks = ext4_r_blocks_count(es) * 100;
1407 	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
1408 	reserved_blocks *= blocks_count;
1409 	do_div(reserved_blocks, 100);
1410 
1411 	lock_buffer(sbi->s_sbh);
1412 	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
1413 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
1414 	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1415 		     flex_gd->count);
1416 	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
1417 		     flex_gd->count);
1418 
1419 	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
1420 	/*
1421 	 * We need to protect s_groups_count against other CPUs seeing
1422 	 * inconsistent state in the superblock.
1423 	 *
1424 	 * The precise rules we use are:
1425 	 *
1426 	 * * Writers must perform a smp_wmb() after updating all
1427 	 *   dependent data and before modifying the groups count
1428 	 *
1429 	 * * Readers must perform an smp_rmb() after reading the groups
1430 	 *   count and before reading any dependent data.
1431 	 *
1432 	 * NB. These rules can be relaxed when checking the group count
1433 	 * while freeing data, as we can only allocate from a block
1434 	 * group after serialising against the group count, and we can
1435 	 * only then free after serialising in turn against that
1436 	 * allocation.
1437 	 */
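	/*
	 * Illustrative reader-side counterpart (a sketch, not code from this
	 * file):
	 *
	 *	ngroups = EXT4_SB(sb)->s_groups_count;
	 *	smp_rmb();
	 *	... read per-group data for groups < ngroups ...
	 */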
1438 	smp_wmb();
1439 
1440 	/* Update the global fs size fields */
1441 	sbi->s_groups_count += flex_gd->count;
1442 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1443 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1444 
1445 	/* Update the reserved block counts only once the new group is
1446 	 * active. */
1447 	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
1448 				reserved_blocks);
1449 	ext4_superblock_csum_set(sb);
1450 	unlock_buffer(sbi->s_sbh);
1451 
1452 	/* Update the free space counts */
1453 	percpu_counter_add(&sbi->s_freeclusters_counter,
1454 			   EXT4_NUM_B2C(sbi, free_blocks));
1455 	percpu_counter_add(&sbi->s_freeinodes_counter,
1456 			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
1457 
1458 	ext4_debug("free blocks count %llu",
1459 		   percpu_counter_read(&sbi->s_freeclusters_counter));
1460 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1461 		ext4_group_t flex_group;
1462 		struct flex_groups *fg;
1463 
1464 		flex_group = ext4_flex_group(sbi, group_data[0].group);
1465 		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1466 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1467 			     &fg->free_clusters);
1468 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1469 			   &fg->free_inodes);
1470 	}
1471 
1472 	/*
1473 	 * Update the fs overhead information
1474 	 */
1475 	ext4_calculate_overhead(sb);
1476 
1477 	if (test_opt(sb, DEBUG))
1478 		printk(KERN_DEBUG "EXT4-fs: added %u groups: "
1479 		       "%llu blocks (%llu free, %llu reserved)\n", flex_gd->count,
1480 		       blocks_count, free_blocks, reserved_blocks);
1481 }
1482 
1483 /* Add a flex group to an fs. Ensure we handle all possible error conditions
1484  * _before_ we start modifying the filesystem, because we cannot abort the
1485  * transaction and not have it write the data to disk.
1486  */
1487 static int ext4_flex_group_add(struct super_block *sb,
1488 			       struct inode *resize_inode,
1489 			       struct ext4_new_flex_group_data *flex_gd)
1490 {
1491 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1492 	struct ext4_super_block *es = sbi->s_es;
1493 	ext4_fsblk_t o_blocks_count;
1494 	ext4_grpblk_t last;
1495 	ext4_group_t group;
1496 	handle_t *handle;
1497 	unsigned reserved_gdb;
1498 	int err = 0, err2 = 0, credit;
1499 
1500 	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
1501 
1502 	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
1503 	o_blocks_count = ext4_blocks_count(es);
1504 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1505 	BUG_ON(last);
1506 
1507 	err = setup_new_flex_group_blocks(sb, flex_gd);
1508 	if (err)
1509 		goto exit;
1510 	/*
1511 	 * We will always be modifying at least the superblock and GDT
1512 	 * blocks.  If we are adding a group past the last current GDT block,
1513 	 * we will also modify the inode and the dindirect block.  If we
1514 	 * are adding a group with superblock/GDT backups, we will also
1515 	 * modify each of the reserved GDT dindirect blocks.
1516 	 */
1517 	credit = 3;	/* sb, resize inode, resize inode dindirect */
1518 	/* GDT blocks */
1519 	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1520 	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
1521 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1522 	if (IS_ERR(handle)) {
1523 		err = PTR_ERR(handle);
1524 		goto exit;
1525 	}
1526 
1527 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1528 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1529 					    EXT4_JTR_NONE);
1530 	if (err)
1531 		goto exit_journal;
1532 
1533 	group = flex_gd->groups[0].group;
1534 	BUG_ON(group != sbi->s_groups_count);
1535 	err = ext4_add_new_descs(handle, sb, group,
1536 				resize_inode, flex_gd->count);
1537 	if (err)
1538 		goto exit_journal;
1539 
1540 	err = ext4_setup_new_descs(handle, sb, flex_gd);
1541 	if (err)
1542 		goto exit_journal;
1543 
1544 	ext4_update_super(sb, flex_gd);
1545 
1546 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1547 
1548 exit_journal:
1549 	err2 = ext4_journal_stop(handle);
1550 	if (!err)
1551 		err = err2;
1552 
1553 	if (!err) {
1554 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
1555 		int gdb_num_end = ((group + flex_gd->count - 1) /
1556 				   EXT4_DESC_PER_BLOCK(sb));
1557 		int meta_bg = ext4_has_feature_meta_bg(sb);
1558 		sector_t old_gdb = 0;
1559 
1560 		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
1561 			       sizeof(struct ext4_super_block), 0);
1562 		for (; gdb_num <= gdb_num_end; gdb_num++) {
1563 			struct buffer_head *gdb_bh;
1564 
1565 			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1566 						     gdb_num);
1567 			if (old_gdb == gdb_bh->b_blocknr)
1568 				continue;
1569 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
1570 				       gdb_bh->b_size, meta_bg);
1571 			old_gdb = gdb_bh->b_blocknr;
1572 		}
1573 	}
1574 exit:
1575 	return err;
1576 }
1577 
1578 static int ext4_setup_next_flex_gd(struct super_block *sb,
1579 				    struct ext4_new_flex_group_data *flex_gd,
1580 				    ext4_fsblk_t n_blocks_count,
1581 				    unsigned long flexbg_size)
1582 {
1583 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1584 	struct ext4_super_block *es = sbi->s_es;
1585 	struct ext4_new_group_data *group_data = flex_gd->groups;
1586 	ext4_fsblk_t o_blocks_count;
1587 	ext4_group_t n_group;
1588 	ext4_group_t group;
1589 	ext4_group_t last_group;
1590 	ext4_grpblk_t last;
1591 	ext4_grpblk_t clusters_per_group;
1592 	unsigned long i;
1593 
1594 	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
1595 
1596 	o_blocks_count = ext4_blocks_count(es);
1597 
1598 	if (o_blocks_count == n_blocks_count)
1599 		return 0;
1600 
1601 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1602 	BUG_ON(last);
1603 	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
1604 
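	/*
	 * flexbg_size is expected to be a power of two here, so OR-ing in
	 * (flexbg_size - 1) yields the last group of the flex group
	 * containing @group: e.g. group 35 with flexbg_size 16 gives
	 * last_group 47.
	 */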
1605 	last_group = group | (flexbg_size - 1);
1606 	if (last_group > n_group)
1607 		last_group = n_group;
1608 
1609 	flex_gd->count = last_group - group + 1;
1610 
1611 	for (i = 0; i < flex_gd->count; i++) {
1612 		int overhead;
1613 
1614 		group_data[i].group = group + i;
1615 		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1616 		overhead = ext4_group_overhead_blocks(sb, group + i);
1617 		group_data[i].mdata_blocks = overhead;
1618 		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1619 		if (ext4_has_group_desc_csum(sb)) {
1620 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1621 					       EXT4_BG_INODE_UNINIT;
1622 			if (!test_opt(sb, INIT_INODE_TABLE))
1623 				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1624 		} else
1625 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1626 	}
1627 
1628 	if (last_group == n_group && ext4_has_group_desc_csum(sb))
1629 		/* We need to initialize the block bitmap of the last group. */
1630 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1631 
1632 	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1633 		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1634 		group_data[i - 1].free_clusters_count -= clusters_per_group -
1635 						       last - 1;
1636 	}
1637 
1638 	return 1;
1639 }
1640 
1641 /* Add group descriptor data to an existing or new group descriptor block.
1642  * Ensure we handle all possible error conditions _before_ we start modifying
1643  * the filesystem, because we cannot abort the transaction and not have it
1644  * write the data to disk.
1645  *
1646  * If we are on a GDT block boundary, we need to get the reserved GDT block.
1647  * Otherwise, we may need to add backup GDT blocks for a sparse group.
1648  *
1649  * We only need to hold the superblock lock while we are actually adding
1650  * in the new group's counts to the superblock.  Prior to that we have
1651  * not really "added" the group at all.  We re-check that we are still
1652  * adding in the last group in case things have changed since verifying.
1653  */
1654 int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
1655 {
1656 	struct ext4_new_flex_group_data flex_gd;
1657 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1658 	struct ext4_super_block *es = sbi->s_es;
1659 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
1660 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
1661 	struct inode *inode = NULL;
1662 	int gdb_off;
1663 	int err;
1664 	__u16 bg_flags = 0;
1665 
1666 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
1667 
1668 	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
1669 		ext4_warning(sb, "Can't resize non-sparse filesystem further");
1670 		return -EPERM;
1671 	}
1672 
1673 	if (ext4_blocks_count(es) + input->blocks_count <
1674 	    ext4_blocks_count(es)) {
1675 		ext4_warning(sb, "blocks_count overflow");
1676 		return -EINVAL;
1677 	}
1678 
1679 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
1680 	    le32_to_cpu(es->s_inodes_count)) {
1681 		ext4_warning(sb, "inodes_count overflow");
1682 		return -EINVAL;
1683 	}
1684 
1685 	if (reserved_gdb || gdb_off == 0) {
1686 		if (!ext4_has_feature_resize_inode(sb) ||
1687 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
1688 			ext4_warning(sb,
1689 				     "No reserved GDT blocks, can't resize");
1690 			return -EPERM;
1691 		}
1692 		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
1693 		if (IS_ERR(inode)) {
1694 			ext4_warning(sb, "Error opening resize inode");
1695 			return PTR_ERR(inode);
1696 		}
1697 	}
1698 
1700 	err = verify_group_input(sb, input);
1701 	if (err)
1702 		goto out;
1703 
1704 	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
1705 	if (err)
1706 		goto out;
1707 
1708 	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
1709 	if (err)
1710 		goto out;
1711 
1712 	flex_gd.count = 1;
1713 	flex_gd.groups = input;
1714 	flex_gd.bg_flags = &bg_flags;
1715 	err = ext4_flex_group_add(sb, inode, &flex_gd);
1716 out:
1717 	iput(inode);
1718 	return err;
1719 } /* ext4_group_add */
1720 
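/*
 * ext4_group_add() is reached from ext4_ioctl() via EXT4_IOC_GROUP_ADD.
 * A minimal userspace sketch (illustrative only; next_group, bb_blk,
 * ib_blk, it_blk, blocks_per_group and rsv are hypothetical values that
 * a tool such as resize2fs would normally compute):
 *
 *	struct ext4_new_group_input in = {
 *		.group           = next_group,
 *		.block_bitmap    = bb_blk,
 *		.inode_bitmap    = ib_blk,
 *		.inode_table     = it_blk,
 *		.blocks_count    = blocks_per_group,
 *		.reserved_blocks = rsv,
 *	};
 *	ioctl(fd, EXT4_IOC_GROUP_ADD, &in);
 */
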
1721 /*
1722  * Extend a group without validation, assuming checking has already been done.
1723  */
1724 static int ext4_group_extend_no_check(struct super_block *sb,
1725 				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
1726 {
1727 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1728 	handle_t *handle;
1729 	int err = 0, err2;
1730 
1731 	/* We will update the superblock, one block bitmap, and
1732 	 * one group descriptor via ext4_group_add_blocks().
1733 	 */
1734 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
1735 	if (IS_ERR(handle)) {
1736 		err = PTR_ERR(handle);
1737 		ext4_warning(sb, "error %d on journal start", err);
1738 		return err;
1739 	}
1740 
1741 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
1742 	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
1743 					    EXT4_JTR_NONE);
1744 	if (err) {
1745 		ext4_warning(sb, "error %d on journal write access", err);
1746 		goto errout;
1747 	}
1748 
1749 	lock_buffer(EXT4_SB(sb)->s_sbh);
1750 	ext4_blocks_count_set(es, o_blocks_count + add);
1751 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
1752 	ext4_superblock_csum_set(sb);
1753 	unlock_buffer(EXT4_SB(sb)->s_sbh);
1754 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
1755 		   o_blocks_count + add);
1756 	/* We add the blocks to the bitmap and set the group's need-init bit */
1757 	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
1758 	if (err)
1759 		goto errout;
1760 	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
1761 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
1762 		   o_blocks_count + add);
1763 errout:
1764 	err2 = ext4_journal_stop(handle);
1765 	if (err2 && !err)
1766 		err = err2;
1767 
1768 	if (!err) {
1769 		if (test_opt(sb, DEBUG))
1770 			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
1771 			       "blocks\n", ext4_blocks_count(es));
1772 		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
1773 			       (char *)es, sizeof(struct ext4_super_block), 0);
1774 	}
1775 	return err;
1776 }
1777 
1778 /*
1779  * Extend the filesystem to the new number of blocks specified.  This entry
1780  * point is only used to extend the current filesystem to the end of the last
1781  * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
1782  * for emergencies (because it has no dependencies on reserved blocks).
1783  *
1784  * If we _really_ wanted, we could use default values to call ext4_group_add(),
1785  * allowing the "remount" trick to work for arbitrary resizing, assuming enough
1786  * GDT blocks are reserved to grow to the desired size.
1787  */
1788 int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
1789 		      ext4_fsblk_t n_blocks_count)
1790 {
1791 	ext4_fsblk_t o_blocks_count;
1792 	ext4_grpblk_t last;
1793 	ext4_grpblk_t add;
1794 	struct buffer_head *bh;
1795 	int err;
1796 	ext4_group_t group;
1797 
1798 	o_blocks_count = ext4_blocks_count(es);
1799 
1800 	if (test_opt(sb, DEBUG))
1801 		ext4_msg(sb, KERN_DEBUG,
1802 			 "extending last group from %llu to %llu blocks",
1803 			 o_blocks_count, n_blocks_count);
1804 
1805 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
1806 		return 0;
1807 
1808 	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1809 		ext4_msg(sb, KERN_ERR,
1810 			 "filesystem too large to resize to %llu blocks safely",
1811 			 n_blocks_count);
1812 		return -EINVAL;
1813 	}
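	/*
	 * The check above rejects any size whose count of 512-byte sectors
	 * would overflow sector_t.  Illustrative example: with 4KiB blocks
	 * (s_blocksize_bits == 12) each block is 2^3 sectors, so with a
	 * 64-bit sector_t the limit works out to (2^64 - 1) >> 3, roughly
	 * 2^61 blocks.
	 */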
1814 
1815 	if (n_blocks_count < o_blocks_count) {
1816 		ext4_warning(sb, "can't shrink FS - resize aborted");
1817 		return -EINVAL;
1818 	}
1819 
1820 	/* Handle the remaining blocks in the last group only. */
1821 	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
1822 
1823 	if (last == 0) {
1824 		ext4_warning(sb, "need to use ext2online to resize further");
1825 		return -EPERM;
1826 	}
1827 
1828 	add = EXT4_BLOCKS_PER_GROUP(sb) - last;
1829 
1830 	if (o_blocks_count + add < o_blocks_count) {
1831 		ext4_warning(sb, "blocks_count overflow");
1832 		return -EINVAL;
1833 	}
1834 
1835 	if (o_blocks_count + add > n_blocks_count)
1836 		add = n_blocks_count - o_blocks_count;
1837 
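	/*
	 * Worked example (illustrative, assuming 4KiB blocks, 32768 blocks
	 * per group and s_first_data_block == 0): o_blocks_count = 100000
	 * lies in group 3 (blocks 98304..131071) at offset last = 1696, so
	 * add = 32768 - 1696 = 31072.  A request for n_blocks_count = 140000
	 * leaves add unchanged and triggers the warning below: this entry
	 * point only fills out the last group (to block 131072); growing
	 * past that requires adding groups via ext4_group_add().
	 */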
1838 	if (o_blocks_count + add < n_blocks_count)
1839 		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
1840 			     o_blocks_count + add, add);
1841 
1842 	/* See if the device is actually as big as what was requested */
1843 	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
1844 	if (IS_ERR(bh)) {
1845 		ext4_warning(sb, "can't read last block, resize aborted");
1846 		return -ENOSPC;
1847 	}
1848 	brelse(bh);
1849 
1850 	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
1851 	return err;
1852 } /* ext4_group_extend */
1853 
1855 static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
1856 {
1857 	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
1858 }
1859 
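/*
 * num_desc_blocks() is a ceiling division.  Worked example (illustrative,
 * assuming 4KiB blocks and 32-byte descriptors, i.e. no 64bit feature, so
 * EXT4_DESC_PER_BLOCK(sb) == 128): 300 groups need (300 + 127) / 128 == 3
 * group descriptor blocks.
 */
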
1860 /*
1861  * Release the resize inode and drop the resize_inode feature if there
1862  * are no more reserved gdt blocks, and then convert the file system
1863  * to enable meta_bg
1864  */
1865 static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
1866 {
1867 	handle_t *handle;
1868 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1869 	struct ext4_super_block *es = sbi->s_es;
1870 	struct ext4_inode_info *ei = EXT4_I(inode);
1871 	ext4_fsblk_t nr;
1872 	int i, ret, err = 0;
1873 	int credits = 1;
1874 
1875 	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
1876 	if (inode) {
1877 		if (es->s_reserved_gdt_blocks) {
1878 			ext4_error(sb, "Unexpected non-zero "
1879 				   "s_reserved_gdt_blocks");
1880 			return -EPERM;
1881 		}
1882 
1883 		/* Do a quick sanity check of the resize inode */
1884 		if (inode->i_blocks != 1 << (inode->i_blkbits -
1885 					     (9 - sbi->s_cluster_bits)))
1886 			goto invalid_resize_inode;
1887 		for (i = 0; i < EXT4_N_BLOCKS; i++) {
1888 			if (i == EXT4_DIND_BLOCK) {
1889 				if (ei->i_data[i])
1890 					continue;
1891 				else
1892 					goto invalid_resize_inode;
1893 			}
1894 			if (ei->i_data[i])
1895 				goto invalid_resize_inode;
1896 		}
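		/*
		 * The resize inode carries the reserved GDT blocks behind
		 * its double indirect block, so EXT4_DIND_BLOCK is expected
		 * to be the only populated slot in i_data above.
		 */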
1897 		credits += 3;	/* block bitmap, bg descriptor, resize inode */
1898 	}
1899 
1900 	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
1901 	if (IS_ERR(handle))
1902 		return PTR_ERR(handle);
1903 
1904 	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
1905 	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
1906 					    EXT4_JTR_NONE);
1907 	if (err)
1908 		goto errout;
1909 
1910 	lock_buffer(sbi->s_sbh);
1911 	ext4_clear_feature_resize_inode(sb);
1912 	ext4_set_feature_meta_bg(sb);
1913 	sbi->s_es->s_first_meta_bg =
1914 		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
1915 	ext4_superblock_csum_set(sb);
1916 	unlock_buffer(sbi->s_sbh);
1917 
1918 	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
1919 	if (err) {
1920 		ext4_std_error(sb, err);
1921 		goto errout;
1922 	}
1923 
1924 	if (inode) {
1925 		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
1926 		ext4_free_blocks(handle, inode, NULL, nr, 1,
1927 				 EXT4_FREE_BLOCKS_METADATA |
1928 				 EXT4_FREE_BLOCKS_FORGET);
1929 		ei->i_data[EXT4_DIND_BLOCK] = 0;
1930 		inode->i_blocks = 0;
1931 
1932 		err = ext4_mark_inode_dirty(handle, inode);
1933 		if (err)
1934 			ext4_std_error(sb, err);
1935 	}
1936 
1937 errout:
1938 	ret = ext4_journal_stop(handle);
1939 	if (!err)
1940 		err = ret;
1941 	return err;
1942 
1943 invalid_resize_inode:
1944 	ext4_error(sb, "corrupted/inconsistent resize inode");
1945 	return -EINVAL;
1946 }
1947 
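/*
 * With meta_bg, each meta block group spans EXT4_DESC_PER_BLOCK(sb)
 * groups and stores its single descriptor block in the first group of
 * the span, with backups in the second and last groups, so the file
 * system can keep growing without pre-reserved GDT blocks.
 */
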
1948 /*
1949  * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
1950  *
1951  * @sb: super block of the fs to be resized
1952  * @n_blocks_count: the number of blocks in the resized fs
1953  */
1954 int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1955 {
1956 	struct ext4_new_flex_group_data *flex_gd = NULL;
1957 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1958 	struct ext4_super_block *es = sbi->s_es;
1959 	struct buffer_head *bh;
1960 	struct inode *resize_inode = NULL;
1961 	ext4_grpblk_t add, offset;
1962 	unsigned long n_desc_blocks;
1963 	unsigned long o_desc_blocks;
1964 	ext4_group_t o_group;
1965 	ext4_group_t n_group;
1966 	ext4_fsblk_t o_blocks_count;
1967 	ext4_fsblk_t n_blocks_count_retry = 0;
1968 	unsigned long last_update_time = 0;
1969 	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
1970 	int meta_bg;
1971 
1972 	/* See if the device is actually as big as what was requested */
1973 	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
1974 	if (IS_ERR(bh)) {
1975 		ext4_warning(sb, "can't read last block, resize aborted");
1976 		return -ENOSPC;
1977 	}
1978 	brelse(bh);
1979 
1980 retry:
1981 	o_blocks_count = ext4_blocks_count(es);
1982 
1983 	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
1984 		 "to %llu blocks", o_blocks_count, n_blocks_count);
1985 
1986 	if (n_blocks_count < o_blocks_count) {
1987 		/* On-line shrinking not supported */
1988 		ext4_warning(sb, "can't shrink FS - resize aborted");
1989 		return -EINVAL;
1990 	}
1991 
1992 	if (n_blocks_count == o_blocks_count)
1993 		/* Nothing to do */
1994 		return 0;
1995 
1996 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
1997 	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1998 		ext4_warning(sb, "resize would cause inodes_count overflow");
1999 		return -EINVAL;
2000 	}
2001 	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
2002 
2003 	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
2004 	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
2005 
2006 	meta_bg = ext4_has_feature_meta_bg(sb);
2007 
2008 	if (ext4_has_feature_resize_inode(sb)) {
2009 		if (meta_bg) {
2010 			ext4_error(sb, "resize_inode and meta_bg enabled "
2011 				   "simultaneously");
2012 			return -EINVAL;
2013 		}
2014 		if (n_desc_blocks > o_desc_blocks +
2015 		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
2016 			n_blocks_count_retry = n_blocks_count;
2017 			n_desc_blocks = o_desc_blocks +
2018 				le16_to_cpu(es->s_reserved_gdt_blocks);
2019 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2020 			n_blocks_count = (ext4_fsblk_t)n_group *
2021 				EXT4_BLOCKS_PER_GROUP(sb) +
2022 				le32_to_cpu(es->s_first_data_block);
2023 			n_group--; /* set to last group number */
2024 		}
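		/*
		 * Worked example of the clamp above (illustrative, assuming
		 * 4KiB blocks, 32768 blocks per group, EXT4_DESC_PER_BLOCK
		 * == 128, s_first_data_block == 0, o_desc_blocks == 1 and
		 * s_reserved_gdt_blocks == 2): n_desc_blocks is capped at 3,
		 * so n_group becomes 3 * 128 = 384 and n_blocks_count
		 * becomes 384 * 32768 = 12582912 blocks (48 GiB); n_group--
		 * then leaves 383 as the last group number.  The original
		 * target stays in n_blocks_count_retry and is retried once
		 * this clamped resize completes.
		 */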
2025 
2026 		if (!resize_inode)
2027 			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2028 						 EXT4_IGET_SPECIAL);
2029 		if (IS_ERR(resize_inode)) {
2030 			ext4_warning(sb, "Error opening resize inode");
2031 			return PTR_ERR(resize_inode);
2032 		}
2033 	}
2034 
2035 	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
2036 		err = ext4_convert_meta_bg(sb, resize_inode);
2037 		if (err)
2038 			goto out;
2039 		if (resize_inode) {
2040 			iput(resize_inode);
2041 			resize_inode = NULL;
2042 		}
2043 		if (n_blocks_count_retry) {
2044 			n_blocks_count = n_blocks_count_retry;
2045 			n_blocks_count_retry = 0;
2046 			goto retry;
2047 		}
2048 	}
2049 
2050 	/*
2051 	 * Make sure the last group has enough space for all the metadata
2052 	 * blocks it might need to hold: its own overhead, the block and
2053 	 * inode bitmaps ("+ 2"), the inode table, and a cluster of slack
2054 	 * for alignment.  (We might not need the inode table blocks in
2055 	 * the last group, but there will be cases where they are needed.)
2056 	 */
2057 	if ((ext4_group_first_block_no(sb, n_group) +
2058 	     ext4_group_overhead_blocks(sb, n_group) + 2 +
2059 	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2060 		n_blocks_count = ext4_group_first_block_no(sb, n_group);
2061 		n_group--;
2062 		n_blocks_count_retry = 0;
2063 		if (resize_inode) {
2064 			iput(resize_inode);
2065 			resize_inode = NULL;
2066 		}
2067 		goto retry;
2068 	}
2069 
2070 	/* extend the last group */
2071 	if (n_group == o_group)
2072 		add = n_blocks_count - o_blocks_count;
2073 	else
2074 		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
2075 	if (add > 0) {
2076 		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
2077 		if (err)
2078 			goto out;
2079 	}
2080 
2081 	if (ext4_blocks_count(es) == n_blocks_count)
2082 		goto out;
2083 
2084 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
2085 	if (err)
2086 		goto out;
2087 
2088 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
2089 	if (err)
2090 		goto out;
2091 
2092 	flex_gd = alloc_flex_gd(flexbg_size);
2093 	if (flex_gd == NULL) {
2094 		err = -ENOMEM;
2095 		goto out;
2096 	}
2097 
2098 	/* Add flex groups. Note that a regular group is a
2099 	 * flex group with 1 group.
2100 	 */
2101 	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2102 					      flexbg_size)) {
2103 		if (jiffies - last_update_time > HZ * 10) {
2104 			if (last_update_time)
2105 				ext4_msg(sb, KERN_INFO,
2106 					 "resized to %llu blocks",
2107 					 ext4_blocks_count(es));
2108 			last_update_time = jiffies;
2109 		}
2110 		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2111 			break;
2112 		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2113 		if (unlikely(err))
2114 			break;
2115 	}
2116 
2117 	if (!err && n_blocks_count_retry) {
2118 		n_blocks_count = n_blocks_count_retry;
2119 		n_blocks_count_retry = 0;
2120 		free_flex_gd(flex_gd);
2121 		flex_gd = NULL;
2122 		if (resize_inode) {
2123 			iput(resize_inode);
2124 			resize_inode = NULL;
2125 		}
2126 		goto retry;
2127 	}
2128 
2129 out:
2130 	if (flex_gd)
2131 		free_flex_gd(flex_gd);
2132 	if (resize_inode != NULL)
2133 		iput(resize_inode);
2134 	if (err)
2135 		ext4_warning(sb, "error (%d) occurred during "
2136 			     "file system resize", err);
2137 	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2138 		 ext4_blocks_count(es));
2139 	return err;
2140 }
2141
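
/*
 * ext4_resize_fs() is reached from ext4_ioctl() via EXT4_IOC_RESIZE_FS,
 * which is what a modern resize2fs uses for online growing.  A minimal
 * userspace sketch (illustrative only):
 *
 *	__u64 new_blocks = device_size_bytes / block_size;
 *	ioctl(fd, EXT4_IOC_RESIZE_FS, &new_blocks);
 */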