// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

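/*
 * Free an RCU-protected pointer after a grace period.  If we cannot
 * allocate the bookkeeping for call_rcu(), fall back to waiting out the
 * grace period synchronously and freeing the array right away.
 */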
void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}

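/*
 * Begin an online resize: check permissions and filesystem state, then
 * take the exclusive EXT4_FLAGS_RESIZING bit so that only one resize can
 * run at a time.
 */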
int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the reserved GDT block count is non-zero, the resize_inode
	 * feature should always be set.
	 */
	if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with errors, because it can destroy the filesystem easily.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

int ext4_resize_end(struct super_block *sb, bool update_backups)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
	if (update_backups)
		return ext4_update_overhead(sb, true);
	return 0;
}

static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group) {
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
						ext4_group_t group) {
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group) {
	ext4_grpblk_t overhead;
	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}

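/* Range helpers: @first is inclusive, @last is exclusive. */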
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
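	/* 2 == one block bitmap + one inode bitmap per group */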
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add a
 * flex group each time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t resize_bg;			/* number of allocated
						   new_group_data */
	ext4_group_t count;			/* number of groups in @groups
						 */
};

/*
 * Avoid memory allocation failures due to adding too many groups at once.
 */
#define MAX_RESIZE_BG				16384

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
 * @flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (unlikely(flexbg_size > MAX_RESIZE_BG))
		flex_gd->resize_bg = MAX_RESIZE_BG;
	else
		flex_gd->resize_bg = flexbg_size;

	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be a partial flex group.
 *
 * @sb: super block of the fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   unsigned int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
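	/*
	 * Groups that end up holding other groups' metadata must have an
	 * initialized block bitmap, so clear EXT4_BG_BLOCK_UNINIT for them
	 * when the tables can spill across groups (flexbg_size > 1).
	 */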
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* Collect as many contiguous blocks as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}

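/*
 * Get a buffer for block @blk, join it to the @handle transaction and
 * zero it out, ready for use as new on-disk metadata.
 */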
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

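/*
 * Make sure @handle has at least @credits credits left, extending or
 * restarting the transaction (with up to EXT4_MAX_TRANS_DATA credits)
 * if necessary.
 */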
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for ext4_setup_new_group_blocks() which sets the bits for
 * the given cluster range in the new groups' block bitmaps.
 *
 * @sb:			super block
 * @handle:		journal handle
 * @flex_gd:		flex group data
 * @first_cluster:	first cluster to mark in use
 * @last_cluster:	last cluster to mark in use
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (count2 = count; count > 0;
	     count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sb, bh,
						    EXT4_JTR_NONE);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		mb_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 *  1. copy the super block and GDT, and initialize group tables if necessary.
 *     In this step, we only set bits in block bitmaps for blocks taken by
 *     the super block and GDT.
 *  2. allocate group tables in block bitmaps, that is, set bits in block
 *     bitmaps for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				       struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

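		/*
		 * With META_BG the descriptor blocks live inside each meta
		 * group, so there are no sb/GDT backup copies to write here;
		 * descriptor backups are refreshed later via update_backups().
		 */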
		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb,
							    EXT4_JTR_NONE);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			mb_set_bits(bh->b_data, 0,
				    EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
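	/*
	 * For each table type (block bitmap, inode bitmap, inode table),
	 * coalesce runs of adjacent blocks across the new groups so each
	 * contiguous run needs only one set_flexbg_block_bitmap() call.
	 */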
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
		}
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
			       unsigned int *five, unsigned int *seven)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned int *min = three;
	int mult = 3;
	unsigned int ret;

	if (ext4_has_feature_sparse_super2(sb)) {
		do {
			if (*min > 2)
				return UINT_MAX;
			ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
			*min += 1;
		} while (!ret);
		return ret;
	}

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have BACKUPS, or a negative error
 * code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, dind, EXT4_JTR_NONE);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

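	/*
	 * Publish the enlarged descriptor array: copy the old buffer_head
	 * pointers, install the new gdb buffer, RCU-swap the array pointer
	 * and free the old array after a grace period.
	 */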
	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group) {
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdb_bh, EXT4_JTR_NONE);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, primary[i],
							 EXT4_JTR_NONE)))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

static inline void ext4_set_block_group_nr(struct super_block *sb, char *data,
					   ext4_group_t group)
{
	struct ext4_super_block *es = (struct ext4_super_block *) data;

	es->s_block_group_nr = cpu_to_le16(group);
	if (ext4_has_metadata_csum(sb))
		es->s_checksum = ext4_superblock_csum(sb, es);
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

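	/*
	 * For meta_bg there are only two backups per descriptor block, in
	 * the second and the last group of its meta group; the loop below
	 * visits exactly those two.
	 */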
	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;
		int has_super = ext4_bg_has_super(sb, group);
		ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group);

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = first_block + has_super;

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, sb, bh,
							 EXT4_JTR_NONE))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		if (has_super && (backup_block == first_block))
			ext4_set_block_group_nr(sb, bh->b_data, group);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sb, gdb_bh,
							    EXT4_JTR_NONE);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

static void ext4_add_overhead(struct super_block *sb,
			      const ext4_fsblk_t overhead)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sbi->s_overhead += overhead;
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
	smp_wmb();
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

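	/*
	 * Scale the reserved block count so that the newly added space
	 * keeps the filesystem's existing reserved percentage.
	 */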
	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	lock_buffer(sbi->s_sbh);
	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information.
	 *
	 * For bigalloc, if the superblock already has a properly calculated
	 * overhead, update it with a value based on numbers already computed
	 * above for the newly allocated capacity.
	 */
	if (ext4_has_feature_bigalloc(sb) && (sbi->s_overhead != 0))
		ext4_add_overhead(sb,
			EXT4_NUM_B2C(sbi, blocks_count - free_blocks));
	else
		ext4_calculate_overhead(sb);
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);

	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb) &&
			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
					 ext4_group_first_block_no(sb, 0);
		sector_t old_gdb = 0;

		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

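	/*
	 * flex_gd->resize_bg is a power of two, so the OR below rounds
	 * @group up to the last group of the current batch.
	 */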
1658 last_group = group | (flex_gd->resize_bg - 1);
1659 if (last_group > n_group)
1660 last_group = n_group;
1661
1662 flex_gd->count = last_group - group + 1;
1663
1664 for (i = 0; i < flex_gd->count; i++) {
1665 int overhead;
1666
1667 group_data[i].group = group + i;
1668 group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
1669 overhead = ext4_group_overhead_blocks(sb, group + i);
1670 group_data[i].mdata_blocks = overhead;
1671 group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
1672 if (ext4_has_group_desc_csum(sb)) {
1673 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1674 EXT4_BG_INODE_UNINIT;
1675 if (!test_opt(sb, INIT_INODE_TABLE))
1676 flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
1677 } else
1678 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1679 }
1680
1681 if (last_group == n_group && ext4_has_group_desc_csum(sb))
1682 /* We need to initialize block bitmap of last group. */
1683 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1684
1685 if ((last_group == n_group) && (last != clusters_per_group - 1)) {
1686 group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
1687 group_data[i - 1].free_clusters_count -= clusters_per_group -
1688 last - 1;
1689 }
1690
1691 return 1;
1692 }
1693
/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */

/*
 * Extend a group without checking, assuming that checking has been done.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count,
				      ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
					    EXT4_JTR_NONE);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	lock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

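	/* The new size, expressed in 512-byte sectors, must fit in sector_t. */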
	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	return ext4_group_extend_no_check(sb, o_blocks_count, add);
} /* ext4_group_extend */
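/*
 * Return the number of group descriptor blocks needed to describe
 * @groups block groups, i.e. DIV_ROUND_UP(groups, EXT4_DESC_PER_BLOCK(sb)).
 */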
static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg.
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
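		/*
		 * The resize inode must have its double-indirect block
		 * in use and every other i_data slot empty.
		 */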
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ? err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0;
	int meta_bg;
	unsigned int flexbg_size = ext4_flex_bg_size(sbi);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
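		/*
		 * If the target size needs more descriptor blocks than
		 * the reserved GDT blocks can provide, grow only as far
		 * as the reservation allows for now and remember the
		 * full target in n_blocks_count_retry; the remainder is
		 * done after converting to meta_bg.
		 */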
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

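	/*
	 * Convert the filesystem to meta_bg if it cannot grow any further
	 * through the resize inode: either the fs has neither resize_inode
	 * nor meta_bg enabled, or the reserved GDT blocks are exhausted
	 * and the capped target above already equals the current size.
	 */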
	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group is big enough to hold all of the
	 * metadata blocks it might need.  (We might not need to store
	 * the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

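	/*
	 * Done if extending the last group reached the target size and
	 * no retry is pending.
	 */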
	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
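		/* Report progress at most once every ten seconds. */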
		if (time_is_before_jiffies(last_update_time + HZ * 10)) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}