xref: /openbmc/linux/fs/ext4/ialloc.c (revision 82ced6fd)
1 /*
2  *  linux/fs/ext4/ialloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  BSD ufs-inspired inode and directory allocation by
10  *  Stephen Tweedie (sct@redhat.com), 1993
11  *  Big-endian to little-endian byte-swapping/bitmaps by
12  *        David S. Miller (davem@caip.rutgers.edu), 1995
13  */
14 
15 #include <linux/time.h>
16 #include <linux/fs.h>
17 #include <linux/jbd2.h>
18 #include <linux/stat.h>
19 #include <linux/string.h>
20 #include <linux/quotaops.h>
21 #include <linux/buffer_head.h>
22 #include <linux/random.h>
23 #include <linux/bitops.h>
24 #include <linux/blkdev.h>
25 #include <asm/byteorder.h>
26 #include "ext4.h"
27 #include "ext4_jbd2.h"
28 #include "xattr.h"
29 #include "acl.h"
30 #include "group.h"
31 
32 /*
33  * ialloc.c contains the inodes allocation and deallocation routines
34  */
35 
36 /*
37  * The free inodes are managed by bitmaps.  A file system contains several
38  * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
39  * block for inodes, N blocks for the inode table and data blocks.
40  *
41  * The file system contains group descriptors which are located after the
42  * super block.  Each descriptor contains the number of the bitmap block and
43  * the free blocks count in the block.
44  */
45 
46 /*
47  * To avoid calling the atomic setbit hundreds or thousands of times, we only
48  * need to use it within a single byte (to ensure we get endianness right).
49  * We can use memset for the rest of the bitmap as there are no other users.
50  */
51 void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
52 {
53 	int i;
54 
55 	if (start_bit >= end_bit)
56 		return;
57 
58 	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
59 	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
60 		ext4_set_bit(i, bitmap);
61 	if (i < end_bit)
62 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
63 }
64 
65 /* Initializes an uninitialized inode bitmap */
66 unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
67 				ext4_group_t block_group,
68 				struct ext4_group_desc *gdp)
69 {
70 	struct ext4_sb_info *sbi = EXT4_SB(sb);
71 
72 	J_ASSERT_BH(bh, buffer_locked(bh));
73 
74 	/* If checksum is bad mark all blocks and inodes use to prevent
75 	 * allocation, essentially implementing a per-group read-only flag. */
76 	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
77 		ext4_error(sb, __func__, "Checksum bad for group %u",
78 			   block_group);
79 		ext4_free_blks_set(sb, gdp, 0);
80 		ext4_free_inodes_set(sb, gdp, 0);
81 		ext4_itable_unused_set(sb, gdp, 0);
82 		memset(bh->b_data, 0xff, sb->s_blocksize);
83 		return 0;
84 	}
85 
86 	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
87 	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
88 			bh->b_data);
89 
90 	return EXT4_INODES_PER_GROUP(sb);
91 }
92 
93 /*
94  * Read the inode allocation bitmap for a given block_group, reading
95  * into the specified slot in the superblock's bitmap cache.
96  *
97  * Return buffer_head of bitmap on success or NULL.
98  */
99 static struct buffer_head *
100 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
101 {
102 	struct ext4_group_desc *desc;
103 	struct buffer_head *bh = NULL;
104 	ext4_fsblk_t bitmap_blk;
105 
106 	desc = ext4_get_group_desc(sb, block_group, NULL);
107 	if (!desc)
108 		return NULL;
109 	bitmap_blk = ext4_inode_bitmap(sb, desc);
110 	bh = sb_getblk(sb, bitmap_blk);
111 	if (unlikely(!bh)) {
112 		ext4_error(sb, __func__,
113 			    "Cannot read inode bitmap - "
114 			    "block_group = %u, inode_bitmap = %llu",
115 			    block_group, bitmap_blk);
116 		return NULL;
117 	}
118 	if (bitmap_uptodate(bh))
119 		return bh;
120 
121 	lock_buffer(bh);
122 	if (bitmap_uptodate(bh)) {
123 		unlock_buffer(bh);
124 		return bh;
125 	}
126 	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
127 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
128 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
129 		set_bitmap_uptodate(bh);
130 		set_buffer_uptodate(bh);
131 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
132 		unlock_buffer(bh);
133 		return bh;
134 	}
135 	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
136 	if (buffer_uptodate(bh)) {
137 		/*
138 		 * if not uninit if bh is uptodate,
139 		 * bitmap is also uptodate
140 		 */
141 		set_bitmap_uptodate(bh);
142 		unlock_buffer(bh);
143 		return bh;
144 	}
145 	/*
146 	 * submit the buffer_head for read. We can
147 	 * safely mark the bitmap as uptodate now.
148 	 * We do it here so the bitmap uptodate bit
149 	 * get set with buffer lock held.
150 	 */
151 	set_bitmap_uptodate(bh);
152 	if (bh_submit_read(bh) < 0) {
153 		put_bh(bh);
154 		ext4_error(sb, __func__,
155 			    "Cannot read inode bitmap - "
156 			    "block_group = %u, inode_bitmap = %llu",
157 			    block_group, bitmap_blk);
158 		return NULL;
159 	}
160 	return bh;
161 }
162 
163 /*
164  * NOTE! When we get the inode, we're the only people
165  * that have access to it, and as such there are no
166  * race conditions we have to worry about. The inode
167  * is not on the hash-lists, and it cannot be reached
168  * through the filesystem because the directory entry
169  * has been deleted earlier.
170  *
171  * HOWEVER: we must make sure that we get no aliases,
172  * which means that we have to call "clear_inode()"
173  * _before_ we mark the inode not in use in the inode
174  * bitmaps. Otherwise a newly created file might use
175  * the same inode number (not actually the same pointer
176  * though), and then we'd have two inodes sharing the
177  * same inode number and space on the harddisk.
178  */
179 void ext4_free_inode(handle_t *handle, struct inode *inode)
180 {
181 	struct super_block *sb = inode->i_sb;
182 	int is_directory;
183 	unsigned long ino;
184 	struct buffer_head *bitmap_bh = NULL;
185 	struct buffer_head *bh2;
186 	ext4_group_t block_group;
187 	unsigned long bit;
188 	struct ext4_group_desc *gdp;
189 	struct ext4_super_block *es;
190 	struct ext4_sb_info *sbi;
191 	int fatal = 0, err, count, cleared;
192 
193 	if (atomic_read(&inode->i_count) > 1) {
194 		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
195 		       atomic_read(&inode->i_count));
196 		return;
197 	}
198 	if (inode->i_nlink) {
199 		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
200 		       inode->i_nlink);
201 		return;
202 	}
203 	if (!sb) {
204 		printk(KERN_ERR "ext4_free_inode: inode on "
205 		       "nonexistent device\n");
206 		return;
207 	}
208 	sbi = EXT4_SB(sb);
209 
210 	ino = inode->i_ino;
211 	ext4_debug("freeing inode %lu\n", ino);
212 	trace_mark(ext4_free_inode,
213 		   "dev %s ino %lu mode %d uid %lu gid %lu bocks %llu",
214 		   sb->s_id, inode->i_ino, inode->i_mode,
215 		   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
216 		   (unsigned long long) inode->i_blocks);
217 
218 	/*
219 	 * Note: we must free any quota before locking the superblock,
220 	 * as writing the quota to disk may need the lock as well.
221 	 */
222 	vfs_dq_init(inode);
223 	ext4_xattr_delete_inode(handle, inode);
224 	vfs_dq_free_inode(inode);
225 	vfs_dq_drop(inode);
226 
227 	is_directory = S_ISDIR(inode->i_mode);
228 
229 	/* Do this BEFORE marking the inode not in use or returning an error */
230 	clear_inode(inode);
231 
232 	es = EXT4_SB(sb)->s_es;
233 	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
234 		ext4_error(sb, "ext4_free_inode",
235 			   "reserved or nonexistent inode %lu", ino);
236 		goto error_return;
237 	}
238 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
239 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
240 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
241 	if (!bitmap_bh)
242 		goto error_return;
243 
244 	BUFFER_TRACE(bitmap_bh, "get_write_access");
245 	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
246 	if (fatal)
247 		goto error_return;
248 
249 	/* Ok, now we can actually update the inode bitmaps.. */
250 	spin_lock(sb_bgl_lock(sbi, block_group));
251 	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
252 	spin_unlock(sb_bgl_lock(sbi, block_group));
253 	if (!cleared)
254 		ext4_error(sb, "ext4_free_inode",
255 			   "bit already cleared for inode %lu", ino);
256 	else {
257 		gdp = ext4_get_group_desc(sb, block_group, &bh2);
258 
259 		BUFFER_TRACE(bh2, "get_write_access");
260 		fatal = ext4_journal_get_write_access(handle, bh2);
261 		if (fatal) goto error_return;
262 
263 		if (gdp) {
264 			spin_lock(sb_bgl_lock(sbi, block_group));
265 			count = ext4_free_inodes_count(sb, gdp) + 1;
266 			ext4_free_inodes_set(sb, gdp, count);
267 			if (is_directory) {
268 				count = ext4_used_dirs_count(sb, gdp) - 1;
269 				ext4_used_dirs_set(sb, gdp, count);
270 				if (sbi->s_log_groups_per_flex) {
271 					ext4_group_t f;
272 
273 					f = ext4_flex_group(sbi, block_group);
274 					atomic_dec(&sbi->s_flex_groups[f].free_inodes);
275 				}
276 
277 			}
278 			gdp->bg_checksum = ext4_group_desc_csum(sbi,
279 							block_group, gdp);
280 			spin_unlock(sb_bgl_lock(sbi, block_group));
281 			percpu_counter_inc(&sbi->s_freeinodes_counter);
282 			if (is_directory)
283 				percpu_counter_dec(&sbi->s_dirs_counter);
284 
285 			if (sbi->s_log_groups_per_flex) {
286 				ext4_group_t f;
287 
288 				f = ext4_flex_group(sbi, block_group);
289 				atomic_inc(&sbi->s_flex_groups[f].free_inodes);
290 			}
291 		}
292 		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
293 		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
294 		if (!fatal) fatal = err;
295 	}
296 	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
297 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
298 	if (!fatal)
299 		fatal = err;
300 	sb->s_dirt = 1;
301 error_return:
302 	brelse(bitmap_bh);
303 	ext4_std_error(sb, fatal);
304 }
305 
306 /*
307  * There are two policies for allocating an inode.  If the new inode is
308  * a directory, then a forward search is made for a block group with both
309  * free space and a low directory-to-inode ratio; if that fails, then of
310  * the groups with above-average free space, that group with the fewest
311  * directories already is chosen.
312  *
313  * For other inodes, search forward from the parent directory\'s block
314  * group to find a free inode.
315  */
316 static int find_group_dir(struct super_block *sb, struct inode *parent,
317 				ext4_group_t *best_group)
318 {
319 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
320 	unsigned int freei, avefreei;
321 	struct ext4_group_desc *desc, *best_desc = NULL;
322 	ext4_group_t group;
323 	int ret = -1;
324 
325 	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
326 	avefreei = freei / ngroups;
327 
328 	for (group = 0; group < ngroups; group++) {
329 		desc = ext4_get_group_desc(sb, group, NULL);
330 		if (!desc || !ext4_free_inodes_count(sb, desc))
331 			continue;
332 		if (ext4_free_inodes_count(sb, desc) < avefreei)
333 			continue;
334 		if (!best_desc ||
335 		    (ext4_free_blks_count(sb, desc) >
336 		     ext4_free_blks_count(sb, best_desc))) {
337 			*best_group = group;
338 			best_desc = desc;
339 			ret = 0;
340 		}
341 	}
342 	return ret;
343 }
344 
345 #define free_block_ratio 10
346 
347 static int find_group_flex(struct super_block *sb, struct inode *parent,
348 			   ext4_group_t *best_group)
349 {
350 	struct ext4_sb_info *sbi = EXT4_SB(sb);
351 	struct ext4_group_desc *desc;
352 	struct buffer_head *bh;
353 	struct flex_groups *flex_group = sbi->s_flex_groups;
354 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
355 	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
356 	ext4_group_t ngroups = sbi->s_groups_count;
357 	int flex_size = ext4_flex_bg_size(sbi);
358 	ext4_group_t best_flex = parent_fbg_group;
359 	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
360 	int flexbg_free_blocks;
361 	int flex_freeb_ratio;
362 	ext4_group_t n_fbg_groups;
363 	ext4_group_t i;
364 
365 	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
366 		sbi->s_log_groups_per_flex;
367 
368 find_close_to_parent:
369 	flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
370 	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
371 	if (atomic_read(&flex_group[best_flex].free_inodes) &&
372 	    flex_freeb_ratio > free_block_ratio)
373 		goto found_flexbg;
374 
375 	if (best_flex && best_flex == parent_fbg_group) {
376 		best_flex--;
377 		goto find_close_to_parent;
378 	}
379 
380 	for (i = 0; i < n_fbg_groups; i++) {
381 		if (i == parent_fbg_group || i == parent_fbg_group - 1)
382 			continue;
383 
384 		flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
385 		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
386 
387 		if (flex_freeb_ratio > free_block_ratio &&
388 		    (atomic_read(&flex_group[i].free_inodes))) {
389 			best_flex = i;
390 			goto found_flexbg;
391 		}
392 
393 		if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
394 		    ((atomic_read(&flex_group[i].free_blocks) >
395 		      atomic_read(&flex_group[best_flex].free_blocks)) &&
396 		     atomic_read(&flex_group[i].free_inodes)))
397 			best_flex = i;
398 	}
399 
400 	if (!atomic_read(&flex_group[best_flex].free_inodes) ||
401 	    !atomic_read(&flex_group[best_flex].free_blocks))
402 		return -1;
403 
404 found_flexbg:
405 	for (i = best_flex * flex_size; i < ngroups &&
406 		     i < (best_flex + 1) * flex_size; i++) {
407 		desc = ext4_get_group_desc(sb, i, &bh);
408 		if (ext4_free_inodes_count(sb, desc)) {
409 			*best_group = i;
410 			goto out;
411 		}
412 	}
413 
414 	return -1;
415 out:
416 	return 0;
417 }
418 
419 struct orlov_stats {
420 	__u32 free_inodes;
421 	__u32 free_blocks;
422 	__u32 used_dirs;
423 };
424 
425 /*
426  * Helper function for Orlov's allocator; returns critical information
427  * for a particular block group or flex_bg.  If flex_size is 1, then g
428  * is a block group number; otherwise it is flex_bg number.
429  */
430 void get_orlov_stats(struct super_block *sb, ext4_group_t g,
431 		       int flex_size, struct orlov_stats *stats)
432 {
433 	struct ext4_group_desc *desc;
434 	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
435 
436 	if (flex_size > 1) {
437 		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
438 		stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
439 		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
440 		return;
441 	}
442 
443 	desc = ext4_get_group_desc(sb, g, NULL);
444 	if (desc) {
445 		stats->free_inodes = ext4_free_inodes_count(sb, desc);
446 		stats->free_blocks = ext4_free_blks_count(sb, desc);
447 		stats->used_dirs = ext4_used_dirs_count(sb, desc);
448 	} else {
449 		stats->free_inodes = 0;
450 		stats->free_blocks = 0;
451 		stats->used_dirs = 0;
452 	}
453 }
454 
455 /*
456  * Orlov's allocator for directories.
457  *
458  * We always try to spread first-level directories.
459  *
460  * If there are blockgroups with both free inodes and free blocks counts
461  * not worse than average we return one with smallest directory count.
462  * Otherwise we simply return a random group.
463  *
464  * For the rest rules look so:
465  *
466  * It's OK to put directory into a group unless
467  * it has too many directories already (max_dirs) or
468  * it has too few free inodes left (min_inodes) or
469  * it has too few free blocks left (min_blocks) or
470  * Parent's group is preferred, if it doesn't satisfy these
471  * conditions we search cyclically through the rest. If none
472  * of the groups look good we just look for a group with more
473  * free inodes than average (starting at parent's group).
474  */
475 
476 static int find_group_orlov(struct super_block *sb, struct inode *parent,
477 			    ext4_group_t *group, int mode)
478 {
479 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
480 	struct ext4_sb_info *sbi = EXT4_SB(sb);
481 	ext4_group_t ngroups = sbi->s_groups_count;
482 	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
483 	unsigned int freei, avefreei;
484 	ext4_fsblk_t freeb, avefreeb;
485 	unsigned int ndirs;
486 	int max_dirs, min_inodes;
487 	ext4_grpblk_t min_blocks;
488 	ext4_group_t i, grp, g;
489 	struct ext4_group_desc *desc;
490 	struct orlov_stats stats;
491 	int flex_size = ext4_flex_bg_size(sbi);
492 
493 	if (flex_size > 1) {
494 		ngroups = (ngroups + flex_size - 1) >>
495 			sbi->s_log_groups_per_flex;
496 		parent_group >>= sbi->s_log_groups_per_flex;
497 	}
498 
499 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
500 	avefreei = freei / ngroups;
501 	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
502 	avefreeb = freeb;
503 	do_div(avefreeb, ngroups);
504 	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
505 
506 	if (S_ISDIR(mode) &&
507 	    ((parent == sb->s_root->d_inode) ||
508 	     (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
509 		int best_ndir = inodes_per_group;
510 		int ret = -1;
511 
512 		get_random_bytes(&grp, sizeof(grp));
513 		parent_group = (unsigned)grp % ngroups;
514 		for (i = 0; i < ngroups; i++) {
515 			g = (parent_group + i) % ngroups;
516 			get_orlov_stats(sb, g, flex_size, &stats);
517 			if (!stats.free_inodes)
518 				continue;
519 			if (stats.used_dirs >= best_ndir)
520 				continue;
521 			if (stats.free_inodes < avefreei)
522 				continue;
523 			if (stats.free_blocks < avefreeb)
524 				continue;
525 			grp = g;
526 			ret = 0;
527 			best_ndir = stats.used_dirs;
528 		}
529 		if (ret)
530 			goto fallback;
531 	found_flex_bg:
532 		if (flex_size == 1) {
533 			*group = grp;
534 			return 0;
535 		}
536 
537 		/*
538 		 * We pack inodes at the beginning of the flexgroup's
539 		 * inode tables.  Block allocation decisions will do
540 		 * something similar, although regular files will
541 		 * start at 2nd block group of the flexgroup.  See
542 		 * ext4_ext_find_goal() and ext4_find_near().
543 		 */
544 		grp *= flex_size;
545 		for (i = 0; i < flex_size; i++) {
546 			if (grp+i >= sbi->s_groups_count)
547 				break;
548 			desc = ext4_get_group_desc(sb, grp+i, NULL);
549 			if (desc && ext4_free_inodes_count(sb, desc)) {
550 				*group = grp+i;
551 				return 0;
552 			}
553 		}
554 		goto fallback;
555 	}
556 
557 	max_dirs = ndirs / ngroups + inodes_per_group / 16;
558 	min_inodes = avefreei - inodes_per_group*flex_size / 4;
559 	if (min_inodes < 1)
560 		min_inodes = 1;
561 	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;
562 
563 	/*
564 	 * Start looking in the flex group where we last allocated an
565 	 * inode for this parent directory
566 	 */
567 	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
568 		parent_group = EXT4_I(parent)->i_last_alloc_group;
569 		if (flex_size > 1)
570 			parent_group >>= sbi->s_log_groups_per_flex;
571 	}
572 
573 	for (i = 0; i < ngroups; i++) {
574 		grp = (parent_group + i) % ngroups;
575 		get_orlov_stats(sb, grp, flex_size, &stats);
576 		if (stats.used_dirs >= max_dirs)
577 			continue;
578 		if (stats.free_inodes < min_inodes)
579 			continue;
580 		if (stats.free_blocks < min_blocks)
581 			continue;
582 		goto found_flex_bg;
583 	}
584 
585 fallback:
586 	ngroups = sbi->s_groups_count;
587 	avefreei = freei / ngroups;
588 fallback_retry:
589 	parent_group = EXT4_I(parent)->i_block_group;
590 	for (i = 0; i < ngroups; i++) {
591 		grp = (parent_group + i) % ngroups;
592 		desc = ext4_get_group_desc(sb, grp, NULL);
593 		if (desc && ext4_free_inodes_count(sb, desc) &&
594 		    ext4_free_inodes_count(sb, desc) >= avefreei) {
595 			*group = grp;
596 			return 0;
597 		}
598 	}
599 
600 	if (avefreei) {
601 		/*
602 		 * The free-inodes counter is approximate, and for really small
603 		 * filesystems the above test can fail to find any blockgroups
604 		 */
605 		avefreei = 0;
606 		goto fallback_retry;
607 	}
608 
609 	return -1;
610 }
611 
612 static int find_group_other(struct super_block *sb, struct inode *parent,
613 			    ext4_group_t *group, int mode)
614 {
615 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
616 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
617 	struct ext4_group_desc *desc;
618 	ext4_group_t i, last;
619 	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
620 
621 	/*
622 	 * Try to place the inode is the same flex group as its
623 	 * parent.  If we can't find space, use the Orlov algorithm to
624 	 * find another flex group, and store that information in the
625 	 * parent directory's inode information so that use that flex
626 	 * group for future allocations.
627 	 */
628 	if (flex_size > 1) {
629 		int retry = 0;
630 
631 	try_again:
632 		parent_group &= ~(flex_size-1);
633 		last = parent_group + flex_size;
634 		if (last > ngroups)
635 			last = ngroups;
636 		for  (i = parent_group; i < last; i++) {
637 			desc = ext4_get_group_desc(sb, i, NULL);
638 			if (desc && ext4_free_inodes_count(sb, desc)) {
639 				*group = i;
640 				return 0;
641 			}
642 		}
643 		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
644 			retry = 1;
645 			parent_group = EXT4_I(parent)->i_last_alloc_group;
646 			goto try_again;
647 		}
648 		/*
649 		 * If this didn't work, use the Orlov search algorithm
650 		 * to find a new flex group; we pass in the mode to
651 		 * avoid the topdir algorithms.
652 		 */
653 		*group = parent_group + flex_size;
654 		if (*group > ngroups)
655 			*group = 0;
656 		return find_group_orlov(sb, parent, group, mode);
657 	}
658 
659 	/*
660 	 * Try to place the inode in its parent directory
661 	 */
662 	*group = parent_group;
663 	desc = ext4_get_group_desc(sb, *group, NULL);
664 	if (desc && ext4_free_inodes_count(sb, desc) &&
665 			ext4_free_blks_count(sb, desc))
666 		return 0;
667 
668 	/*
669 	 * We're going to place this inode in a different blockgroup from its
670 	 * parent.  We want to cause files in a common directory to all land in
671 	 * the same blockgroup.  But we want files which are in a different
672 	 * directory which shares a blockgroup with our parent to land in a
673 	 * different blockgroup.
674 	 *
675 	 * So add our directory's i_ino into the starting point for the hash.
676 	 */
677 	*group = (*group + parent->i_ino) % ngroups;
678 
679 	/*
680 	 * Use a quadratic hash to find a group with a free inode and some free
681 	 * blocks.
682 	 */
683 	for (i = 1; i < ngroups; i <<= 1) {
684 		*group += i;
685 		if (*group >= ngroups)
686 			*group -= ngroups;
687 		desc = ext4_get_group_desc(sb, *group, NULL);
688 		if (desc && ext4_free_inodes_count(sb, desc) &&
689 				ext4_free_blks_count(sb, desc))
690 			return 0;
691 	}
692 
693 	/*
694 	 * That failed: try linear search for a free inode, even if that group
695 	 * has no free blocks.
696 	 */
697 	*group = parent_group;
698 	for (i = 0; i < ngroups; i++) {
699 		if (++*group >= ngroups)
700 			*group = 0;
701 		desc = ext4_get_group_desc(sb, *group, NULL);
702 		if (desc && ext4_free_inodes_count(sb, desc))
703 			return 0;
704 	}
705 
706 	return -1;
707 }
708 
709 /*
710  * claim the inode from the inode bitmap. If the group
711  * is uninit we need to take the groups's sb_bgl_lock
712  * and clear the uninit flag. The inode bitmap update
713  * and group desc uninit flag clear should be done
714  * after holding sb_bgl_lock so that ext4_read_inode_bitmap
715  * doesn't race with the ext4_claim_inode
716  */
717 static int ext4_claim_inode(struct super_block *sb,
718 			struct buffer_head *inode_bitmap_bh,
719 			unsigned long ino, ext4_group_t group, int mode)
720 {
721 	int free = 0, retval = 0, count;
722 	struct ext4_sb_info *sbi = EXT4_SB(sb);
723 	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
724 
725 	spin_lock(sb_bgl_lock(sbi, group));
726 	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
727 		/* not a free inode */
728 		retval = 1;
729 		goto err_ret;
730 	}
731 	ino++;
732 	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
733 			ino > EXT4_INODES_PER_GROUP(sb)) {
734 		spin_unlock(sb_bgl_lock(sbi, group));
735 		ext4_error(sb, __func__,
736 			   "reserved inode or inode > inodes count - "
737 			   "block_group = %u, inode=%lu", group,
738 			   ino + group * EXT4_INODES_PER_GROUP(sb));
739 		return 1;
740 	}
741 	/* If we didn't allocate from within the initialized part of the inode
742 	 * table then we need to initialize up to this inode. */
743 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
744 
745 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
746 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
747 			/* When marking the block group with
748 			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
749 			 * on the value of bg_itable_unused even though
750 			 * mke2fs could have initialized the same for us.
751 			 * Instead we calculated the value below
752 			 */
753 
754 			free = 0;
755 		} else {
756 			free = EXT4_INODES_PER_GROUP(sb) -
757 				ext4_itable_unused_count(sb, gdp);
758 		}
759 
760 		/*
761 		 * Check the relative inode number against the last used
762 		 * relative inode number in this group. if it is greater
763 		 * we need to  update the bg_itable_unused count
764 		 *
765 		 */
766 		if (ino > free)
767 			ext4_itable_unused_set(sb, gdp,
768 					(EXT4_INODES_PER_GROUP(sb) - ino));
769 	}
770 	count = ext4_free_inodes_count(sb, gdp) - 1;
771 	ext4_free_inodes_set(sb, gdp, count);
772 	if (S_ISDIR(mode)) {
773 		count = ext4_used_dirs_count(sb, gdp) + 1;
774 		ext4_used_dirs_set(sb, gdp, count);
775 		if (sbi->s_log_groups_per_flex) {
776 			ext4_group_t f = ext4_flex_group(sbi, group);
777 
778 			atomic_inc(&sbi->s_flex_groups[f].free_inodes);
779 		}
780 	}
781 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
782 err_ret:
783 	spin_unlock(sb_bgl_lock(sbi, group));
784 	return retval;
785 }
786 
787 /*
788  * There are two policies for allocating an inode.  If the new inode is
789  * a directory, then a forward search is made for a block group with both
790  * free space and a low directory-to-inode ratio; if that fails, then of
791  * the groups with above-average free space, that group with the fewest
792  * directories already is chosen.
793  *
794  * For other inodes, search forward from the parent directory's block
795  * group to find a free inode.
796  */
797 struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
798 {
799 	struct super_block *sb;
800 	struct buffer_head *inode_bitmap_bh = NULL;
801 	struct buffer_head *group_desc_bh;
802 	ext4_group_t group = 0;
803 	unsigned long ino = 0;
804 	struct inode *inode;
805 	struct ext4_group_desc *gdp = NULL;
806 	struct ext4_super_block *es;
807 	struct ext4_inode_info *ei;
808 	struct ext4_sb_info *sbi;
809 	int ret2, err = 0;
810 	struct inode *ret;
811 	ext4_group_t i;
812 	int free = 0;
813 	static int once = 1;
814 	ext4_group_t flex_group;
815 
816 	/* Cannot create files in a deleted directory */
817 	if (!dir || !dir->i_nlink)
818 		return ERR_PTR(-EPERM);
819 
820 	sb = dir->i_sb;
821 	trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
822 		   dir->i_ino, mode);
823 	inode = new_inode(sb);
824 	if (!inode)
825 		return ERR_PTR(-ENOMEM);
826 	ei = EXT4_I(inode);
827 
828 	sbi = EXT4_SB(sb);
829 	es = sbi->s_es;
830 
831 	if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
832 		ret2 = find_group_flex(sb, dir, &group);
833 		if (ret2 == -1) {
834 			ret2 = find_group_other(sb, dir, &group, mode);
835 			if (ret2 == 0 && once) {
836 				once = 0;
837 				printk(KERN_NOTICE "ext4: find_group_flex "
838 				       "failed, fallback succeeded dir %lu\n",
839 				       dir->i_ino);
840 			}
841 		}
842 		goto got_group;
843 	}
844 
845 	if (S_ISDIR(mode)) {
846 		if (test_opt(sb, OLDALLOC))
847 			ret2 = find_group_dir(sb, dir, &group);
848 		else
849 			ret2 = find_group_orlov(sb, dir, &group, mode);
850 	} else
851 		ret2 = find_group_other(sb, dir, &group, mode);
852 
853 got_group:
854 	EXT4_I(dir)->i_last_alloc_group = group;
855 	err = -ENOSPC;
856 	if (ret2 == -1)
857 		goto out;
858 
859 	for (i = 0; i < sbi->s_groups_count; i++) {
860 		err = -EIO;
861 
862 		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
863 		if (!gdp)
864 			goto fail;
865 
866 		brelse(inode_bitmap_bh);
867 		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
868 		if (!inode_bitmap_bh)
869 			goto fail;
870 
871 		ino = 0;
872 
873 repeat_in_this_group:
874 		ino = ext4_find_next_zero_bit((unsigned long *)
875 					      inode_bitmap_bh->b_data,
876 					      EXT4_INODES_PER_GROUP(sb), ino);
877 
878 		if (ino < EXT4_INODES_PER_GROUP(sb)) {
879 
880 			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
881 			err = ext4_journal_get_write_access(handle,
882 							    inode_bitmap_bh);
883 			if (err)
884 				goto fail;
885 
886 			BUFFER_TRACE(group_desc_bh, "get_write_access");
887 			err = ext4_journal_get_write_access(handle,
888 								group_desc_bh);
889 			if (err)
890 				goto fail;
891 			if (!ext4_claim_inode(sb, inode_bitmap_bh,
892 						ino, group, mode)) {
893 				/* we won it */
894 				BUFFER_TRACE(inode_bitmap_bh,
895 					"call ext4_handle_dirty_metadata");
896 				err = ext4_handle_dirty_metadata(handle,
897 								 inode,
898 							inode_bitmap_bh);
899 				if (err)
900 					goto fail;
901 				/* zero bit is inode number 1*/
902 				ino++;
903 				goto got;
904 			}
905 			/* we lost it */
906 			ext4_handle_release_buffer(handle, inode_bitmap_bh);
907 			ext4_handle_release_buffer(handle, group_desc_bh);
908 
909 			if (++ino < EXT4_INODES_PER_GROUP(sb))
910 				goto repeat_in_this_group;
911 		}
912 
913 		/*
914 		 * This case is possible in concurrent environment.  It is very
915 		 * rare.  We cannot repeat the find_group_xxx() call because
916 		 * that will simply return the same blockgroup, because the
917 		 * group descriptor metadata has not yet been updated.
918 		 * So we just go onto the next blockgroup.
919 		 */
920 		if (++group == sbi->s_groups_count)
921 			group = 0;
922 	}
923 	err = -ENOSPC;
924 	goto out;
925 
926 got:
927 	/* We may have to initialize the block bitmap if it isn't already */
928 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
929 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
930 		struct buffer_head *block_bitmap_bh;
931 
932 		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
933 		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
934 		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
935 		if (err) {
936 			brelse(block_bitmap_bh);
937 			goto fail;
938 		}
939 
940 		free = 0;
941 		spin_lock(sb_bgl_lock(sbi, group));
942 		/* recheck and clear flag under lock if we still need to */
943 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
944 			free = ext4_free_blocks_after_init(sb, group, gdp);
945 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
946 			ext4_free_blks_set(sb, gdp, free);
947 			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
948 								gdp);
949 		}
950 		spin_unlock(sb_bgl_lock(sbi, group));
951 
952 		/* Don't need to dirty bitmap block if we didn't change it */
953 		if (free) {
954 			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
955 			err = ext4_handle_dirty_metadata(handle,
956 							NULL, block_bitmap_bh);
957 		}
958 
959 		brelse(block_bitmap_bh);
960 		if (err)
961 			goto fail;
962 	}
963 	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
964 	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
965 	if (err)
966 		goto fail;
967 
968 	percpu_counter_dec(&sbi->s_freeinodes_counter);
969 	if (S_ISDIR(mode))
970 		percpu_counter_inc(&sbi->s_dirs_counter);
971 	sb->s_dirt = 1;
972 
973 	if (sbi->s_log_groups_per_flex) {
974 		flex_group = ext4_flex_group(sbi, group);
975 		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
976 	}
977 
978 	inode->i_uid = current_fsuid();
979 	if (test_opt(sb, GRPID))
980 		inode->i_gid = dir->i_gid;
981 	else if (dir->i_mode & S_ISGID) {
982 		inode->i_gid = dir->i_gid;
983 		if (S_ISDIR(mode))
984 			mode |= S_ISGID;
985 	} else
986 		inode->i_gid = current_fsgid();
987 	inode->i_mode = mode;
988 
989 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
990 	/* This is the optimal IO size (for stat), not the fs block size */
991 	inode->i_blocks = 0;
992 	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
993 						       ext4_current_time(inode);
994 
995 	memset(ei->i_data, 0, sizeof(ei->i_data));
996 	ei->i_dir_start_lookup = 0;
997 	ei->i_disksize = 0;
998 
999 	/*
1000 	 * Don't inherit extent flag from directory, amongst others. We set
1001 	 * extent flag on newly created directory and file only if -o extent
1002 	 * mount option is specified
1003 	 */
1004 	ei->i_flags =
1005 		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1006 	ei->i_file_acl = 0;
1007 	ei->i_dtime = 0;
1008 	ei->i_block_group = group;
1009 	ei->i_last_alloc_group = ~0;
1010 
1011 	ext4_set_inode_flags(inode);
1012 	if (IS_DIRSYNC(inode))
1013 		ext4_handle_sync(handle);
1014 	if (insert_inode_locked(inode) < 0) {
1015 		err = -EINVAL;
1016 		goto fail_drop;
1017 	}
1018 	spin_lock(&sbi->s_next_gen_lock);
1019 	inode->i_generation = sbi->s_next_generation++;
1020 	spin_unlock(&sbi->s_next_gen_lock);
1021 
1022 	ei->i_state = EXT4_STATE_NEW;
1023 
1024 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
1025 
1026 	ret = inode;
1027 	if (vfs_dq_alloc_inode(inode)) {
1028 		err = -EDQUOT;
1029 		goto fail_drop;
1030 	}
1031 
1032 	err = ext4_init_acl(handle, inode, dir);
1033 	if (err)
1034 		goto fail_free_drop;
1035 
1036 	err = ext4_init_security(handle, inode, dir);
1037 	if (err)
1038 		goto fail_free_drop;
1039 
1040 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
1041 		/* set extent flag only for directory, file and normal symlink*/
1042 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1043 			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
1044 			ext4_ext_tree_init(handle, inode);
1045 		}
1046 	}
1047 
1048 	err = ext4_mark_inode_dirty(handle, inode);
1049 	if (err) {
1050 		ext4_std_error(sb, err);
1051 		goto fail_free_drop;
1052 	}
1053 
1054 	ext4_debug("allocating inode %lu\n", inode->i_ino);
1055 	trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
1056 		   sb->s_id, inode->i_ino, dir->i_ino, mode);
1057 	goto really_out;
1058 fail:
1059 	ext4_std_error(sb, err);
1060 out:
1061 	iput(inode);
1062 	ret = ERR_PTR(err);
1063 really_out:
1064 	brelse(inode_bitmap_bh);
1065 	return ret;
1066 
1067 fail_free_drop:
1068 	vfs_dq_free_inode(inode);
1069 
1070 fail_drop:
1071 	vfs_dq_drop(inode);
1072 	inode->i_flags |= S_NOQUOTA;
1073 	inode->i_nlink = 0;
1074 	unlock_new_inode(inode);
1075 	iput(inode);
1076 	brelse(inode_bitmap_bh);
1077 	return ERR_PTR(err);
1078 }
1079 
1080 /* Verify that we are loading a valid orphan from disk */
1081 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1082 {
1083 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
1084 	ext4_group_t block_group;
1085 	int bit;
1086 	struct buffer_head *bitmap_bh;
1087 	struct inode *inode = NULL;
1088 	long err = -EIO;
1089 
1090 	/* Error cases - e2fsck has already cleaned up for us */
1091 	if (ino > max_ino) {
1092 		ext4_warning(sb, __func__,
1093 			     "bad orphan ino %lu!  e2fsck was run?", ino);
1094 		goto error;
1095 	}
1096 
1097 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1098 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1099 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1100 	if (!bitmap_bh) {
1101 		ext4_warning(sb, __func__,
1102 			     "inode bitmap error for orphan %lu", ino);
1103 		goto error;
1104 	}
1105 
1106 	/* Having the inode bit set should be a 100% indicator that this
1107 	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
1108 	 * inodes that were being truncated, so we can't check i_nlink==0.
1109 	 */
1110 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
1111 		goto bad_orphan;
1112 
1113 	inode = ext4_iget(sb, ino);
1114 	if (IS_ERR(inode))
1115 		goto iget_failed;
1116 
1117 	/*
1118 	 * If the orphans has i_nlinks > 0 then it should be able to be
1119 	 * truncated, otherwise it won't be removed from the orphan list
1120 	 * during processing and an infinite loop will result.
1121 	 */
1122 	if (inode->i_nlink && !ext4_can_truncate(inode))
1123 		goto bad_orphan;
1124 
1125 	if (NEXT_ORPHAN(inode) > max_ino)
1126 		goto bad_orphan;
1127 	brelse(bitmap_bh);
1128 	return inode;
1129 
1130 iget_failed:
1131 	err = PTR_ERR(inode);
1132 	inode = NULL;
1133 bad_orphan:
1134 	ext4_warning(sb, __func__,
1135 		     "bad orphan inode %lu!  e2fsck was run?", ino);
1136 	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1137 	       bit, (unsigned long long)bitmap_bh->b_blocknr,
1138 	       ext4_test_bit(bit, bitmap_bh->b_data));
1139 	printk(KERN_NOTICE "inode=%p\n", inode);
1140 	if (inode) {
1141 		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
1142 		       is_bad_inode(inode));
1143 		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
1144 		       NEXT_ORPHAN(inode));
1145 		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
1146 		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
1147 		/* Avoid freeing blocks if we got a bad deleted inode */
1148 		if (inode->i_nlink == 0)
1149 			inode->i_blocks = 0;
1150 		iput(inode);
1151 	}
1152 	brelse(bitmap_bh);
1153 error:
1154 	return ERR_PTR(err);
1155 }
1156 
1157 unsigned long ext4_count_free_inodes(struct super_block *sb)
1158 {
1159 	unsigned long desc_count;
1160 	struct ext4_group_desc *gdp;
1161 	ext4_group_t i;
1162 #ifdef EXT4FS_DEBUG
1163 	struct ext4_super_block *es;
1164 	unsigned long bitmap_count, x;
1165 	struct buffer_head *bitmap_bh = NULL;
1166 
1167 	es = EXT4_SB(sb)->s_es;
1168 	desc_count = 0;
1169 	bitmap_count = 0;
1170 	gdp = NULL;
1171 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
1172 		gdp = ext4_get_group_desc(sb, i, NULL);
1173 		if (!gdp)
1174 			continue;
1175 		desc_count += ext4_free_inodes_count(sb, gdp);
1176 		brelse(bitmap_bh);
1177 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
1178 		if (!bitmap_bh)
1179 			continue;
1180 
1181 		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
1182 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1183 			i, ext4_free_inodes_count(sb, gdp), x);
1184 		bitmap_count += x;
1185 	}
1186 	brelse(bitmap_bh);
1187 	printk(KERN_DEBUG "ext4_count_free_inodes: "
1188 	       "stored = %u, computed = %lu, %lu\n",
1189 	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1190 	return desc_count;
1191 #else
1192 	desc_count = 0;
1193 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
1194 		gdp = ext4_get_group_desc(sb, i, NULL);
1195 		if (!gdp)
1196 			continue;
1197 		desc_count += ext4_free_inodes_count(sb, gdp);
1198 		cond_resched();
1199 	}
1200 	return desc_count;
1201 #endif
1202 }
1203 
1204 /* Called at mount-time, super-block is locked */
1205 unsigned long ext4_count_dirs(struct super_block * sb)
1206 {
1207 	unsigned long count = 0;
1208 	ext4_group_t i;
1209 
1210 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
1211 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1212 		if (!gdp)
1213 			continue;
1214 		count += ext4_used_dirs_count(sb, gdp);
1215 	}
1216 	return count;
1217 }
1218