xref: /openbmc/linux/fs/ext4/ialloc.c (revision e5242c5f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/ialloc.c
4  *
5  * Copyright (C) 1992, 1993, 1994, 1995
6  * Remy Card (card@masi.ibp.fr)
7  * Laboratoire MASI - Institut Blaise Pascal
8  * Universite Pierre et Marie Curie (Paris VI)
9  *
10  *  BSD ufs-inspired inode and directory allocation by
11  *  Stephen Tweedie (sct@redhat.com), 1993
12  *  Big-endian to little-endian byte-swapping/bitmaps by
13  *        David S. Miller (davem@caip.rutgers.edu), 1995
14  */
15 
16 #include <linux/time.h>
17 #include <linux/fs.h>
18 #include <linux/stat.h>
19 #include <linux/string.h>
20 #include <linux/quotaops.h>
21 #include <linux/buffer_head.h>
22 #include <linux/random.h>
23 #include <linux/bitops.h>
24 #include <linux/blkdev.h>
25 #include <linux/cred.h>
26 
27 #include <asm/byteorder.h>
28 
29 #include "ext4.h"
30 #include "ext4_jbd2.h"
31 #include "xattr.h"
32 #include "acl.h"
33 
34 #include <trace/events/ext4.h>
35 
36 /*
37  * ialloc.c contains the inodes allocation and deallocation routines
38  */
39 
40 /*
41  * The free inodes are managed by bitmaps.  A file system contains several
42  * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
43  * block for inodes, N blocks for the inode table and data blocks.
44  *
45  * The file system contains group descriptors which are located after the
46  * super block.  Each descriptor contains the number of the bitmap block and
47  * the free blocks count in the block.
48  */
49 
50 /*
51  * To avoid calling the atomic setbit hundreds or thousands of times, we only
52  * need to use it within a single byte (to ensure we get endianness right).
53  * We can use memset for the rest of the bitmap as there are no other users.
54  */
55 void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
56 {
57 	int i;
58 
59 	if (start_bit >= end_bit)
60 		return;
61 
62 	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
63 	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
64 		ext4_set_bit(i, bitmap);
65 	if (i < end_bit)
66 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
67 }
68 
69 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
70 {
71 	if (uptodate) {
72 		set_buffer_uptodate(bh);
73 		set_bitmap_uptodate(bh);
74 	}
75 	unlock_buffer(bh);
76 	put_bh(bh);
77 }
78 
79 static int ext4_validate_inode_bitmap(struct super_block *sb,
80 				      struct ext4_group_desc *desc,
81 				      ext4_group_t block_group,
82 				      struct buffer_head *bh)
83 {
84 	ext4_fsblk_t	blk;
85 	struct ext4_group_info *grp;
86 
87 	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
88 		return 0;
89 
90 	grp = ext4_get_group_info(sb, block_group);
91 
92 	if (buffer_verified(bh))
93 		return 0;
94 	if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
95 		return -EFSCORRUPTED;
96 
97 	ext4_lock_group(sb, block_group);
98 	if (buffer_verified(bh))
99 		goto verified;
100 	blk = ext4_inode_bitmap(sb, desc);
101 	if (!ext4_inode_bitmap_csum_verify(sb, desc, bh,
102 					   EXT4_INODES_PER_GROUP(sb) / 8) ||
103 	    ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
104 		ext4_unlock_group(sb, block_group);
105 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
106 			   "inode_bitmap = %llu", block_group, blk);
107 		ext4_mark_group_bitmap_corrupted(sb, block_group,
108 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
109 		return -EFSBADCRC;
110 	}
111 	set_buffer_verified(bh);
112 verified:
113 	ext4_unlock_group(sb, block_group);
114 	return 0;
115 }
116 
117 /*
118  * Read the inode allocation bitmap for a given block_group, reading
119  * into the specified slot in the superblock's bitmap cache.
120  *
121  * Return buffer_head of bitmap on success, or an ERR_PTR on error.
122  */
123 static struct buffer_head *
124 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
125 {
126 	struct ext4_group_desc *desc;
127 	struct ext4_sb_info *sbi = EXT4_SB(sb);
128 	struct buffer_head *bh = NULL;
129 	ext4_fsblk_t bitmap_blk;
130 	int err;
131 
132 	desc = ext4_get_group_desc(sb, block_group, NULL);
133 	if (!desc)
134 		return ERR_PTR(-EFSCORRUPTED);
135 
136 	bitmap_blk = ext4_inode_bitmap(sb, desc);
137 	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
138 	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
139 		ext4_error(sb, "Invalid inode bitmap blk %llu in "
140 			   "block_group %u", bitmap_blk, block_group);
141 		ext4_mark_group_bitmap_corrupted(sb, block_group,
142 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
143 		return ERR_PTR(-EFSCORRUPTED);
144 	}
145 	bh = sb_getblk(sb, bitmap_blk);
146 	if (unlikely(!bh)) {
147 		ext4_warning(sb, "Cannot read inode bitmap - "
148 			     "block_group = %u, inode_bitmap = %llu",
149 			     block_group, bitmap_blk);
150 		return ERR_PTR(-ENOMEM);
151 	}
152 	if (bitmap_uptodate(bh))
153 		goto verify;
154 
155 	lock_buffer(bh);
156 	if (bitmap_uptodate(bh)) {
157 		unlock_buffer(bh);
158 		goto verify;
159 	}
160 
161 	ext4_lock_group(sb, block_group);
162 	if (ext4_has_group_desc_csum(sb) &&
163 	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
164 		if (block_group == 0) {
165 			ext4_unlock_group(sb, block_group);
166 			unlock_buffer(bh);
167 			ext4_error(sb, "Inode bitmap for bg 0 marked "
168 				   "uninitialized");
169 			err = -EFSCORRUPTED;
170 			goto out;
171 		}
172 		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
173 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
174 				     sb->s_blocksize * 8, bh->b_data);
175 		set_bitmap_uptodate(bh);
176 		set_buffer_uptodate(bh);
177 		set_buffer_verified(bh);
178 		ext4_unlock_group(sb, block_group);
179 		unlock_buffer(bh);
180 		return bh;
181 	}
182 	ext4_unlock_group(sb, block_group);
183 
184 	if (buffer_uptodate(bh)) {
185 		/*
186 		 * if not uninit if bh is uptodate,
187 		 * bitmap is also uptodate
188 		 */
189 		set_bitmap_uptodate(bh);
190 		unlock_buffer(bh);
191 		goto verify;
192 	}
193 	/*
194 	 * submit the buffer_head for reading
195 	 */
196 	trace_ext4_load_inode_bitmap(sb, block_group);
197 	ext4_read_bh(bh, REQ_META | REQ_PRIO,
198 		     ext4_end_bitmap_read,
199 		     ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_EIO));
200 	if (!buffer_uptodate(bh)) {
201 		put_bh(bh);
202 		ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
203 			       "block_group = %u, inode_bitmap = %llu",
204 			       block_group, bitmap_blk);
205 		ext4_mark_group_bitmap_corrupted(sb, block_group,
206 				EXT4_GROUP_INFO_IBITMAP_CORRUPT);
207 		return ERR_PTR(-EIO);
208 	}
209 
210 verify:
211 	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
212 	if (err)
213 		goto out;
214 	return bh;
215 out:
216 	put_bh(bh);
217 	return ERR_PTR(err);
218 }
219 
220 /*
221  * NOTE! When we get the inode, we're the only people
222  * that have access to it, and as such there are no
223  * race conditions we have to worry about. The inode
224  * is not on the hash-lists, and it cannot be reached
225  * through the filesystem because the directory entry
226  * has been deleted earlier.
227  *
228  * HOWEVER: we must make sure that we get no aliases,
229  * which means that we have to call "clear_inode()"
230  * _before_ we mark the inode not in use in the inode
231  * bitmaps. Otherwise a newly created file might use
232  * the same inode number (not actually the same pointer
233  * though), and then we'd have two inodes sharing the
234  * same inode number and space on the harddisk.
235  */
236 void ext4_free_inode(handle_t *handle, struct inode *inode)
237 {
238 	struct super_block *sb = inode->i_sb;
239 	int is_directory;
240 	unsigned long ino;
241 	struct buffer_head *bitmap_bh = NULL;
242 	struct buffer_head *bh2;
243 	ext4_group_t block_group;
244 	unsigned long bit;
245 	struct ext4_group_desc *gdp;
246 	struct ext4_super_block *es;
247 	struct ext4_sb_info *sbi;
248 	int fatal = 0, err, count, cleared;
249 	struct ext4_group_info *grp;
250 
251 	if (!sb) {
252 		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
253 		       "nonexistent device\n", __func__, __LINE__);
254 		return;
255 	}
256 	if (atomic_read(&inode->i_count) > 1) {
257 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
258 			 __func__, __LINE__, inode->i_ino,
259 			 atomic_read(&inode->i_count));
260 		return;
261 	}
262 	if (inode->i_nlink) {
263 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
264 			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
265 		return;
266 	}
267 	sbi = EXT4_SB(sb);
268 
269 	ino = inode->i_ino;
270 	ext4_debug("freeing inode %lu\n", ino);
271 	trace_ext4_free_inode(inode);
272 
273 	dquot_initialize(inode);
274 	dquot_free_inode(inode);
275 
276 	is_directory = S_ISDIR(inode->i_mode);
277 
278 	/* Do this BEFORE marking the inode not in use or returning an error */
279 	ext4_clear_inode(inode);
280 
281 	es = sbi->s_es;
282 	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
283 		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
284 		goto error_return;
285 	}
286 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
287 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
288 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
289 	/* Don't bother if the inode bitmap is corrupt. */
290 	if (IS_ERR(bitmap_bh)) {
291 		fatal = PTR_ERR(bitmap_bh);
292 		bitmap_bh = NULL;
293 		goto error_return;
294 	}
295 	if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
296 		grp = ext4_get_group_info(sb, block_group);
297 		if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
298 			fatal = -EFSCORRUPTED;
299 			goto error_return;
300 		}
301 	}
302 
303 	BUFFER_TRACE(bitmap_bh, "get_write_access");
304 	fatal = ext4_journal_get_write_access(handle, sb, bitmap_bh,
305 					      EXT4_JTR_NONE);
306 	if (fatal)
307 		goto error_return;
308 
309 	fatal = -ESRCH;
310 	gdp = ext4_get_group_desc(sb, block_group, &bh2);
311 	if (gdp) {
312 		BUFFER_TRACE(bh2, "get_write_access");
313 		fatal = ext4_journal_get_write_access(handle, sb, bh2,
314 						      EXT4_JTR_NONE);
315 	}
316 	ext4_lock_group(sb, block_group);
317 	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
318 	if (fatal || !cleared) {
319 		ext4_unlock_group(sb, block_group);
320 		goto out;
321 	}
322 
323 	count = ext4_free_inodes_count(sb, gdp) + 1;
324 	ext4_free_inodes_set(sb, gdp, count);
325 	if (is_directory) {
326 		count = ext4_used_dirs_count(sb, gdp) - 1;
327 		ext4_used_dirs_set(sb, gdp, count);
328 		if (percpu_counter_initialized(&sbi->s_dirs_counter))
329 			percpu_counter_dec(&sbi->s_dirs_counter);
330 	}
331 	ext4_inode_bitmap_csum_set(sb, gdp, bitmap_bh,
332 				   EXT4_INODES_PER_GROUP(sb) / 8);
333 	ext4_group_desc_csum_set(sb, block_group, gdp);
334 	ext4_unlock_group(sb, block_group);
335 
336 	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
337 		percpu_counter_inc(&sbi->s_freeinodes_counter);
338 	if (sbi->s_log_groups_per_flex) {
339 		struct flex_groups *fg;
340 
341 		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
342 					 ext4_flex_group(sbi, block_group));
343 		atomic_inc(&fg->free_inodes);
344 		if (is_directory)
345 			atomic_dec(&fg->used_dirs);
346 	}
347 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
348 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
349 out:
350 	if (cleared) {
351 		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
352 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
353 		if (!fatal)
354 			fatal = err;
355 	} else {
356 		ext4_error(sb, "bit already cleared for inode %lu", ino);
357 		ext4_mark_group_bitmap_corrupted(sb, block_group,
358 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
359 	}
360 
361 error_return:
362 	brelse(bitmap_bh);
363 	ext4_std_error(sb, fatal);
364 }
365 
366 struct orlov_stats {
367 	__u64 free_clusters;
368 	__u32 free_inodes;
369 	__u32 used_dirs;
370 };
371 
372 /*
373  * Helper function for Orlov's allocator; returns critical information
374  * for a particular block group or flex_bg.  If flex_size is 1, then g
375  * is a block group number; otherwise it is flex_bg number.
376  */
377 static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
378 			    int flex_size, struct orlov_stats *stats)
379 {
380 	struct ext4_group_desc *desc;
381 
382 	if (flex_size > 1) {
383 		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
384 							     s_flex_groups, g);
385 		stats->free_inodes = atomic_read(&fg->free_inodes);
386 		stats->free_clusters = atomic64_read(&fg->free_clusters);
387 		stats->used_dirs = atomic_read(&fg->used_dirs);
388 		return;
389 	}
390 
391 	desc = ext4_get_group_desc(sb, g, NULL);
392 	if (desc) {
393 		stats->free_inodes = ext4_free_inodes_count(sb, desc);
394 		stats->free_clusters = ext4_free_group_clusters(sb, desc);
395 		stats->used_dirs = ext4_used_dirs_count(sb, desc);
396 	} else {
397 		stats->free_inodes = 0;
398 		stats->free_clusters = 0;
399 		stats->used_dirs = 0;
400 	}
401 }
402 
403 /*
404  * Orlov's allocator for directories.
405  *
406  * We always try to spread first-level directories.
407  *
408  * If there are blockgroups with both free inodes and free clusters counts
409  * not worse than average we return one with smallest directory count.
410  * Otherwise we simply return a random group.
411  *
412  * For the rest rules look so:
413  *
414  * It's OK to put directory into a group unless
415  * it has too many directories already (max_dirs) or
416  * it has too few free inodes left (min_inodes) or
417  * it has too few free clusters left (min_clusters) or
418  * Parent's group is preferred, if it doesn't satisfy these
419  * conditions we search cyclically through the rest. If none
420  * of the groups look good we just look for a group with more
421  * free inodes than average (starting at parent's group).
422  */
423 
424 static int find_group_orlov(struct super_block *sb, struct inode *parent,
425 			    ext4_group_t *group, umode_t mode,
426 			    const struct qstr *qstr)
427 {
428 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
429 	struct ext4_sb_info *sbi = EXT4_SB(sb);
430 	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
431 	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
432 	unsigned int freei, avefreei, grp_free;
433 	ext4_fsblk_t freec, avefreec;
434 	unsigned int ndirs;
435 	int max_dirs, min_inodes;
436 	ext4_grpblk_t min_clusters;
437 	ext4_group_t i, grp, g, ngroups;
438 	struct ext4_group_desc *desc;
439 	struct orlov_stats stats;
440 	int flex_size = ext4_flex_bg_size(sbi);
441 	struct dx_hash_info hinfo;
442 
443 	ngroups = real_ngroups;
444 	if (flex_size > 1) {
445 		ngroups = (real_ngroups + flex_size - 1) >>
446 			sbi->s_log_groups_per_flex;
447 		parent_group >>= sbi->s_log_groups_per_flex;
448 	}
449 
450 	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
451 	avefreei = freei / ngroups;
452 	freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
453 	avefreec = freec;
454 	do_div(avefreec, ngroups);
455 	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
456 
457 	if (S_ISDIR(mode) &&
458 	    ((parent == d_inode(sb->s_root)) ||
459 	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
460 		int best_ndir = inodes_per_group;
461 		int ret = -1;
462 
463 		if (qstr) {
464 			hinfo.hash_version = DX_HASH_HALF_MD4;
465 			hinfo.seed = sbi->s_hash_seed;
466 			ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
467 			parent_group = hinfo.hash % ngroups;
468 		} else
469 			parent_group = get_random_u32_below(ngroups);
470 		for (i = 0; i < ngroups; i++) {
471 			g = (parent_group + i) % ngroups;
472 			get_orlov_stats(sb, g, flex_size, &stats);
473 			if (!stats.free_inodes)
474 				continue;
475 			if (stats.used_dirs >= best_ndir)
476 				continue;
477 			if (stats.free_inodes < avefreei)
478 				continue;
479 			if (stats.free_clusters < avefreec)
480 				continue;
481 			grp = g;
482 			ret = 0;
483 			best_ndir = stats.used_dirs;
484 		}
485 		if (ret)
486 			goto fallback;
487 	found_flex_bg:
488 		if (flex_size == 1) {
489 			*group = grp;
490 			return 0;
491 		}
492 
493 		/*
494 		 * We pack inodes at the beginning of the flexgroup's
495 		 * inode tables.  Block allocation decisions will do
496 		 * something similar, although regular files will
497 		 * start at 2nd block group of the flexgroup.  See
498 		 * ext4_ext_find_goal() and ext4_find_near().
499 		 */
500 		grp *= flex_size;
501 		for (i = 0; i < flex_size; i++) {
502 			if (grp+i >= real_ngroups)
503 				break;
504 			desc = ext4_get_group_desc(sb, grp+i, NULL);
505 			if (desc && ext4_free_inodes_count(sb, desc)) {
506 				*group = grp+i;
507 				return 0;
508 			}
509 		}
510 		goto fallback;
511 	}
512 
513 	max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
514 	min_inodes = avefreei - inodes_per_group*flex_size / 4;
515 	if (min_inodes < 1)
516 		min_inodes = 1;
517 	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
518 	if (min_clusters < 0)
519 		min_clusters = 0;
520 
521 	/*
522 	 * Start looking in the flex group where we last allocated an
523 	 * inode for this parent directory
524 	 */
525 	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
526 		parent_group = EXT4_I(parent)->i_last_alloc_group;
527 		if (flex_size > 1)
528 			parent_group >>= sbi->s_log_groups_per_flex;
529 	}
530 
531 	for (i = 0; i < ngroups; i++) {
532 		grp = (parent_group + i) % ngroups;
533 		get_orlov_stats(sb, grp, flex_size, &stats);
534 		if (stats.used_dirs >= max_dirs)
535 			continue;
536 		if (stats.free_inodes < min_inodes)
537 			continue;
538 		if (stats.free_clusters < min_clusters)
539 			continue;
540 		goto found_flex_bg;
541 	}
542 
543 fallback:
544 	ngroups = real_ngroups;
545 	avefreei = freei / ngroups;
546 fallback_retry:
547 	parent_group = EXT4_I(parent)->i_block_group;
548 	for (i = 0; i < ngroups; i++) {
549 		grp = (parent_group + i) % ngroups;
550 		desc = ext4_get_group_desc(sb, grp, NULL);
551 		if (desc) {
552 			grp_free = ext4_free_inodes_count(sb, desc);
553 			if (grp_free && grp_free >= avefreei) {
554 				*group = grp;
555 				return 0;
556 			}
557 		}
558 	}
559 
560 	if (avefreei) {
561 		/*
562 		 * The free-inodes counter is approximate, and for really small
563 		 * filesystems the above test can fail to find any blockgroups
564 		 */
565 		avefreei = 0;
566 		goto fallback_retry;
567 	}
568 
569 	return -1;
570 }
571 
572 static int find_group_other(struct super_block *sb, struct inode *parent,
573 			    ext4_group_t *group, umode_t mode)
574 {
575 	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
576 	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
577 	struct ext4_group_desc *desc;
578 	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
579 
580 	/*
581 	 * Try to place the inode is the same flex group as its
582 	 * parent.  If we can't find space, use the Orlov algorithm to
583 	 * find another flex group, and store that information in the
584 	 * parent directory's inode information so that use that flex
585 	 * group for future allocations.
586 	 */
587 	if (flex_size > 1) {
588 		int retry = 0;
589 
590 	try_again:
591 		parent_group &= ~(flex_size-1);
592 		last = parent_group + flex_size;
593 		if (last > ngroups)
594 			last = ngroups;
595 		for  (i = parent_group; i < last; i++) {
596 			desc = ext4_get_group_desc(sb, i, NULL);
597 			if (desc && ext4_free_inodes_count(sb, desc)) {
598 				*group = i;
599 				return 0;
600 			}
601 		}
602 		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
603 			retry = 1;
604 			parent_group = EXT4_I(parent)->i_last_alloc_group;
605 			goto try_again;
606 		}
607 		/*
608 		 * If this didn't work, use the Orlov search algorithm
609 		 * to find a new flex group; we pass in the mode to
610 		 * avoid the topdir algorithms.
611 		 */
612 		*group = parent_group + flex_size;
613 		if (*group > ngroups)
614 			*group = 0;
615 		return find_group_orlov(sb, parent, group, mode, NULL);
616 	}
617 
618 	/*
619 	 * Try to place the inode in its parent directory
620 	 */
621 	*group = parent_group;
622 	desc = ext4_get_group_desc(sb, *group, NULL);
623 	if (desc && ext4_free_inodes_count(sb, desc) &&
624 	    ext4_free_group_clusters(sb, desc))
625 		return 0;
626 
627 	/*
628 	 * We're going to place this inode in a different blockgroup from its
629 	 * parent.  We want to cause files in a common directory to all land in
630 	 * the same blockgroup.  But we want files which are in a different
631 	 * directory which shares a blockgroup with our parent to land in a
632 	 * different blockgroup.
633 	 *
634 	 * So add our directory's i_ino into the starting point for the hash.
635 	 */
636 	*group = (*group + parent->i_ino) % ngroups;
637 
638 	/*
639 	 * Use a quadratic hash to find a group with a free inode and some free
640 	 * blocks.
641 	 */
642 	for (i = 1; i < ngroups; i <<= 1) {
643 		*group += i;
644 		if (*group >= ngroups)
645 			*group -= ngroups;
646 		desc = ext4_get_group_desc(sb, *group, NULL);
647 		if (desc && ext4_free_inodes_count(sb, desc) &&
648 		    ext4_free_group_clusters(sb, desc))
649 			return 0;
650 	}
651 
652 	/*
653 	 * That failed: try linear search for a free inode, even if that group
654 	 * has no free blocks.
655 	 */
656 	*group = parent_group;
657 	for (i = 0; i < ngroups; i++) {
658 		if (++*group >= ngroups)
659 			*group = 0;
660 		desc = ext4_get_group_desc(sb, *group, NULL);
661 		if (desc && ext4_free_inodes_count(sb, desc))
662 			return 0;
663 	}
664 
665 	return -1;
666 }
667 
668 /*
669  * In no journal mode, if an inode has recently been deleted, we want
670  * to avoid reusing it until we're reasonably sure the inode table
671  * block has been written back to disk.  (Yes, these values are
672  * somewhat arbitrary...)
673  */
674 #define RECENTCY_MIN	60
675 #define RECENTCY_DIRTY	300
676 
677 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
678 {
679 	struct ext4_group_desc	*gdp;
680 	struct ext4_inode	*raw_inode;
681 	struct buffer_head	*bh;
682 	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
683 	int offset, ret = 0;
684 	int recentcy = RECENTCY_MIN;
685 	u32 dtime, now;
686 
687 	gdp = ext4_get_group_desc(sb, group, NULL);
688 	if (unlikely(!gdp))
689 		return 0;
690 
691 	bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
692 		       (ino / inodes_per_block));
693 	if (!bh || !buffer_uptodate(bh))
694 		/*
695 		 * If the block is not in the buffer cache, then it
696 		 * must have been written out.
697 		 */
698 		goto out;
699 
700 	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
701 	raw_inode = (struct ext4_inode *) (bh->b_data + offset);
702 
703 	/* i_dtime is only 32 bits on disk, but we only care about relative
704 	 * times in the range of a few minutes (i.e. long enough to sync a
705 	 * recently-deleted inode to disk), so using the low 32 bits of the
706 	 * clock (a 68 year range) is enough, see time_before32() */
707 	dtime = le32_to_cpu(raw_inode->i_dtime);
708 	now = ktime_get_real_seconds();
709 	if (buffer_dirty(bh))
710 		recentcy += RECENTCY_DIRTY;
711 
712 	if (dtime && time_before32(dtime, now) &&
713 	    time_before32(now, dtime + recentcy))
714 		ret = 1;
715 out:
716 	brelse(bh);
717 	return ret;
718 }
719 
720 static int find_inode_bit(struct super_block *sb, ext4_group_t group,
721 			  struct buffer_head *bitmap, unsigned long *ino)
722 {
723 	bool check_recently_deleted = EXT4_SB(sb)->s_journal == NULL;
724 	unsigned long recently_deleted_ino = EXT4_INODES_PER_GROUP(sb);
725 
726 next:
727 	*ino = ext4_find_next_zero_bit((unsigned long *)
728 				       bitmap->b_data,
729 				       EXT4_INODES_PER_GROUP(sb), *ino);
730 	if (*ino >= EXT4_INODES_PER_GROUP(sb))
731 		goto not_found;
732 
733 	if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
734 		recently_deleted_ino = *ino;
735 		*ino = *ino + 1;
736 		if (*ino < EXT4_INODES_PER_GROUP(sb))
737 			goto next;
738 		goto not_found;
739 	}
740 	return 1;
741 not_found:
742 	if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
743 		return 0;
744 	/*
745 	 * Not reusing recently deleted inodes is mostly a preference. We don't
746 	 * want to report ENOSPC or skew allocation patterns because of that.
747 	 * So return even recently deleted inode if we could find better in the
748 	 * given range.
749 	 */
750 	*ino = recently_deleted_ino;
751 	return 1;
752 }
753 
754 int ext4_mark_inode_used(struct super_block *sb, int ino)
755 {
756 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
757 	struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL;
758 	struct ext4_group_desc *gdp;
759 	ext4_group_t group;
760 	int bit;
761 	int err;
762 
763 	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
764 		return -EFSCORRUPTED;
765 
766 	group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
767 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
768 	inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
769 	if (IS_ERR(inode_bitmap_bh))
770 		return PTR_ERR(inode_bitmap_bh);
771 
772 	if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) {
773 		err = 0;
774 		goto out;
775 	}
776 
777 	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
778 	if (!gdp || !group_desc_bh) {
779 		err = -EINVAL;
780 		goto out;
781 	}
782 
783 	ext4_set_bit(bit, inode_bitmap_bh->b_data);
784 
785 	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
786 	err = ext4_handle_dirty_metadata(NULL, NULL, inode_bitmap_bh);
787 	if (err) {
788 		ext4_std_error(sb, err);
789 		goto out;
790 	}
791 	err = sync_dirty_buffer(inode_bitmap_bh);
792 	if (err) {
793 		ext4_std_error(sb, err);
794 		goto out;
795 	}
796 
797 	/* We may have to initialize the block bitmap if it isn't already */
798 	if (ext4_has_group_desc_csum(sb) &&
799 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
800 		struct buffer_head *block_bitmap_bh;
801 
802 		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
803 		if (IS_ERR(block_bitmap_bh)) {
804 			err = PTR_ERR(block_bitmap_bh);
805 			goto out;
806 		}
807 
808 		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
809 		err = ext4_handle_dirty_metadata(NULL, NULL, block_bitmap_bh);
810 		sync_dirty_buffer(block_bitmap_bh);
811 
812 		/* recheck and clear flag under lock if we still need to */
813 		ext4_lock_group(sb, group);
814 		if (ext4_has_group_desc_csum(sb) &&
815 		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
816 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
817 			ext4_free_group_clusters_set(sb, gdp,
818 				ext4_free_clusters_after_init(sb, group, gdp));
819 			ext4_block_bitmap_csum_set(sb, gdp, block_bitmap_bh);
820 			ext4_group_desc_csum_set(sb, group, gdp);
821 		}
822 		ext4_unlock_group(sb, group);
823 		brelse(block_bitmap_bh);
824 
825 		if (err) {
826 			ext4_std_error(sb, err);
827 			goto out;
828 		}
829 	}
830 
831 	/* Update the relevant bg descriptor fields */
832 	if (ext4_has_group_desc_csum(sb)) {
833 		int free;
834 
835 		ext4_lock_group(sb, group); /* while we modify the bg desc */
836 		free = EXT4_INODES_PER_GROUP(sb) -
837 			ext4_itable_unused_count(sb, gdp);
838 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
839 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
840 			free = 0;
841 		}
842 
843 		/*
844 		 * Check the relative inode number against the last used
845 		 * relative inode number in this group. if it is greater
846 		 * we need to update the bg_itable_unused count
847 		 */
848 		if (bit >= free)
849 			ext4_itable_unused_set(sb, gdp,
850 					(EXT4_INODES_PER_GROUP(sb) - bit - 1));
851 	} else {
852 		ext4_lock_group(sb, group);
853 	}
854 
855 	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
856 	if (ext4_has_group_desc_csum(sb)) {
857 		ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh,
858 					   EXT4_INODES_PER_GROUP(sb) / 8);
859 		ext4_group_desc_csum_set(sb, group, gdp);
860 	}
861 
862 	ext4_unlock_group(sb, group);
863 	err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
864 	sync_dirty_buffer(group_desc_bh);
865 out:
866 	brelse(inode_bitmap_bh);
867 	return err;
868 }
869 
870 static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
871 					    bool encrypt)
872 {
873 	struct super_block *sb = dir->i_sb;
874 	int nblocks = 0;
875 #ifdef CONFIG_EXT4_FS_POSIX_ACL
876 	struct posix_acl *p = get_inode_acl(dir, ACL_TYPE_DEFAULT);
877 
878 	if (IS_ERR(p))
879 		return PTR_ERR(p);
880 	if (p) {
881 		int acl_size = p->a_count * sizeof(ext4_acl_entry);
882 
883 		nblocks += (S_ISDIR(mode) ? 2 : 1) *
884 			__ext4_xattr_set_credits(sb, NULL /* inode */,
885 						 NULL /* block_bh */, acl_size,
886 						 true /* is_create */);
887 		posix_acl_release(p);
888 	}
889 #endif
890 
891 #ifdef CONFIG_SECURITY
892 	{
893 		int num_security_xattrs = 1;
894 
895 #ifdef CONFIG_INTEGRITY
896 		num_security_xattrs++;
897 #endif
898 		/*
899 		 * We assume that security xattrs are never more than 1k.
900 		 * In practice they are under 128 bytes.
901 		 */
902 		nblocks += num_security_xattrs *
903 			__ext4_xattr_set_credits(sb, NULL /* inode */,
904 						 NULL /* block_bh */, 1024,
905 						 true /* is_create */);
906 	}
907 #endif
908 	if (encrypt)
909 		nblocks += __ext4_xattr_set_credits(sb,
910 						    NULL /* inode */,
911 						    NULL /* block_bh */,
912 						    FSCRYPT_SET_CONTEXT_MAX_SIZE,
913 						    true /* is_create */);
914 	return nblocks;
915 }
916 
917 /*
918  * There are two policies for allocating an inode.  If the new inode is
919  * a directory, then a forward search is made for a block group with both
920  * free space and a low directory-to-inode ratio; if that fails, then of
921  * the groups with above-average free space, that group with the fewest
922  * directories already is chosen.
923  *
924  * For other inodes, search forward from the parent directory's block
925  * group to find a free inode.
926  */
927 struct inode *__ext4_new_inode(struct mnt_idmap *idmap,
928 			       handle_t *handle, struct inode *dir,
929 			       umode_t mode, const struct qstr *qstr,
930 			       __u32 goal, uid_t *owner, __u32 i_flags,
931 			       int handle_type, unsigned int line_no,
932 			       int nblocks)
933 {
934 	struct super_block *sb;
935 	struct buffer_head *inode_bitmap_bh = NULL;
936 	struct buffer_head *group_desc_bh;
937 	ext4_group_t ngroups, group = 0;
938 	unsigned long ino = 0;
939 	struct inode *inode;
940 	struct ext4_group_desc *gdp = NULL;
941 	struct ext4_inode_info *ei;
942 	struct ext4_sb_info *sbi;
943 	int ret2, err;
944 	struct inode *ret;
945 	ext4_group_t i;
946 	ext4_group_t flex_group;
947 	struct ext4_group_info *grp = NULL;
948 	bool encrypt = false;
949 
950 	/* Cannot create files in a deleted directory */
951 	if (!dir || !dir->i_nlink)
952 		return ERR_PTR(-EPERM);
953 
954 	sb = dir->i_sb;
955 	sbi = EXT4_SB(sb);
956 
957 	if (unlikely(ext4_forced_shutdown(sb)))
958 		return ERR_PTR(-EIO);
959 
960 	ngroups = ext4_get_groups_count(sb);
961 	trace_ext4_request_inode(dir, mode);
962 	inode = new_inode(sb);
963 	if (!inode)
964 		return ERR_PTR(-ENOMEM);
965 	ei = EXT4_I(inode);
966 
967 	/*
968 	 * Initialize owners and quota early so that we don't have to account
969 	 * for quota initialization worst case in standard inode creating
970 	 * transaction
971 	 */
972 	if (owner) {
973 		inode->i_mode = mode;
974 		i_uid_write(inode, owner[0]);
975 		i_gid_write(inode, owner[1]);
976 	} else if (test_opt(sb, GRPID)) {
977 		inode->i_mode = mode;
978 		inode_fsuid_set(inode, idmap);
979 		inode->i_gid = dir->i_gid;
980 	} else
981 		inode_init_owner(idmap, inode, dir, mode);
982 
983 	if (ext4_has_feature_project(sb) &&
984 	    ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
985 		ei->i_projid = EXT4_I(dir)->i_projid;
986 	else
987 		ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID);
988 
989 	if (!(i_flags & EXT4_EA_INODE_FL)) {
990 		err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
991 		if (err)
992 			goto out;
993 	}
994 
995 	err = dquot_initialize(inode);
996 	if (err)
997 		goto out;
998 
999 	if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
1000 		ret2 = ext4_xattr_credits_for_new_inode(dir, mode, encrypt);
1001 		if (ret2 < 0) {
1002 			err = ret2;
1003 			goto out;
1004 		}
1005 		nblocks += ret2;
1006 	}
1007 
1008 	if (!goal)
1009 		goal = sbi->s_inode_goal;
1010 
1011 	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
1012 		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
1013 		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
1014 		ret2 = 0;
1015 		goto got_group;
1016 	}
1017 
1018 	if (S_ISDIR(mode))
1019 		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
1020 	else
1021 		ret2 = find_group_other(sb, dir, &group, mode);
1022 
1023 got_group:
1024 	EXT4_I(dir)->i_last_alloc_group = group;
1025 	err = -ENOSPC;
1026 	if (ret2 == -1)
1027 		goto out;
1028 
1029 	/*
1030 	 * Normally we will only go through one pass of this loop,
1031 	 * unless we get unlucky and it turns out the group we selected
1032 	 * had its last inode grabbed by someone else.
1033 	 */
1034 	for (i = 0; i < ngroups; i++, ino = 0) {
1035 		err = -EIO;
1036 
1037 		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1038 		if (!gdp)
1039 			goto out;
1040 
1041 		/*
1042 		 * Check free inodes count before loading bitmap.
1043 		 */
1044 		if (ext4_free_inodes_count(sb, gdp) == 0)
1045 			goto next_group;
1046 
1047 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1048 			grp = ext4_get_group_info(sb, group);
1049 			/*
1050 			 * Skip groups with already-known suspicious inode
1051 			 * tables
1052 			 */
1053 			if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
1054 				goto next_group;
1055 		}
1056 
1057 		brelse(inode_bitmap_bh);
1058 		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
1059 		/* Skip groups with suspicious inode tables */
1060 		if (IS_ERR(inode_bitmap_bh)) {
1061 			inode_bitmap_bh = NULL;
1062 			goto next_group;
1063 		}
1064 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
1065 		    EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
1066 			goto next_group;
1067 
1068 repeat_in_this_group:
1069 		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
1070 		if (!ret2)
1071 			goto next_group;
1072 
1073 		if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) {
1074 			ext4_error(sb, "reserved inode found cleared - "
1075 				   "inode=%lu", ino + 1);
1076 			ext4_mark_group_bitmap_corrupted(sb, group,
1077 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
1078 			goto next_group;
1079 		}
1080 
1081 		if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) {
1082 			BUG_ON(nblocks <= 0);
1083 			handle = __ext4_journal_start_sb(NULL, dir->i_sb,
1084 				 line_no, handle_type, nblocks, 0,
1085 				 ext4_trans_default_revoke_credits(sb));
1086 			if (IS_ERR(handle)) {
1087 				err = PTR_ERR(handle);
1088 				ext4_std_error(sb, err);
1089 				goto out;
1090 			}
1091 		}
1092 		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
1093 		err = ext4_journal_get_write_access(handle, sb, inode_bitmap_bh,
1094 						    EXT4_JTR_NONE);
1095 		if (err) {
1096 			ext4_std_error(sb, err);
1097 			goto out;
1098 		}
1099 		ext4_lock_group(sb, group);
1100 		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
1101 		if (ret2) {
1102 			/* Someone already took the bit. Repeat the search
1103 			 * with lock held.
1104 			 */
1105 			ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
1106 			if (ret2) {
1107 				ext4_set_bit(ino, inode_bitmap_bh->b_data);
1108 				ret2 = 0;
1109 			} else {
1110 				ret2 = 1; /* we didn't grab the inode */
1111 			}
1112 		}
1113 		ext4_unlock_group(sb, group);
1114 		ino++;		/* the inode bitmap is zero-based */
1115 		if (!ret2)
1116 			goto got; /* we grabbed the inode! */
1117 
1118 		if (ino < EXT4_INODES_PER_GROUP(sb))
1119 			goto repeat_in_this_group;
1120 next_group:
1121 		if (++group == ngroups)
1122 			group = 0;
1123 	}
1124 	err = -ENOSPC;
1125 	goto out;
1126 
1127 got:
1128 	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
1129 	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
1130 	if (err) {
1131 		ext4_std_error(sb, err);
1132 		goto out;
1133 	}
1134 
1135 	BUFFER_TRACE(group_desc_bh, "get_write_access");
1136 	err = ext4_journal_get_write_access(handle, sb, group_desc_bh,
1137 					    EXT4_JTR_NONE);
1138 	if (err) {
1139 		ext4_std_error(sb, err);
1140 		goto out;
1141 	}
1142 
1143 	/* We may have to initialize the block bitmap if it isn't already */
1144 	if (ext4_has_group_desc_csum(sb) &&
1145 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
1146 		struct buffer_head *block_bitmap_bh;
1147 
1148 		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
1149 		if (IS_ERR(block_bitmap_bh)) {
1150 			err = PTR_ERR(block_bitmap_bh);
1151 			goto out;
1152 		}
1153 		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
1154 		err = ext4_journal_get_write_access(handle, sb, block_bitmap_bh,
1155 						    EXT4_JTR_NONE);
1156 		if (err) {
1157 			brelse(block_bitmap_bh);
1158 			ext4_std_error(sb, err);
1159 			goto out;
1160 		}
1161 
1162 		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
1163 		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
1164 
1165 		/* recheck and clear flag under lock if we still need to */
1166 		ext4_lock_group(sb, group);
1167 		if (ext4_has_group_desc_csum(sb) &&
1168 		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
1169 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
1170 			ext4_free_group_clusters_set(sb, gdp,
1171 				ext4_free_clusters_after_init(sb, group, gdp));
1172 			ext4_block_bitmap_csum_set(sb, gdp, block_bitmap_bh);
1173 			ext4_group_desc_csum_set(sb, group, gdp);
1174 		}
1175 		ext4_unlock_group(sb, group);
1176 		brelse(block_bitmap_bh);
1177 
1178 		if (err) {
1179 			ext4_std_error(sb, err);
1180 			goto out;
1181 		}
1182 	}
1183 
1184 	/* Update the relevant bg descriptor fields */
1185 	if (ext4_has_group_desc_csum(sb)) {
1186 		int free;
1187 		struct ext4_group_info *grp = NULL;
1188 
1189 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1190 			grp = ext4_get_group_info(sb, group);
1191 			if (!grp) {
1192 				err = -EFSCORRUPTED;
1193 				goto out;
1194 			}
1195 			down_read(&grp->alloc_sem); /*
1196 						     * protect vs itable
1197 						     * lazyinit
1198 						     */
1199 		}
1200 		ext4_lock_group(sb, group); /* while we modify the bg desc */
1201 		free = EXT4_INODES_PER_GROUP(sb) -
1202 			ext4_itable_unused_count(sb, gdp);
1203 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
1204 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
1205 			free = 0;
1206 		}
1207 		/*
1208 		 * Check the relative inode number against the last used
1209 		 * relative inode number in this group. if it is greater
1210 		 * we need to update the bg_itable_unused count
1211 		 */
1212 		if (ino > free)
1213 			ext4_itable_unused_set(sb, gdp,
1214 					(EXT4_INODES_PER_GROUP(sb) - ino));
1215 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY))
1216 			up_read(&grp->alloc_sem);
1217 	} else {
1218 		ext4_lock_group(sb, group);
1219 	}
1220 
1221 	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
1222 	if (S_ISDIR(mode)) {
1223 		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
1224 		if (sbi->s_log_groups_per_flex) {
1225 			ext4_group_t f = ext4_flex_group(sbi, group);
1226 
1227 			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
1228 							f)->used_dirs);
1229 		}
1230 	}
1231 	if (ext4_has_group_desc_csum(sb)) {
1232 		ext4_inode_bitmap_csum_set(sb, gdp, inode_bitmap_bh,
1233 					   EXT4_INODES_PER_GROUP(sb) / 8);
1234 		ext4_group_desc_csum_set(sb, group, gdp);
1235 	}
1236 	ext4_unlock_group(sb, group);
1237 
1238 	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
1239 	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
1240 	if (err) {
1241 		ext4_std_error(sb, err);
1242 		goto out;
1243 	}
1244 
1245 	percpu_counter_dec(&sbi->s_freeinodes_counter);
1246 	if (S_ISDIR(mode))
1247 		percpu_counter_inc(&sbi->s_dirs_counter);
1248 
1249 	if (sbi->s_log_groups_per_flex) {
1250 		flex_group = ext4_flex_group(sbi, group);
1251 		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
1252 						flex_group)->free_inodes);
1253 	}
1254 
1255 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
1256 	/* This is the optimal IO size (for stat), not the fs block size */
1257 	inode->i_blocks = 0;
1258 	inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
1259 	ei->i_crtime = inode->i_mtime;
1260 
1261 	memset(ei->i_data, 0, sizeof(ei->i_data));
1262 	ei->i_dir_start_lookup = 0;
1263 	ei->i_disksize = 0;
1264 
1265 	/* Don't inherit extent flag from directory, amongst others. */
1266 	ei->i_flags =
1267 		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1268 	ei->i_flags |= i_flags;
1269 	ei->i_file_acl = 0;
1270 	ei->i_dtime = 0;
1271 	ei->i_block_group = group;
1272 	ei->i_last_alloc_group = ~0;
1273 
1274 	ext4_set_inode_flags(inode, true);
1275 	if (IS_DIRSYNC(inode))
1276 		ext4_handle_sync(handle);
1277 	if (insert_inode_locked(inode) < 0) {
1278 		/*
1279 		 * Likely a bitmap corruption causing inode to be allocated
1280 		 * twice.
1281 		 */
1282 		err = -EIO;
1283 		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
1284 			   inode->i_ino);
1285 		ext4_mark_group_bitmap_corrupted(sb, group,
1286 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
1287 		goto out;
1288 	}
1289 	inode->i_generation = get_random_u32();
1290 
1291 	/* Precompute checksum seed for inode metadata */
1292 	if (ext4_has_metadata_csum(sb)) {
1293 		__u32 csum;
1294 		__le32 inum = cpu_to_le32(inode->i_ino);
1295 		__le32 gen = cpu_to_le32(inode->i_generation);
1296 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
1297 				   sizeof(inum));
1298 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
1299 					      sizeof(gen));
1300 	}
1301 
1302 	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
1303 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
1304 
1305 	ei->i_extra_isize = sbi->s_want_extra_isize;
1306 	ei->i_inline_off = 0;
1307 	if (ext4_has_feature_inline_data(sb) &&
1308 	    (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
1309 		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1310 	ret = inode;
1311 	err = dquot_alloc_inode(inode);
1312 	if (err)
1313 		goto fail_drop;
1314 
1315 	/*
1316 	 * Since the encryption xattr will always be unique, create it first so
1317 	 * that it's less likely to end up in an external xattr block and
1318 	 * prevent its deduplication.
1319 	 */
1320 	if (encrypt) {
1321 		err = fscrypt_set_context(inode, handle);
1322 		if (err)
1323 			goto fail_free_drop;
1324 	}
1325 
1326 	if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
1327 		err = ext4_init_acl(handle, inode, dir);
1328 		if (err)
1329 			goto fail_free_drop;
1330 
1331 		err = ext4_init_security(handle, inode, dir, qstr);
1332 		if (err)
1333 			goto fail_free_drop;
1334 	}
1335 
1336 	if (ext4_has_feature_extents(sb)) {
1337 		/* set extent flag only for directory, file and normal symlink*/
1338 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1339 			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
1340 			ext4_ext_tree_init(handle, inode);
1341 		}
1342 	}
1343 
1344 	if (ext4_handle_valid(handle)) {
1345 		ei->i_sync_tid = handle->h_transaction->t_tid;
1346 		ei->i_datasync_tid = handle->h_transaction->t_tid;
1347 	}
1348 
1349 	err = ext4_mark_inode_dirty(handle, inode);
1350 	if (err) {
1351 		ext4_std_error(sb, err);
1352 		goto fail_free_drop;
1353 	}
1354 
1355 	ext4_debug("allocating inode %lu\n", inode->i_ino);
1356 	trace_ext4_allocate_inode(inode, dir, mode);
1357 	brelse(inode_bitmap_bh);
1358 	return ret;
1359 
1360 fail_free_drop:
1361 	dquot_free_inode(inode);
1362 fail_drop:
1363 	clear_nlink(inode);
1364 	unlock_new_inode(inode);
1365 out:
1366 	dquot_drop(inode);
1367 	inode->i_flags |= S_NOQUOTA;
1368 	iput(inode);
1369 	brelse(inode_bitmap_bh);
1370 	return ERR_PTR(err);
1371 }
1372 
1373 /* Verify that we are loading a valid orphan from disk */
1374 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1375 {
1376 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
1377 	ext4_group_t block_group;
1378 	int bit;
1379 	struct buffer_head *bitmap_bh = NULL;
1380 	struct inode *inode = NULL;
1381 	int err = -EFSCORRUPTED;
1382 
1383 	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
1384 		goto bad_orphan;
1385 
1386 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1387 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1388 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1389 	if (IS_ERR(bitmap_bh))
1390 		return ERR_CAST(bitmap_bh);
1391 
1392 	/* Having the inode bit set should be a 100% indicator that this
1393 	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
1394 	 * inodes that were being truncated, so we can't check i_nlink==0.
1395 	 */
1396 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
1397 		goto bad_orphan;
1398 
1399 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
1400 	if (IS_ERR(inode)) {
1401 		err = PTR_ERR(inode);
1402 		ext4_error_err(sb, -err,
1403 			       "couldn't read orphan inode %lu (err %d)",
1404 			       ino, err);
1405 		brelse(bitmap_bh);
1406 		return inode;
1407 	}
1408 
1409 	/*
1410 	 * If the orphans has i_nlinks > 0 then it should be able to
1411 	 * be truncated, otherwise it won't be removed from the orphan
1412 	 * list during processing and an infinite loop will result.
1413 	 * Similarly, it must not be a bad inode.
1414 	 */
1415 	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
1416 	    is_bad_inode(inode))
1417 		goto bad_orphan;
1418 
1419 	if (NEXT_ORPHAN(inode) > max_ino)
1420 		goto bad_orphan;
1421 	brelse(bitmap_bh);
1422 	return inode;
1423 
1424 bad_orphan:
1425 	ext4_error(sb, "bad orphan inode %lu", ino);
1426 	if (bitmap_bh)
1427 		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1428 		       bit, (unsigned long long)bitmap_bh->b_blocknr,
1429 		       ext4_test_bit(bit, bitmap_bh->b_data));
1430 	if (inode) {
1431 		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
1432 		       is_bad_inode(inode));
1433 		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
1434 		       NEXT_ORPHAN(inode));
1435 		printk(KERN_ERR "max_ino=%lu\n", max_ino);
1436 		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
1437 		/* Avoid freeing blocks if we got a bad deleted inode */
1438 		if (inode->i_nlink == 0)
1439 			inode->i_blocks = 0;
1440 		iput(inode);
1441 	}
1442 	brelse(bitmap_bh);
1443 	return ERR_PTR(err);
1444 }
1445 
1446 unsigned long ext4_count_free_inodes(struct super_block *sb)
1447 {
1448 	unsigned long desc_count;
1449 	struct ext4_group_desc *gdp;
1450 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1451 #ifdef EXT4FS_DEBUG
1452 	struct ext4_super_block *es;
1453 	unsigned long bitmap_count, x;
1454 	struct buffer_head *bitmap_bh = NULL;
1455 
1456 	es = EXT4_SB(sb)->s_es;
1457 	desc_count = 0;
1458 	bitmap_count = 0;
1459 	gdp = NULL;
1460 	for (i = 0; i < ngroups; i++) {
1461 		gdp = ext4_get_group_desc(sb, i, NULL);
1462 		if (!gdp)
1463 			continue;
1464 		desc_count += ext4_free_inodes_count(sb, gdp);
1465 		brelse(bitmap_bh);
1466 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
1467 		if (IS_ERR(bitmap_bh)) {
1468 			bitmap_bh = NULL;
1469 			continue;
1470 		}
1471 
1472 		x = ext4_count_free(bitmap_bh->b_data,
1473 				    EXT4_INODES_PER_GROUP(sb) / 8);
1474 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1475 			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1476 		bitmap_count += x;
1477 	}
1478 	brelse(bitmap_bh);
1479 	printk(KERN_DEBUG "ext4_count_free_inodes: "
1480 	       "stored = %u, computed = %lu, %lu\n",
1481 	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1482 	return desc_count;
1483 #else
1484 	desc_count = 0;
1485 	for (i = 0; i < ngroups; i++) {
1486 		gdp = ext4_get_group_desc(sb, i, NULL);
1487 		if (!gdp)
1488 			continue;
1489 		desc_count += ext4_free_inodes_count(sb, gdp);
1490 		cond_resched();
1491 	}
1492 	return desc_count;
1493 #endif
1494 }
1495 
1496 /* Called at mount-time, super-block is locked */
1497 unsigned long ext4_count_dirs(struct super_block * sb)
1498 {
1499 	unsigned long count = 0;
1500 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1501 
1502 	for (i = 0; i < ngroups; i++) {
1503 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1504 		if (!gdp)
1505 			continue;
1506 		count += ext4_used_dirs_count(sb, gdp);
1507 	}
1508 	return count;
1509 }
1510 
1511 /*
1512  * Zeroes not yet zeroed inode table - just write zeroes through the whole
1513  * inode table. Must be called without any spinlock held. The only place
1514  * where it is called from on active part of filesystem is ext4lazyinit
1515  * thread, so we do not need any special locks, however we have to prevent
1516  * inode allocation from the current group, so we take alloc_sem lock, to
1517  * block ext4_new_inode() until we are finished.
1518  */
1519 int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1520 				 int barrier)
1521 {
1522 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1523 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1524 	struct ext4_group_desc *gdp = NULL;
1525 	struct buffer_head *group_desc_bh;
1526 	handle_t *handle;
1527 	ext4_fsblk_t blk;
1528 	int num, ret = 0, used_blks = 0;
1529 	unsigned long used_inos = 0;
1530 
1531 	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1532 	if (!gdp || !grp)
1533 		goto out;
1534 
1535 	/*
1536 	 * We do not need to lock this, because we are the only one
1537 	 * handling this flag.
1538 	 */
1539 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1540 		goto out;
1541 
1542 	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
1543 	if (IS_ERR(handle)) {
1544 		ret = PTR_ERR(handle);
1545 		goto out;
1546 	}
1547 
1548 	down_write(&grp->alloc_sem);
1549 	/*
1550 	 * If inode bitmap was already initialized there may be some
1551 	 * used inodes so we need to skip blocks with used inodes in
1552 	 * inode table.
1553 	 */
1554 	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
1555 		used_inos = EXT4_INODES_PER_GROUP(sb) -
1556 			    ext4_itable_unused_count(sb, gdp);
1557 		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
1558 
1559 		/* Bogus inode unused count? */
1560 		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
1561 			ext4_error(sb, "Something is wrong with group %u: "
1562 				   "used itable blocks: %d; "
1563 				   "itable unused count: %u",
1564 				   group, used_blks,
1565 				   ext4_itable_unused_count(sb, gdp));
1566 			ret = 1;
1567 			goto err_out;
1568 		}
1569 
1570 		used_inos += group * EXT4_INODES_PER_GROUP(sb);
1571 		/*
1572 		 * Are there some uninitialized inodes in the inode table
1573 		 * before the first normal inode?
1574 		 */
1575 		if ((used_blks != sbi->s_itb_per_group) &&
1576 		     (used_inos < EXT4_FIRST_INO(sb))) {
1577 			ext4_error(sb, "Something is wrong with group %u: "
1578 				   "itable unused count: %u; "
1579 				   "itables initialized count: %ld",
1580 				   group, ext4_itable_unused_count(sb, gdp),
1581 				   used_inos);
1582 			ret = 1;
1583 			goto err_out;
1584 		}
1585 	}
1586 
1587 	blk = ext4_inode_table(sb, gdp) + used_blks;
1588 	num = sbi->s_itb_per_group - used_blks;
1589 
1590 	BUFFER_TRACE(group_desc_bh, "get_write_access");
1591 	ret = ext4_journal_get_write_access(handle, sb, group_desc_bh,
1592 					    EXT4_JTR_NONE);
1593 	if (ret)
1594 		goto err_out;
1595 
1596 	/*
1597 	 * Skip zeroout if the inode table is full. But we set the ZEROED
1598 	 * flag anyway, because obviously, when it is full it does not need
1599 	 * further zeroing.
1600 	 */
1601 	if (unlikely(num == 0))
1602 		goto skip_zeroout;
1603 
1604 	ext4_debug("going to zero out inode table in group %d\n",
1605 		   group);
1606 	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1607 	if (ret < 0)
1608 		goto err_out;
1609 	if (barrier)
1610 		blkdev_issue_flush(sb->s_bdev);
1611 
1612 skip_zeroout:
1613 	ext4_lock_group(sb, group);
1614 	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1615 	ext4_group_desc_csum_set(sb, group, gdp);
1616 	ext4_unlock_group(sb, group);
1617 
1618 	BUFFER_TRACE(group_desc_bh,
1619 		     "call ext4_handle_dirty_metadata");
1620 	ret = ext4_handle_dirty_metadata(handle, NULL,
1621 					 group_desc_bh);
1622 
1623 err_out:
1624 	up_write(&grp->alloc_sem);
1625 	ext4_journal_stop(handle);
1626 out:
1627 	return ret;
1628 }
1629