/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count for the group.
 */
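
/*
 * Worked example of the group geometry (illustrative numbers, not read
 * from any superblock): with a 4 KiB block size, one bitmap block holds
 * 8 * 4096 = 32768 bits, so a block group can span up to 32768 blocks
 * and describe up to 32768 inodes.  The real counts always come from
 * EXT4_BLOCKS_PER_GROUP(sb) and EXT4_INODES_PER_GROUP(sb).
 */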

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
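
/*
 * Worked example (sketch): mark_bitmap_end(100, 128, bitmap) sets bits
 * 100..103 one at a time with ext4_set_bit() to reach the byte boundary
 * at bit 104, then memsets bytes 13..15 (bits 104..127) to 0xff in one
 * go.  Note that the memset length (end_bit - i) >> 3 rounds down, so
 * callers are expected to pass a byte-aligned end_bit.
 */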

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks and inodes in use to
	 * prevent allocation, essentially implementing a per-group
	 * read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %lu\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

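	/* Clear the in-use bits for real inodes, then mark the tail of the
	 * bitmap - the bits past EXT4_INODES_PER_GROUP(sb) that correspond
	 * to no inode - as in use, so the allocator never hands them out. */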
	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bh_uptodate_or_lock(bh))
		return bh;

	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
		return bh;
	}
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
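
/*
 * Usage sketch (mirrors the callers in this file): the returned
 * buffer_head carries a reference that the caller must drop.
 *
 *	bitmap_bh = ext4_read_inode_bitmap(sb, group);
 *	if (!bitmap_bh)
 *		goto error;
 *	... inspect or journal bitmap_bh->b_data ...
 *	brelse(bitmap_bh);
 */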

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;
	ext4_group_t flex_group;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
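	/*
	 * Inode numbers are 1-based and laid out linearly across groups,
	 * so (ino - 1) / EXT4_INODES_PER_GROUP(sb) is the owning group and
	 * (ino - 1) % EXT4_INODES_PER_GROUP(sb) is the bit inside that
	 * group's bitmap.  E.g. with 8192 inodes per group (illustrative
	 * number), ino 8200 maps to group 1, bit 7.
	 */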
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmap.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
				   bit, bitmap_bh->b_data))
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal)
			goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
			if (is_directory)
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

			if (sbi->s_log_groups_per_flex) {
				flex_group = ext4_flex_group(sbi, block_group);
				spin_lock(sb_bgl_lock(sbi, flex_group));
				sbi->s_flex_groups[flex_group].free_inodes++;
				spin_unlock(sb_bgl_lock(sbi, flex_group));
			}
		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal)
			fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the group with the fewest
 * directories already in it is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}

#define free_block_ratio 10
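
/*
 * Illustrative reading of free_block_ratio (numbers assumed, not taken
 * from any on-disk field): a flex group qualifies only while more than
 * 10% of its blocks are free.  With flex_size = 16 and 32768 blocks per
 * group, blocks_per_flex = 524288, so at least ~52429 free blocks are
 * needed before find_group_flex() picks that flex group on ratio grounds.
 */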

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = sbi->s_groups_count;
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = flex_group[best_flex].free_blocks;
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (flex_group[best_flex].free_inodes &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = flex_group[i].free_blocks;
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    flex_group[i].free_inodes) {
			best_flex = i;
			goto found_flexbg;
		}

		if (flex_group[best_flex].free_inodes == 0 ||
		    (flex_group[i].free_blocks >
		     flex_group[best_flex].free_blocks &&
		     flex_group[i].free_inodes))
			best_flex = i;
	}

	if (!flex_group[best_flex].free_inodes ||
	    !flex_group[best_flex].free_blocks)
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, &bh);
		if (le16_to_cpu(desc->bg_free_inodes_count)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining cases, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large a debt (max_debt).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at the parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256
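
/*
 * Worked example of the debt cap (illustrative numbers): with 32768
 * blocks per group and blocks_per_dir = 512, max_debt starts as
 * 32768 / max(512, BLOCK_COST) = 64; since 64 * INODE_COST = 4096 does
 * not exceed an 8192-inode group it stays 64, inside the [1, 255]
 * clamp.  Note that in this version max_debt is computed but never
 * compared against a per-group debt counter in the scan below.
 */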

static int find_group_orlov(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t grp;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			grp = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, grp, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			*group = grp;
			ret = 0;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (ret == 0)
			return ret;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return 0;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && desc->bg_free_inodes_count &&
		    le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return 0;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i;

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
			le16_to_cpu(desc->bg_free_blocks_count))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
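	/*
	 * Probe sequence sketch: with hashed starting group g, the loop
	 * below visits g+1, g+3, g+7, g+15, ... (mod ngroups), since the
	 * step i doubles each pass and is added cumulatively to *group.
	 */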
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
				le16_to_cpu(desc->bg_free_blocks_count))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return 0;
	}

	return -1;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the group with the fewest
 * directories already in it is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (sbi->s_log_groups_per_flex) {
		ret2 = find_group_flex(sb, dir, &group);
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group);
	} else
		ret2 = find_group_other(sb, dir, &group);

got_group:
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is
		 * very rare.  We cannot repeat the find_group_xxx() call
		 * because that will simply return the same blockgroup, as
		 * the group descriptor metadata has not yet been updated.
		 * So we just go on to the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %lu, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err)
		goto fail;

	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = ext4_read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}

	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */

			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}
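
	/*
	 * Worked example (illustrative numbers): with 8192 inodes per
	 * group and bg_itable_unused == 8000 (so free == 192 above),
	 * allocating relative inode 200 lies past the initialized region,
	 * and bg_itable_unused is rewritten to 8192 - 200 = 7992.
	 */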

	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
	if (S_ISDIR(mode)) {
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_inodes--;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	inode->i_uid = current->fsuid;
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit the extent flag from the directory.  We set the
	 * extent flag on newly created directories and files only if the
	 * -o extents mount option is specified.
	 */
	ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (test_opt(sb, EXTENTS)) {
		/* set extent flag only for directories, files and normal symlinks */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
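
/*
 * Caller-side sketch (simplified, for orientation only; modeled on the
 * ext4_create() pattern in fs/ext4/namei.c, with "credits" standing in
 * for the caller's journal credit estimate):
 *
 *	handle = ext4_journal_start(dir, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	inode = ext4_new_inode(handle, dir, mode);
 *	if (IS_ERR(inode))
 *		err = PTR_ERR(inode);
 *	else
 *		... set i_op/i_fop and add the directory entry ...
 *	ext4_journal_stop(handle);
 */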

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG
	       "ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}