/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count of the group.
 */
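
/*
 * Worked example with illustrative (default-style, not read from any real
 * superblock) geometry: with 4 KiB blocks, 32768 blocks per group and 8192
 * inodes per group, the inode bitmap needs 8192 bits = 1 KiB and easily
 * fits its single bitmap block, while with 256-byte inodes the inode table
 * occupies 8192 * 256 / 4096 = 512 blocks of the group.
 */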

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n",
		   start_bit, end_bit - 1);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
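
/*
 * Worked example for mark_bitmap_end() (hypothetical arguments): with
 * start_bit = 5 and end_bit = 32, the loop sets bits 5..7 one at a time
 * (up to the next byte boundary at bit 8), and the memset then writes
 * (32 - 8) >> 3 = 3 bytes of 0xff, covering bits 8..31.  Callers pass an
 * end_bit that is a multiple of 8 (sb->s_blocksize * 8), so no partial
 * trailing byte is left unmarked.
 */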

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %u",
			   block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
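
/*
 * Illustrative layout of the freshly initialized bitmap (assuming a 4 KiB
 * block and 8192 inodes per group, as in the example above): the memset
 * zeroes bytes 0..1023, marking all 8192 inodes free, and mark_bitmap_end()
 * then sets bytes 1024..4095 to 0xff so the padding bits past the last real
 * inode can never be allocated.
 */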

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and the buffer is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * Submit the buffer_head for read.  We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
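
/*
 * Typical caller pattern (a sketch of how this file itself uses the
 * helper; compare ext4_free_inode() below):
 *
 *	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 *	if (!bitmap_bh)
 *		goto error_return;
 *	...examine or modify bitmap_bh->b_data under the group lock...
 *	brelse(bitmap_bh);
 */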

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_mark(ext4_free_inode,
		   "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu",
		   sb->s_id, inode->i_ino, inode->i_mode,
		   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
		   (unsigned long long) inode->i_blocks);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	vfs_dq_init(inode);
	ext4_xattr_delete_inode(handle, inode);
	vfs_dq_free_inode(inode);
	vfs_dq_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
					bit, bitmap_bh->b_data);
	if (!cleared)
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal)
			goto error_return;

		if (gdp) {
			ext4_lock_group(sb, block_group);
			count = ext4_free_inodes_count(sb, gdp) + 1;
			ext4_free_inodes_set(sb, gdp, count);
			if (is_directory) {
				count = ext4_used_dirs_count(sb, gdp) - 1;
				ext4_used_dirs_set(sb, gdp, count);
				if (sbi->s_log_groups_per_flex) {
					ext4_group_t f;

					f = ext4_flex_group(sbi, block_group);
					/* a directory is going away; adjust
					 * used_dirs, not free_inodes (the
					 * free_inodes count is bumped once
					 * for every inode further down) */
					atomic_dec(&sbi->s_flex_groups[f].used_dirs);
				}
			}
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			ext4_unlock_group(sb, block_group);
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

			if (sbi->s_log_groups_per_flex) {
				ext4_group_t f;

				f = ext4_flex_group(sbi, block_group);
				atomic_inc(&sbi->s_flex_groups[f].free_inodes);
			}
		}
		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
		if (!fatal)
			fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
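
/*
 * The (group, bit) arithmetic above, with made-up numbers: for ino = 8200
 * on a filesystem with 8192 inodes per group, block_group = 8199 / 8192 = 1
 * and bit = 8199 % 8192 = 7, i.e. the 8th bit of group 1's inode bitmap.
 * Inode numbers are 1-based, which is why (ino - 1) is used throughout.
 */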

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_free_inodes_count(sb, desc) < avefreei)
			continue;
		if (!best_desc ||
		    (ext4_free_blks_count(sb, desc) >
		     ext4_free_blks_count(sb, best_desc))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}
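
/*
 * Example of the selection rule above (hypothetical counters): with
 * freei = 600 free inodes spread over ngroups = 4 groups, avefreei = 150;
 * a group with only 100 free inodes is skipped, and of the groups at or
 * above 150 the one with the largest free block count wins.
 */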

#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (ngroups + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (atomic_read(&flex_group[best_flex].free_inodes) &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    (atomic_read(&flex_group[i].free_inodes))) {
			best_flex = i;
			goto found_flexbg;
		}

		if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
		    ((atomic_read(&flex_group[i].free_blocks) >
		      atomic_read(&flex_group[best_flex].free_blocks)) &&
		     atomic_read(&flex_group[i].free_inodes)))
			best_flex = i;
	}

	if (!atomic_read(&flex_group[best_flex].free_inodes) ||
	    !atomic_read(&flex_group[best_flex].free_blocks))
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc && ext4_free_inodes_count(sb, desc)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}
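
/*
 * The free_block_ratio test above, with illustrative numbers: flex_size = 16
 * and 32768 blocks per group give blocks_per_flex = 524288; a flex group
 * with 40000 free blocks scores 40000 * 100 / 524288 = 7, below the
 * free_block_ratio of 10, so the search moves on even if that flex group
 * still has free inodes.
 */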

struct orlov_stats {
	__u32 free_inodes;
	__u32 free_blocks;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
void get_orlov_stats(struct super_block *sb, ext4_group_t g,
		       int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_blocks = ext4_free_blks_count(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_blocks = 0;
		stats->used_dirs = 0;
	}
}
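
/*
 * Usage sketch (mirroring the loops in find_group_orlov() below): the
 * caller decides whether 'g' names a single block group or a whole flex
 * group via flex_size:
 *
 *	struct orlov_stats stats;
 *
 *	get_orlov_stats(sb, g, flex_size, &stats);
 *	if (!stats.free_inodes)
 *		continue;
 */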

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining cases, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_blocks < avefreeb)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_blocks < min_blocks)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_inodes_count(sb, desc) >= avefreei) {
			*group = grp;
			return 0;
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}
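
/*
 * The thresholds above, with illustrative numbers: ndirs = 1000,
 * ngroups = 100, inodes_per_group = 8192 and flex_size = 1 give
 * max_dirs = 1000 / 100 + 8192 / 16 = 522 and, with avefreei = 100,
 * min_inodes = max(100 - 8192 / 4, 1) = 1; a candidate group must stay
 * under max_dirs while keeping at least min_inodes and min_blocks free.
 */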

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group >= ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
			ext4_free_blks_count(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
				ext4_free_blks_count(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
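
/*
 * The quadratic hash above, traced with hypothetical values: for
 * parent_group = 10, parent->i_ino = 35 and ngroups = 16, the start is
 * (10 + 35) % 16 = 13, and the loop then probes groups 14, 0, 4 and 12
 * (cumulative offsets 1, 3, 7, 15 from the start, all mod 16) before
 * falling back to the linear scan.
 */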

/*
 * Claim the inode from the inode bitmap.  If the group
 * is uninit we need to take the group's ext4_group_lock
 * and clear the uninit flag.  The inode bitmap update
 * and group desc uninit flag clear should be done
 * while holding ext4_group_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode.
 */
static int ext4_claim_inode(struct super_block *sb,
			struct buffer_head *inode_bitmap_bh,
			unsigned long ino, ext4_group_t group, int mode)
{
	int free = 0, retval = 0, count;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

	ext4_lock_group(sb, group);
	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
		/* not a free inode */
		retval = 1;
		goto err_ret;
	}
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
			ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_unlock_group(sb, group);
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %u, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		return 1;
	}
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			/* When clearing EXT4_BG_INODE_UNINIT we don't want
			 * to depend on the value of bg_itable_unused even
			 * though mke2fs could have initialized the same for
			 * us.  Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				ext4_itable_unused_count(sb, gdp);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
	}
	count = ext4_free_inodes_count(sb, gdp) - 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (S_ISDIR(mode)) {
		count = ext4_used_dirs_count(sb, gdp) + 1;
		ext4_used_dirs_set(sb, gdp, count);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			/* a new directory; adjust used_dirs, not free_inodes
			 * (free_inodes is decremented by the caller) */
			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
	ext4_unlock_group(sb, group);
	return retval;
}
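
/*
 * Example of the bg_itable_unused update above (made-up numbers): with
 * 8192 inodes per group and ext4_itable_unused_count() = 8000, only the
 * first free = 192 inodes of the table are initialized.  Claiming relative
 * inode 200 (1-based, after the ino++ above) lies past that watermark, so
 * bg_itable_unused is lowered to 8192 - 200 = 7992.
 */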

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	static int once = 1;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
		   dir->i_ino, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
		ret2 = find_group_flex(sb, dir, &group);
		if (ret2 == -1) {
			ret2 = find_group_other(sb, dir, &group, mode);
			if (ret2 == 0 && once) {
				once = 0;
				printk(KERN_NOTICE "ext4: find_group_flex "
				       "failed, fallback succeeded dir %lu\n",
				       dir->i_ino);
			}
		}
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group, mode);
	} else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < ngroups; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);

		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    inode_bitmap_bh);
			if (err)
				goto fail;

			BUFFER_TRACE(group_desc_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
								group_desc_bh);
			if (err)
				goto fail;
			if (!ext4_claim_inode(sb, inode_bitmap_bh,
						ino, group, mode)) {
				/* we won it */
				BUFFER_TRACE(inode_bitmap_bh,
					"call ext4_handle_dirty_metadata");
				err = ext4_handle_dirty_metadata(handle,
								 inode,
							inode_bitmap_bh);
				if (err)
					goto fail;
				/* zero bit is inode number 1 */
				ino++;
				goto got;
			}
			/* we lost it */
			ext4_handle_release_buffer(handle, inode_bitmap_bh);
			ext4_handle_release_buffer(handle, group_desc_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is
		 * very rare.  We cannot repeat the find_group_xxx() call
		 * because that will simply return the same blockgroup, as the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		free = 0;
		ext4_lock_group(sb, group);
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_blks_set(sb, gdp, free);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
			err = ext4_handle_dirty_metadata(handle,
							NULL, block_bitmap_bh);
		}

		brelse(block_bitmap_bh);
		if (err)
			goto fail;
	}
	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	inode->i_uid = current_fsuid();
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit extent flag from directory, amongst others.  We set
	 * extent flag on newly created directory and file only if -o extent
	 * mount option is specified
	 */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (vfs_dq_alloc_inode(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
		   sb->s_id, inode->i_ino, dir->i_ino, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	vfs_dq_free_inode(inode);

fail_drop:
	vfs_dq_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
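
/*
 * The final inode number computed above, with illustrative values:
 * group = 3 and a relative ino of 5 on a filesystem with 8192 inodes per
 * group yield i_ino = 5 + 3 * 8192 = 24581.  The reverse mapping is the
 * (ino - 1) arithmetic used by ext4_free_inode() and ext4_orphan_get().
 */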

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %lu\n",
			i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}