xref: /openbmc/linux/fs/ext4/ialloc.c (revision 206a81c1)
1  /*
2   *  linux/fs/ext4/ialloc.c
3   *
4   * Copyright (C) 1992, 1993, 1994, 1995
5   * Remy Card (card@masi.ibp.fr)
6   * Laboratoire MASI - Institut Blaise Pascal
7   * Universite Pierre et Marie Curie (Paris VI)
8   *
9   *  BSD ufs-inspired inode and directory allocation by
10   *  Stephen Tweedie (sct@redhat.com), 1993
11   *  Big-endian to little-endian byte-swapping/bitmaps by
12   *        David S. Miller (davem@caip.rutgers.edu), 1995
13   */
14  
15  #include <linux/time.h>
16  #include <linux/fs.h>
17  #include <linux/jbd2.h>
18  #include <linux/stat.h>
19  #include <linux/string.h>
20  #include <linux/quotaops.h>
21  #include <linux/buffer_head.h>
22  #include <linux/random.h>
23  #include <linux/bitops.h>
24  #include <linux/blkdev.h>
25  #include <asm/byteorder.h>
26  
27  #include "ext4.h"
28  #include "ext4_jbd2.h"
29  #include "xattr.h"
30  #include "acl.h"
31  
32  #include <trace/events/ext4.h>
33  
34  /*
35   * ialloc.c contains the inode allocation and deallocation routines
36   */
37  
38  /*
39   * The free inodes are managed by bitmaps.  A file system contains several
40   * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
41   * block for inodes, N blocks for the inode table and data blocks.
42   *
43   * The file system contains group descriptors which are located after the
44   * super block.  Each descriptor records the block numbers of the bitmaps
45   * and the free block and free inode counts for its group.
46   */
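/*
 * A worked example, with hypothetical numbers: if EXT4_INODES_PER_GROUP(sb)
 * is 8192, then on-disk inode 8195 lives in block group
 * (8195 - 1) / 8192 == 1, at bit (8195 - 1) % 8192 == 2 of that group's
 * inode bitmap.  ext4_free_inode() and ext4_orphan_get() below perform
 * exactly this split.
 */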
47  
48  /*
49   * To avoid calling the atomic setbit hundreds or thousands of times, we only
50   * need to use it within a single byte (to ensure we get endianness right).
51   * We can use memset for the rest of the bitmap as there are no other users.
52   */
53  void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
54  {
55  	int i;
56  
57  	if (start_bit >= end_bit)
58  		return;
59  
60  	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
61  	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
62  		ext4_set_bit(i, bitmap);
63  	if (i < end_bit)
64  		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
65  }
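
/*
 * To illustrate the arithmetic above (hypothetical numbers): with a 4 KiB
 * block, end_bit is 4096 * 8 = 32768.  For start_bit = 100, the loop sets
 * bits 100..103 individually (up to the byte boundary at 104), and the
 * memset() then fills the remaining (32768 - 104) >> 3 = 4083 bytes,
 * starting at byte offset 104 >> 3 = 13, with 0xff.
 */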
66  
67  /* Initializes an uninitialized inode bitmap */
68  static unsigned ext4_init_inode_bitmap(struct super_block *sb,
69  				       struct buffer_head *bh,
70  				       ext4_group_t block_group,
71  				       struct ext4_group_desc *gdp)
72  {
73  	struct ext4_group_info *grp;
74  	J_ASSERT_BH(bh, buffer_locked(bh));
75  
76  	/* If the checksum is bad, mark all blocks and inodes used to prevent
77  	 * allocation, essentially implementing a per-group read-only flag. */
78  	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
79  		ext4_error(sb, "Checksum bad for group %u", block_group);
80  		grp = ext4_get_group_info(sb, block_group);
81  		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
82  		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
83  		return 0;
84  	}
85  
86  	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
87  	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
88  			bh->b_data);
89  	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
90  				   EXT4_INODES_PER_GROUP(sb) / 8);
91  	ext4_group_desc_csum_set(sb, block_group, gdp);
92  
93  	return EXT4_INODES_PER_GROUP(sb);
94  }
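
/*
 * For instance (hypothetical numbers): with 8192 inodes per group and a
 * 4 KiB block, the function above clears the first (8192 + 7) / 8 = 1024
 * bytes of the bitmap and then marks the trailing bits 8192..32767 as in
 * use via ext4_mark_bitmap_end(), so they can never be handed out to new
 * inodes.
 */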
95  
96  void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
97  {
98  	if (uptodate) {
99  		set_buffer_uptodate(bh);
100  		set_bitmap_uptodate(bh);
101  	}
102  	unlock_buffer(bh);
103  	put_bh(bh);
104  }
105  
106  /*
107   * Read the inode allocation bitmap for a given block_group, initializing
108   * it from the group descriptor if the group is still marked uninitialized.
109   *
110   * Return the buffer_head of the bitmap on success, or NULL on failure.
111   */
112  static struct buffer_head *
113  ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
114  {
115  	struct ext4_group_desc *desc;
116  	struct buffer_head *bh = NULL;
117  	ext4_fsblk_t bitmap_blk;
118  	struct ext4_group_info *grp;
119  
120  	desc = ext4_get_group_desc(sb, block_group, NULL);
121  	if (!desc)
122  		return NULL;
123  
124  	bitmap_blk = ext4_inode_bitmap(sb, desc);
125  	bh = sb_getblk(sb, bitmap_blk);
126  	if (unlikely(!bh)) {
127  		ext4_error(sb, "Cannot read inode bitmap - "
128  			    "block_group = %u, inode_bitmap = %llu",
129  			    block_group, bitmap_blk);
130  		return NULL;
131  	}
132  	if (bitmap_uptodate(bh))
133  		goto verify;
134  
135  	lock_buffer(bh);
136  	if (bitmap_uptodate(bh)) {
137  		unlock_buffer(bh);
138  		goto verify;
139  	}
140  
141  	ext4_lock_group(sb, block_group);
142  	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
143  		ext4_init_inode_bitmap(sb, bh, block_group, desc);
144  		set_bitmap_uptodate(bh);
145  		set_buffer_uptodate(bh);
146  		set_buffer_verified(bh);
147  		ext4_unlock_group(sb, block_group);
148  		unlock_buffer(bh);
149  		return bh;
150  	}
151  	ext4_unlock_group(sb, block_group);
152  
153  	if (buffer_uptodate(bh)) {
154  		/*
155  		 * if the group is not uninit and bh is uptodate,
156  		 * the bitmap is also uptodate
157  		 */
158  		set_bitmap_uptodate(bh);
159  		unlock_buffer(bh);
160  		goto verify;
161  	}
162  	/*
163  	 * submit the buffer_head for reading
164  	 */
165  	trace_ext4_load_inode_bitmap(sb, block_group);
166  	bh->b_end_io = ext4_end_bitmap_read;
167  	get_bh(bh);
168  	submit_bh(READ | REQ_META | REQ_PRIO, bh);
169  	wait_on_buffer(bh);
170  	if (!buffer_uptodate(bh)) {
171  		put_bh(bh);
172  		ext4_error(sb, "Cannot read inode bitmap - "
173  			   "block_group = %u, inode_bitmap = %llu",
174  			   block_group, bitmap_blk);
175  		return NULL;
176  	}
177  
178  verify:
179  	ext4_lock_group(sb, block_group);
180  	if (!buffer_verified(bh) &&
181  	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
182  					   EXT4_INODES_PER_GROUP(sb) / 8)) {
183  		ext4_unlock_group(sb, block_group);
184  		put_bh(bh);
185  		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
186  			   "inode_bitmap = %llu", block_group, bitmap_blk);
187  		grp = ext4_get_group_info(sb, block_group);
188  		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
189  		return NULL;
190  	}
191  	ext4_unlock_group(sb, block_group);
192  	set_buffer_verified(bh);
193  	return bh;
194  }
195  
196  /*
197   * NOTE! When we get the inode, we're the only people
198   * that have access to it, and as such there are no
199   * race conditions we have to worry about. The inode
200   * is not on the hash-lists, and it cannot be reached
201   * through the filesystem because the directory entry
202   * has been deleted earlier.
203   *
204   * HOWEVER: we must make sure that we get no aliases,
205   * which means that we have to call "clear_inode()"
206   * _before_ we mark the inode not in use in the inode
207   * bitmaps. Otherwise a newly created file might use
208   * the same inode number (not actually the same pointer
209   * though), and then we'd have two inodes sharing the
210   * same inode number and space on the hard disk.
211   */
212  void ext4_free_inode(handle_t *handle, struct inode *inode)
213  {
214  	struct super_block *sb = inode->i_sb;
215  	int is_directory;
216  	unsigned long ino;
217  	struct buffer_head *bitmap_bh = NULL;
218  	struct buffer_head *bh2;
219  	ext4_group_t block_group;
220  	unsigned long bit;
221  	struct ext4_group_desc *gdp;
222  	struct ext4_super_block *es;
223  	struct ext4_sb_info *sbi;
224  	int fatal = 0, err, count, cleared;
225  	struct ext4_group_info *grp;
226  
227  	if (!sb) {
228  		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
229  		       "nonexistent device\n", __func__, __LINE__);
230  		return;
231  	}
232  	if (atomic_read(&inode->i_count) > 1) {
233  		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
234  			 __func__, __LINE__, inode->i_ino,
235  			 atomic_read(&inode->i_count));
236  		return;
237  	}
238  	if (inode->i_nlink) {
239  		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
240  			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
241  		return;
242  	}
243  	sbi = EXT4_SB(sb);
244  
245  	ino = inode->i_ino;
246  	ext4_debug("freeing inode %lu\n", ino);
247  	trace_ext4_free_inode(inode);
248  
249  	/*
250  	 * Note: we must free any quota before locking the superblock,
251  	 * as writing the quota to disk may need the lock as well.
252  	 */
253  	dquot_initialize(inode);
254  	ext4_xattr_delete_inode(handle, inode);
255  	dquot_free_inode(inode);
256  	dquot_drop(inode);
257  
258  	is_directory = S_ISDIR(inode->i_mode);
259  
260  	/* Do this BEFORE marking the inode not in use or returning an error */
261  	ext4_clear_inode(inode);
262  
263  	es = EXT4_SB(sb)->s_es;
264  	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
265  		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
266  		goto error_return;
267  	}
268  	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
269  	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
270  	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
271  	/* Don't bother if the inode bitmap is corrupt. */
272  	grp = ext4_get_group_info(sb, block_group);
273  	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
274  		goto error_return;
275  
276  	BUFFER_TRACE(bitmap_bh, "get_write_access");
277  	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
278  	if (fatal)
279  		goto error_return;
280  
281  	fatal = -ESRCH;
282  	gdp = ext4_get_group_desc(sb, block_group, &bh2);
283  	if (gdp) {
284  		BUFFER_TRACE(bh2, "get_write_access");
285  		fatal = ext4_journal_get_write_access(handle, bh2);
286  	}
287  	ext4_lock_group(sb, block_group);
288  	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
289  	if (fatal || !cleared) {
290  		ext4_unlock_group(sb, block_group);
291  		goto out;
292  	}
293  
294  	count = ext4_free_inodes_count(sb, gdp) + 1;
295  	ext4_free_inodes_set(sb, gdp, count);
296  	if (is_directory) {
297  		count = ext4_used_dirs_count(sb, gdp) - 1;
298  		ext4_used_dirs_set(sb, gdp, count);
299  		percpu_counter_dec(&sbi->s_dirs_counter);
300  	}
301  	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
302  				   EXT4_INODES_PER_GROUP(sb) / 8);
303  	ext4_group_desc_csum_set(sb, block_group, gdp);
304  	ext4_unlock_group(sb, block_group);
305  
306  	percpu_counter_inc(&sbi->s_freeinodes_counter);
307  	if (sbi->s_log_groups_per_flex) {
308  		ext4_group_t f = ext4_flex_group(sbi, block_group);
309  
310  		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
311  		if (is_directory)
312  			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
313  	}
314  	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
315  	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
316  out:
317  	if (cleared) {
318  		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
319  		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
320  		if (!fatal)
321  			fatal = err;
322  	} else {
323  		ext4_error(sb, "bit already cleared for inode %lu", ino);
324  		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
325  	}
326  
327  error_return:
328  	brelse(bitmap_bh);
329  	ext4_std_error(sb, fatal);
330  }
331  
332  struct orlov_stats {
333  	__u64 free_clusters;
334  	__u32 free_inodes;
335  	__u32 used_dirs;
336  };
337  
338  /*
339   * Helper function for Orlov's allocator; returns critical information
340   * for a particular block group or flex_bg.  If flex_size is 1, then g
341   * is a block group number; otherwise it is flex_bg number.
342   */
343  static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
344  			    int flex_size, struct orlov_stats *stats)
345  {
346  	struct ext4_group_desc *desc;
347  	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
348  
349  	if (flex_size > 1) {
350  		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
351  		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
352  		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
353  		return;
354  	}
355  
356  	desc = ext4_get_group_desc(sb, g, NULL);
357  	if (desc) {
358  		stats->free_inodes = ext4_free_inodes_count(sb, desc);
359  		stats->free_clusters = ext4_free_group_clusters(sb, desc);
360  		stats->used_dirs = ext4_used_dirs_count(sb, desc);
361  	} else {
362  		stats->free_inodes = 0;
363  		stats->free_clusters = 0;
364  		stats->used_dirs = 0;
365  	}
366  }
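
/*
 * Note that for flex_size > 1 the stats above come from the in-memory
 * s_flex_groups[] counters, which aggregate a whole flex group, while
 * the flex_size == 1 case reads the counts from that single group's
 * descriptor.
 */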
367  
368  /*
369   * Orlov's allocator for directories.
370   *
371   * We always try to spread first-level directories.
372   *
373   * If there are block groups whose free inode and free block counts are
374   * both at least average, we return the one with the smallest directory
375   * count.  Otherwise we simply return a random group.
376   *
377   * For all other directories the rules are:
378   *
379   * It's OK to put a directory into a group unless
380   * it already has too many directories (max_dirs), or
381   * it has too few free inodes left (min_inodes), or
382   * it has too few free clusters left (min_clusters).
383   * The parent's group is preferred; if it doesn't satisfy these
384   * conditions we search cyclically through the rest.  If none
385   * of the groups look good we just look for a group with more
386   * free inodes than average (starting at the parent's group).
387   */
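/*
 * To get a concrete feel for the thresholds used below (hypothetical
 * numbers): with 8192 inodes per group, flex_size = 16, 64 (flex) groups,
 * 1280 existing directories and avefreei = 100000, the limits work out to
 * max_dirs = 1280 / 64 + 8192 / 16 = 532 and
 * min_inodes = 100000 - 8192 * 16 / 4 = 67232; min_clusters is derived
 * from avefreec in the same way, using EXT4_CLUSTERS_PER_GROUP(sb).
 */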
388  
389  static int find_group_orlov(struct super_block *sb, struct inode *parent,
390  			    ext4_group_t *group, umode_t mode,
391  			    const struct qstr *qstr)
392  {
393  	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
394  	struct ext4_sb_info *sbi = EXT4_SB(sb);
395  	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
396  	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
397  	unsigned int freei, avefreei, grp_free;
398  	ext4_fsblk_t freeb, avefreec;
399  	unsigned int ndirs;
400  	int max_dirs, min_inodes;
401  	ext4_grpblk_t min_clusters;
402  	ext4_group_t i, grp, g, ngroups;
403  	struct ext4_group_desc *desc;
404  	struct orlov_stats stats;
405  	int flex_size = ext4_flex_bg_size(sbi);
406  	struct dx_hash_info hinfo;
407  
408  	ngroups = real_ngroups;
409  	if (flex_size > 1) {
410  		ngroups = (real_ngroups + flex_size - 1) >>
411  			sbi->s_log_groups_per_flex;
412  		parent_group >>= sbi->s_log_groups_per_flex;
413  	}
414  
415  	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
416  	avefreei = freei / ngroups;
417  	freeb = EXT4_C2B(sbi,
418  		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
419  	avefreec = freeb;
420  	do_div(avefreec, ngroups);
421  	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
422  
423  	if (S_ISDIR(mode) &&
424  	    ((parent == sb->s_root->d_inode) ||
425  	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
426  		int best_ndir = inodes_per_group;
427  		int ret = -1;
428  
429  		if (qstr) {
430  			hinfo.hash_version = DX_HASH_HALF_MD4;
431  			hinfo.seed = sbi->s_hash_seed;
432  			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
433  			grp = hinfo.hash;
434  		} else
435  			grp = prandom_u32();
436  		parent_group = (unsigned)grp % ngroups;
437  		for (i = 0; i < ngroups; i++) {
438  			g = (parent_group + i) % ngroups;
439  			get_orlov_stats(sb, g, flex_size, &stats);
440  			if (!stats.free_inodes)
441  				continue;
442  			if (stats.used_dirs >= best_ndir)
443  				continue;
444  			if (stats.free_inodes < avefreei)
445  				continue;
446  			if (stats.free_clusters < avefreec)
447  				continue;
448  			grp = g;
449  			ret = 0;
450  			best_ndir = stats.used_dirs;
451  		}
452  		if (ret)
453  			goto fallback;
454  	found_flex_bg:
455  		if (flex_size == 1) {
456  			*group = grp;
457  			return 0;
458  		}
459  
460  		/*
461  		 * We pack inodes at the beginning of the flexgroup's
462  		 * inode tables.  Block allocation decisions will do
463  		 * something similar, although regular files will
464  		 * start in the second block group of the flexgroup.  See
465  		 * ext4_ext_find_goal() and ext4_find_near().
466  		 */
467  		grp *= flex_size;
468  		for (i = 0; i < flex_size; i++) {
469  			if (grp+i >= real_ngroups)
470  				break;
471  			desc = ext4_get_group_desc(sb, grp+i, NULL);
472  			if (desc && ext4_free_inodes_count(sb, desc)) {
473  				*group = grp+i;
474  				return 0;
475  			}
476  		}
477  		goto fallback;
478  	}
479  
480  	max_dirs = ndirs / ngroups + inodes_per_group / 16;
481  	min_inodes = avefreei - inodes_per_group*flex_size / 4;
482  	if (min_inodes < 1)
483  		min_inodes = 1;
484  	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
485  
486  	/*
487  	 * Start looking in the flex group where we last allocated an
488  	 * inode for this parent directory
489  	 */
490  	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
491  		parent_group = EXT4_I(parent)->i_last_alloc_group;
492  		if (flex_size > 1)
493  			parent_group >>= sbi->s_log_groups_per_flex;
494  	}
495  
496  	for (i = 0; i < ngroups; i++) {
497  		grp = (parent_group + i) % ngroups;
498  		get_orlov_stats(sb, grp, flex_size, &stats);
499  		if (stats.used_dirs >= max_dirs)
500  			continue;
501  		if (stats.free_inodes < min_inodes)
502  			continue;
503  		if (stats.free_clusters < min_clusters)
504  			continue;
505  		goto found_flex_bg;
506  	}
507  
508  fallback:
509  	ngroups = real_ngroups;
510  	avefreei = freei / ngroups;
511  fallback_retry:
512  	parent_group = EXT4_I(parent)->i_block_group;
513  	for (i = 0; i < ngroups; i++) {
514  		grp = (parent_group + i) % ngroups;
515  		desc = ext4_get_group_desc(sb, grp, NULL);
516  		if (desc) {
517  			grp_free = ext4_free_inodes_count(sb, desc);
518  			if (grp_free && grp_free >= avefreei) {
519  				*group = grp;
520  				return 0;
521  			}
522  		}
523  	}
524  
525  	if (avefreei) {
526  		/*
527  		 * The free-inodes counter is approximate, and for really small
528  		 * filesystems the above test can fail to find any block groups.
529  		 */
530  		avefreei = 0;
531  		goto fallback_retry;
532  	}
533  
534  	return -1;
535  }
536  
537  static int find_group_other(struct super_block *sb, struct inode *parent,
538  			    ext4_group_t *group, umode_t mode)
539  {
540  	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
541  	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
542  	struct ext4_group_desc *desc;
543  	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
544  
545  	/*
546  	 * Try to place the inode in the same flex group as its
547  	 * parent.  If we can't find space, use the Orlov algorithm to
548  	 * find another flex group, and store that information in the
549  	 * parent directory's inode so that future allocations use that
550  	 * flex group.
551  	 */
552  	if (flex_size > 1) {
553  		int retry = 0;
554  
555  	try_again:
556  		parent_group &= ~(flex_size-1);
557  		last = parent_group + flex_size;
558  		if (last > ngroups)
559  			last = ngroups;
560  		for  (i = parent_group; i < last; i++) {
561  			desc = ext4_get_group_desc(sb, i, NULL);
562  			if (desc && ext4_free_inodes_count(sb, desc)) {
563  				*group = i;
564  				return 0;
565  			}
566  		}
567  		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
568  			retry = 1;
569  			parent_group = EXT4_I(parent)->i_last_alloc_group;
570  			goto try_again;
571  		}
572  		/*
573  		 * If this didn't work, use the Orlov search algorithm
574  		 * to find a new flex group; we pass in the mode to
575  		 * avoid the topdir algorithms.
576  		 */
577  		*group = parent_group + flex_size;
578  		if (*group > ngroups)
579  			*group = 0;
580  		return find_group_orlov(sb, parent, group, mode, NULL);
581  	}
582  
583  	/*
584  	 * Try to place the inode in its parent directory
585  	 */
586  	*group = parent_group;
587  	desc = ext4_get_group_desc(sb, *group, NULL);
588  	if (desc && ext4_free_inodes_count(sb, desc) &&
589  	    ext4_free_group_clusters(sb, desc))
590  		return 0;
591  
592  	/*
593  	 * We're going to place this inode in a different blockgroup from its
594  	 * parent.  We want to cause files in a common directory to all land in
595  	 * the same blockgroup.  But we want files which are in a different
596  	 * directory which shares a blockgroup with our parent to land in a
597  	 * different blockgroup.
598  	 *
599  	 * So add our directory's i_ino into the starting point for the hash.
600  	 */
601  	*group = (*group + parent->i_ino) % ngroups;
602  
603  	/*
604  	 * Use a quadratic hash to find a group with a free inode and some free
605  	 * blocks.
606  	 */
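	/*
	 * A quick sketch of the probe sequence: starting from the hashed
	 * group chosen above, the loop below visits offsets +1, +3, +7,
	 * +15, ... (i.e. 2^k - 1), wrapping modulo ngroups.
	 */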
607  	for (i = 1; i < ngroups; i <<= 1) {
608  		*group += i;
609  		if (*group >= ngroups)
610  			*group -= ngroups;
611  		desc = ext4_get_group_desc(sb, *group, NULL);
612  		if (desc && ext4_free_inodes_count(sb, desc) &&
613  		    ext4_free_group_clusters(sb, desc))
614  			return 0;
615  	}
616  
617  	/*
618  	 * That failed: try linear search for a free inode, even if that group
619  	 * has no free blocks.
620  	 */
621  	*group = parent_group;
622  	for (i = 0; i < ngroups; i++) {
623  		if (++*group >= ngroups)
624  			*group = 0;
625  		desc = ext4_get_group_desc(sb, *group, NULL);
626  		if (desc && ext4_free_inodes_count(sb, desc))
627  			return 0;
628  	}
629  
630  	return -1;
631  }
632  
633  /*
634   * In no-journal mode, if an inode has recently been deleted, we want
635   * to avoid reusing it until we're reasonably sure the inode table
636   * block has been written back to disk.  (Yes, these values are
637   * somewhat arbitrary...)
638   */
639  #define RECENTCY_MIN	5
640  #define RECENTCY_DIRTY	30
641  
642  static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
643  {
644  	struct ext4_group_desc	*gdp;
645  	struct ext4_inode	*raw_inode;
646  	struct buffer_head	*bh;
647  	unsigned long		dtime, now;
648  	int	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
649  	int	offset, ret = 0, recentcy = RECENTCY_MIN;
650  
651  	gdp = ext4_get_group_desc(sb, group, NULL);
652  	if (unlikely(!gdp))
653  		return 0;
654  
655  	bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
656  		       (ino / inodes_per_block));
657  	if (unlikely(!bh) || !buffer_uptodate(bh))
658  		/*
659  		 * If the block is not in the buffer cache, then it
660  		 * must have been written out.
661  		 */
662  		goto out;
663  
664  	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
665  	raw_inode = (struct ext4_inode *) (bh->b_data + offset);
666  	dtime = le32_to_cpu(raw_inode->i_dtime);
667  	now = get_seconds();
668  	if (buffer_dirty(bh))
669  		recentcy += RECENTCY_DIRTY;
670  
671  	if (dtime && (dtime < now) && (now < dtime + recentcy))
672  		ret = 1;
673  out:
674  	brelse(bh);
675  	return ret;
676  }
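
/*
 * In other words, an inode counts as "recently deleted" when its i_dtime
 * lies within the last RECENTCY_MIN (5) seconds, or within the last
 * RECENTCY_MIN + RECENTCY_DIRTY (35) seconds if the inode table block is
 * still dirty in the buffer cache and therefore may not be on disk yet.
 */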
677  
678  /*
679   * There are two policies for allocating an inode.  If the new inode is
680   * a directory, then a forward search is made for a block group with both
681   * free space and a low directory-to-inode ratio; if that fails, then
682   * among the groups with above-average free space, the one with the
683   * fewest directories is chosen.
684   *
685   * For other inodes, search forward from the parent directory's block
686   * group to find a free inode.
687   */
688  struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
689  			       umode_t mode, const struct qstr *qstr,
690  			       __u32 goal, uid_t *owner, int handle_type,
691  			       unsigned int line_no, int nblocks)
692  {
693  	struct super_block *sb;
694  	struct buffer_head *inode_bitmap_bh = NULL;
695  	struct buffer_head *group_desc_bh;
696  	ext4_group_t ngroups, group = 0;
697  	unsigned long ino = 0;
698  	struct inode *inode;
699  	struct ext4_group_desc *gdp = NULL;
700  	struct ext4_inode_info *ei;
701  	struct ext4_sb_info *sbi;
702  	int ret2, err = 0;
703  	struct inode *ret;
704  	ext4_group_t i;
705  	ext4_group_t flex_group;
706  	struct ext4_group_info *grp;
707  
708  	/* Cannot create files in a deleted directory */
709  	if (!dir || !dir->i_nlink)
710  		return ERR_PTR(-EPERM);
711  
712  	sb = dir->i_sb;
713  	ngroups = ext4_get_groups_count(sb);
714  	trace_ext4_request_inode(dir, mode);
715  	inode = new_inode(sb);
716  	if (!inode)
717  		return ERR_PTR(-ENOMEM);
718  	ei = EXT4_I(inode);
719  	sbi = EXT4_SB(sb);
720  
721  	/*
722  	 * Initialize owners and quota early so that we don't have to account
723  	 * for the quota-initialization worst case in the standard inode
724  	 * creation transaction
725  	 */
726  	if (owner) {
727  		inode->i_mode = mode;
728  		i_uid_write(inode, owner[0]);
729  		i_gid_write(inode, owner[1]);
730  	} else if (test_opt(sb, GRPID)) {
731  		inode->i_mode = mode;
732  		inode->i_uid = current_fsuid();
733  		inode->i_gid = dir->i_gid;
734  	} else
735  		inode_init_owner(inode, dir, mode);
736  	dquot_initialize(inode);
737  
738  	if (!goal)
739  		goal = sbi->s_inode_goal;
740  
741  	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
742  		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
743  		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
744  		ret2 = 0;
745  		goto got_group;
746  	}
747  
748  	if (S_ISDIR(mode))
749  		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
750  	else
751  		ret2 = find_group_other(sb, dir, &group, mode);
752  
753  got_group:
754  	EXT4_I(dir)->i_last_alloc_group = group;
755  	err = -ENOSPC;
756  	if (ret2 == -1)
757  		goto out;
758  
759  	/*
760  	 * Normally we will only go through one pass of this loop,
761  	 * unless we get unlucky and it turns out the group we selected
762  	 * had its last inode grabbed by someone else.
763  	 */
764  	for (i = 0; i < ngroups; i++, ino = 0) {
765  		err = -EIO;
766  
767  		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
768  		if (!gdp)
769  			goto out;
770  
771  		/*
772  		 * Check the free inode count before loading the bitmap.
773  		 */
774  		if (ext4_free_inodes_count(sb, gdp) == 0) {
775  			if (++group == ngroups)
776  				group = 0;
777  			continue;
778  		}
779  
780  		grp = ext4_get_group_info(sb, group);
781  		/* Skip groups with already-known suspicious inode tables */
782  		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
783  			if (++group == ngroups)
784  				group = 0;
785  			continue;
786  		}
787  
788  		brelse(inode_bitmap_bh);
789  		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
790  		/* Skip groups with suspicious inode tables */
791  		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
792  			if (++group == ngroups)
793  				group = 0;
794  			continue;
795  		}
796  
797  repeat_in_this_group:
798  		ino = ext4_find_next_zero_bit((unsigned long *)
799  					      inode_bitmap_bh->b_data,
800  					      EXT4_INODES_PER_GROUP(sb), ino);
801  		if (ino >= EXT4_INODES_PER_GROUP(sb))
802  			goto next_group;
803  		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
804  			ext4_error(sb, "reserved inode found cleared - "
805  				   "inode=%lu", ino + 1);
806  			continue;
807  		}
808  		if ((EXT4_SB(sb)->s_journal == NULL) &&
809  		    recently_deleted(sb, group, ino)) {
810  			ino++;
811  			goto next_inode;
812  		}
813  		if (!handle) {
814  			BUG_ON(nblocks <= 0);
815  			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
816  							 handle_type, nblocks,
817  							 0);
818  			if (IS_ERR(handle)) {
819  				err = PTR_ERR(handle);
820  				ext4_std_error(sb, err);
821  				goto out;
822  			}
823  		}
824  		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
825  		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
826  		if (err) {
827  			ext4_std_error(sb, err);
828  			goto out;
829  		}
830  		ext4_lock_group(sb, group);
831  		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
832  		ext4_unlock_group(sb, group);
833  		ino++;		/* the inode bitmap is zero-based */
834  		if (!ret2)
835  			goto got; /* we grabbed the inode! */
836  next_inode:
837  		if (ino < EXT4_INODES_PER_GROUP(sb))
838  			goto repeat_in_this_group;
839  next_group:
840  		if (++group == ngroups)
841  			group = 0;
842  	}
843  	err = -ENOSPC;
844  	goto out;
845  
846  got:
847  	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
848  	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
849  	if (err) {
850  		ext4_std_error(sb, err);
851  		goto out;
852  	}
853  
854  	/* We may have to initialize the block bitmap if it isn't already */
855  	if (ext4_has_group_desc_csum(sb) &&
856  	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
857  		struct buffer_head *block_bitmap_bh;
858  
859  		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
860  		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
861  		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
862  		if (err) {
863  			brelse(block_bitmap_bh);
864  			ext4_std_error(sb, err);
865  			goto out;
866  		}
867  
868  		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
869  		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
870  
871  		/* recheck and clear flag under lock if we still need to */
872  		ext4_lock_group(sb, group);
873  		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
874  			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
875  			ext4_free_group_clusters_set(sb, gdp,
876  				ext4_free_clusters_after_init(sb, group, gdp));
877  			ext4_block_bitmap_csum_set(sb, group, gdp,
878  						   block_bitmap_bh);
879  			ext4_group_desc_csum_set(sb, group, gdp);
880  		}
881  		ext4_unlock_group(sb, group);
882  		brelse(block_bitmap_bh);
883  
884  		if (err) {
885  			ext4_std_error(sb, err);
886  			goto out;
887  		}
888  	}
889  
890  	BUFFER_TRACE(group_desc_bh, "get_write_access");
891  	err = ext4_journal_get_write_access(handle, group_desc_bh);
892  	if (err) {
893  		ext4_std_error(sb, err);
894  		goto out;
895  	}
896  
897  	/* Update the relevant bg descriptor fields */
898  	if (ext4_has_group_desc_csum(sb)) {
899  		int free;
900  		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
901  
902  		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
903  		ext4_lock_group(sb, group); /* while we modify the bg desc */
904  		free = EXT4_INODES_PER_GROUP(sb) -
905  			ext4_itable_unused_count(sb, gdp);
906  		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
907  			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
908  			free = 0;
909  		}
910  		/*
911  		 * Check the relative inode number against the last used
912  		 * relative inode number in this group.  If it is greater,
913  		 * we need to update the bg_itable_unused count.
914  		 */
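		/*
		 * For example (hypothetical numbers): with 8192 inodes per
		 * group and bg_itable_unused = 4192, "free" is 4000; if the
		 * bit we just claimed is relative inode 4001, itable_unused
		 * is rewritten as 8192 - 4001 = 4191.
		 */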
915  		if (ino > free)
916  			ext4_itable_unused_set(sb, gdp,
917  					(EXT4_INODES_PER_GROUP(sb) - ino));
918  		up_read(&grp->alloc_sem);
919  	} else {
920  		ext4_lock_group(sb, group);
921  	}
922  
923  	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
924  	if (S_ISDIR(mode)) {
925  		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
926  		if (sbi->s_log_groups_per_flex) {
927  			ext4_group_t f = ext4_flex_group(sbi, group);
928  
929  			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
930  		}
931  	}
932  	if (ext4_has_group_desc_csum(sb)) {
933  		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
934  					   EXT4_INODES_PER_GROUP(sb) / 8);
935  		ext4_group_desc_csum_set(sb, group, gdp);
936  	}
937  	ext4_unlock_group(sb, group);
938  
939  	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
940  	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
941  	if (err) {
942  		ext4_std_error(sb, err);
943  		goto out;
944  	}
945  
946  	percpu_counter_dec(&sbi->s_freeinodes_counter);
947  	if (S_ISDIR(mode))
948  		percpu_counter_inc(&sbi->s_dirs_counter);
949  
950  	if (sbi->s_log_groups_per_flex) {
951  		flex_group = ext4_flex_group(sbi, group);
952  		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
953  	}
954  
955  	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
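	/*
	 * e.g. (hypothetical numbers): bit 2 of group 1, with 8192 inodes
	 * per group, gives ino = 3 within the group and i_ino = 8195 on
	 * disk -- the inverse of the split done in ext4_free_inode().
	 */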
956  	/* This is the optimal IO size (for stat), not the fs block size */
957  	inode->i_blocks = 0;
958  	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
959  						       ext4_current_time(inode);
960  
961  	memset(ei->i_data, 0, sizeof(ei->i_data));
962  	ei->i_dir_start_lookup = 0;
963  	ei->i_disksize = 0;
964  
965  	/* Don't inherit extent flag from directory, amongst others. */
966  	ei->i_flags =
967  		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
968  	ei->i_file_acl = 0;
969  	ei->i_dtime = 0;
970  	ei->i_block_group = group;
971  	ei->i_last_alloc_group = ~0;
972  
973  	ext4_set_inode_flags(inode);
974  	if (IS_DIRSYNC(inode))
975  		ext4_handle_sync(handle);
976  	if (insert_inode_locked(inode) < 0) {
977  		/*
978  		 * Likely a bitmap corruption causing inode to be allocated
979  		 * twice.
980  		 */
981  		err = -EIO;
982  		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
983  			   inode->i_ino);
984  		goto out;
985  	}
986  	spin_lock(&sbi->s_next_gen_lock);
987  	inode->i_generation = sbi->s_next_generation++;
988  	spin_unlock(&sbi->s_next_gen_lock);
989  
990  	/* Precompute checksum seed for inode metadata */
991  	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
992  			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
993  		__u32 csum;
994  		__le32 inum = cpu_to_le32(inode->i_ino);
995  		__le32 gen = cpu_to_le32(inode->i_generation);
996  		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
997  				   sizeof(inum));
998  		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
999  					      sizeof(gen));
1000  	}
1001  
1002  	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
1003  	ext4_set_inode_state(inode, EXT4_STATE_NEW);
1004  
1005  	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
1006  
1007  	ei->i_inline_off = 0;
1008  	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1009  		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1010  
1011  	ret = inode;
1012  	err = dquot_alloc_inode(inode);
1013  	if (err)
1014  		goto fail_drop;
1015  
1016  	err = ext4_init_acl(handle, inode, dir);
1017  	if (err)
1018  		goto fail_free_drop;
1019  
1020  	err = ext4_init_security(handle, inode, dir, qstr);
1021  	if (err)
1022  		goto fail_free_drop;
1023  
1024  	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
1025  		/* set extent flag only for directory, file and normal symlink */
1026  		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1027  			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
1028  			ext4_ext_tree_init(handle, inode);
1029  		}
1030  	}
1031  
1032  	if (ext4_handle_valid(handle)) {
1033  		ei->i_sync_tid = handle->h_transaction->t_tid;
1034  		ei->i_datasync_tid = handle->h_transaction->t_tid;
1035  	}
1036  
1037  	err = ext4_mark_inode_dirty(handle, inode);
1038  	if (err) {
1039  		ext4_std_error(sb, err);
1040  		goto fail_free_drop;
1041  	}
1042  
1043  	ext4_debug("allocating inode %lu\n", inode->i_ino);
1044  	trace_ext4_allocate_inode(inode, dir, mode);
1045  	brelse(inode_bitmap_bh);
1046  	return ret;
1047  
1048  fail_free_drop:
1049  	dquot_free_inode(inode);
1050  fail_drop:
1051  	clear_nlink(inode);
1052  	unlock_new_inode(inode);
1053  out:
1054  	dquot_drop(inode);
1055  	inode->i_flags |= S_NOQUOTA;
1056  	iput(inode);
1057  	brelse(inode_bitmap_bh);
1058  	return ERR_PTR(err);
1059  }
1060  
1061  /* Verify that we are loading a valid orphan from disk */
1062  struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1063  {
1064  	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
1065  	ext4_group_t block_group;
1066  	int bit;
1067  	struct buffer_head *bitmap_bh;
1068  	struct inode *inode = NULL;
1069  	long err = -EIO;
1070  
1071  	/* Error cases - e2fsck has already cleaned up for us */
1072  	if (ino > max_ino) {
1073  		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
1074  		goto error;
1075  	}
1076  
1077  	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1078  	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1079  	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1080  	if (!bitmap_bh) {
1081  		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
1082  		goto error;
1083  	}
1084  
1085  	/* Having the inode bit set should be a 100% indicator that this
1086  	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
1087  	 * inodes that were being truncated, so we can't check i_nlink==0.
1088  	 */
1089  	if (!ext4_test_bit(bit, bitmap_bh->b_data))
1090  		goto bad_orphan;
1091  
1092  	inode = ext4_iget(sb, ino);
1093  	if (IS_ERR(inode))
1094  		goto iget_failed;
1095  
1096  	/*
1097  	 * If the orphan has i_nlink > 0 then it should be able to be
1098  	 * truncated, otherwise it won't be removed from the orphan list
1099  	 * during processing and an infinite loop will result.
1100  	 */
1101  	if (inode->i_nlink && !ext4_can_truncate(inode))
1102  		goto bad_orphan;
1103  
1104  	if (NEXT_ORPHAN(inode) > max_ino)
1105  		goto bad_orphan;
1106  	brelse(bitmap_bh);
1107  	return inode;
1108  
1109  iget_failed:
1110  	err = PTR_ERR(inode);
1111  	inode = NULL;
1112  bad_orphan:
1113  	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
1114  	printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1115  	       bit, (unsigned long long)bitmap_bh->b_blocknr,
1116  	       ext4_test_bit(bit, bitmap_bh->b_data));
1117  	printk(KERN_WARNING "inode=%p\n", inode);
1118  	if (inode) {
1119  		printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
1120  		       is_bad_inode(inode));
1121  		printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
1122  		       NEXT_ORPHAN(inode));
1123  		printk(KERN_WARNING "max_ino=%lu\n", max_ino);
1124  		printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
1125  		/* Avoid freeing blocks if we got a bad deleted inode */
1126  		if (inode->i_nlink == 0)
1127  			inode->i_blocks = 0;
1128  		iput(inode);
1129  	}
1130  	brelse(bitmap_bh);
1131  error:
1132  	return ERR_PTR(err);
1133  }
1134  
1135  unsigned long ext4_count_free_inodes(struct super_block *sb)
1136  {
1137  	unsigned long desc_count;
1138  	struct ext4_group_desc *gdp;
1139  	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1140  #ifdef EXT4FS_DEBUG
1141  	struct ext4_super_block *es;
1142  	unsigned long bitmap_count, x;
1143  	struct buffer_head *bitmap_bh = NULL;
1144  
1145  	es = EXT4_SB(sb)->s_es;
1146  	desc_count = 0;
1147  	bitmap_count = 0;
1148  	gdp = NULL;
1149  	for (i = 0; i < ngroups; i++) {
1150  		gdp = ext4_get_group_desc(sb, i, NULL);
1151  		if (!gdp)
1152  			continue;
1153  		desc_count += ext4_free_inodes_count(sb, gdp);
1154  		brelse(bitmap_bh);
1155  		bitmap_bh = ext4_read_inode_bitmap(sb, i);
1156  		if (!bitmap_bh)
1157  			continue;
1158  
1159  		x = ext4_count_free(bitmap_bh->b_data,
1160  				    EXT4_INODES_PER_GROUP(sb) / 8);
1161  		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1162  			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1163  		bitmap_count += x;
1164  	}
1165  	brelse(bitmap_bh);
1166  	printk(KERN_DEBUG "ext4_count_free_inodes: "
1167  	       "stored = %u, computed = %lu, %lu\n",
1168  	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1169  	return desc_count;
1170  #else
1171  	desc_count = 0;
1172  	for (i = 0; i < ngroups; i++) {
1173  		gdp = ext4_get_group_desc(sb, i, NULL);
1174  		if (!gdp)
1175  			continue;
1176  		desc_count += ext4_free_inodes_count(sb, gdp);
1177  		cond_resched();
1178  	}
1179  	return desc_count;
1180  #endif
1181  }
1182  
1183  /* Called at mount-time, super-block is locked */
1184  unsigned long ext4_count_dirs(struct super_block * sb)
1185  {
1186  	unsigned long count = 0;
1187  	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1188  
1189  	for (i = 0; i < ngroups; i++) {
1190  		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1191  		if (!gdp)
1192  			continue;
1193  		count += ext4_used_dirs_count(sb, gdp);
1194  	}
1195  	return count;
1196  }
1197  
1198  /*
1199   * Zero an inode table that has not yet been zeroed, by writing zeroes
1200   * through the whole table. Must be called without any spinlock held. The
1201   * only place it is called from on an active filesystem is the ext4lazyinit
1202   * thread, so we do not need any special locks; however, we have to prevent
1203   * inode allocation from the current group, so we take the alloc_sem lock
1204   * to block ext4_new_inode() until we are finished.
1205   */
1206  int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1207  				 int barrier)
1208  {
1209  	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1210  	struct ext4_sb_info *sbi = EXT4_SB(sb);
1211  	struct ext4_group_desc *gdp = NULL;
1212  	struct buffer_head *group_desc_bh;
1213  	handle_t *handle;
1214  	ext4_fsblk_t blk;
1215  	int num, ret = 0, used_blks = 0;
1216  
1217  	/* This should not happen, but just to be sure check this */
1218  	if (sb->s_flags & MS_RDONLY) {
1219  		ret = 1;
1220  		goto out;
1221  	}
1222  
1223  	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1224  	if (!gdp)
1225  		goto out;
1226  
1227  	/*
1228  	 * We do not need to lock this, because we are the only one
1229  	 * handling this flag.
1230  	 */
1231  	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1232  		goto out;
1233  
1234  	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
1235  	if (IS_ERR(handle)) {
1236  		ret = PTR_ERR(handle);
1237  		goto out;
1238  	}
1239  
1240  	down_write(&grp->alloc_sem);
1241  	/*
1242  	 * If the inode bitmap was already initialized there may be some
1243  	 * used inodes, so we need to skip the inode table blocks that
1244  	 * contain them.
1245  	 */
1246  	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
1247  		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
1248  			    ext4_itable_unused_count(sb, gdp)),
1249  			    sbi->s_inodes_per_block);
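	/*
	 * A sketch of the arithmetic (hypothetical numbers): with 8192
	 * inodes per group, 16 inodes per block and itable_unused = 8000,
	 * used_blks = DIV_ROUND_UP(192, 16) = 12, so zeroing starts 12
	 * blocks into the inode table and covers the remaining
	 * s_itb_per_group - 12 blocks.
	 */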
1250  
1251  	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
1252  		ext4_error(sb, "Something is wrong with group %u: "
1253  			   "used itable blocks: %d; "
1254  			   "itable unused count: %u",
1255  			   group, used_blks,
1256  			   ext4_itable_unused_count(sb, gdp));
1257  		ret = 1;
1258  		goto err_out;
1259  	}
1260  
1261  	blk = ext4_inode_table(sb, gdp) + used_blks;
1262  	num = sbi->s_itb_per_group - used_blks;
1263  
1264  	BUFFER_TRACE(group_desc_bh, "get_write_access");
1265  	ret = ext4_journal_get_write_access(handle,
1266  					    group_desc_bh);
1267  	if (ret)
1268  		goto err_out;
1269  
1270  	/*
1271  	 * Skip zeroout if the inode table is full. But we set the ZEROED
1272  	 * flag anyway, because obviously, when it is full it does not need
1273  	 * further zeroing.
1274  	 */
1275  	if (unlikely(num == 0))
1276  		goto skip_zeroout;
1277  
1278  	ext4_debug("going to zero out inode table in group %d\n",
1279  		   group);
1280  	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1281  	if (ret < 0)
1282  		goto err_out;
1283  	if (barrier)
1284  		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
1285  
1286  skip_zeroout:
1287  	ext4_lock_group(sb, group);
1288  	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1289  	ext4_group_desc_csum_set(sb, group, gdp);
1290  	ext4_unlock_group(sb, group);
1291  
1292  	BUFFER_TRACE(group_desc_bh,
1293  		     "call ext4_handle_dirty_metadata");
1294  	ret = ext4_handle_dirty_metadata(handle, NULL,
1295  					 group_desc_bh);
1296  
1297  err_out:
1298  	up_write(&grp->alloc_sem);
1299  	ext4_journal_stop(handle);
1300  out:
1301  	return ret;
1302  }
1303