xref: /openbmc/linux/fs/ext4/mballoc.c (revision 6189f1b0)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public Licens
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "ext4_jbd2.h"
25 #include "mballoc.h"
26 #include <linux/log2.h>
27 #include <linux/module.h>
28 #include <linux/slab.h>
29 #include <linux/backing-dev.h>
30 #include <trace/events/ext4.h>
31 
32 #ifdef CONFIG_EXT4_DEBUG
33 ushort ext4_mballoc_debug __read_mostly;
34 
35 module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
36 MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
37 #endif
38 
39 /*
40  * MUSTDO:
41  *   - test ext4_ext_search_left() and ext4_ext_search_right()
42  *   - search for metadata in few groups
43  *
44  * TODO v4:
45  *   - normalization should take into account whether file is still open
46  *   - discard preallocations if no free space left (policy?)
47  *   - don't normalize tails
48  *   - quota
49  *   - reservation for superuser
50  *
51  * TODO v3:
52  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
53  *   - track min/max extents in each group for better group selection
54  *   - mb_mark_used() may allocate chunk right after splitting buddy
55  *   - tree of groups sorted by number of free blocks
56  *   - error handling
57  */
58 
59 /*
60  * The allocation request involve request for multiple number of blocks
61  * near to the goal(block) value specified.
62  *
63  * During initialization phase of the allocator we decide to use the
64  * group preallocation or inode preallocation depending on the size of
65  * the file. The size of the file could be the resulting file size we
66  * would have after allocation, or the current file size, which ever
67  * is larger. If the size is less than sbi->s_mb_stream_request we
68  * select to use the group preallocation. The default value of
69  * s_mb_stream_request is 16 blocks. This can also be tuned via
70  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
71  * terms of number of blocks.
72  *
73  * The main motivation for having small file use group preallocation is to
74  * ensure that we have small files closer together on the disk.
75  *
76  * First stage the allocator looks at the inode prealloc list,
77  * ext4_inode_info->i_prealloc_list, which contains list of prealloc
78  * spaces for this particular inode. The inode prealloc space is
79  * represented as:
80  *
81  * pa_lstart -> the logical start block for this prealloc space
82  * pa_pstart -> the physical start block for this prealloc space
83  * pa_len    -> length for this prealloc space (in clusters)
84  * pa_free   ->  free space available in this prealloc space (in clusters)
85  *
86  * The inode preallocation space is used looking at the _logical_ start
87  * block. If only the logical file block falls within the range of prealloc
88  * space we will consume the particular prealloc space. This makes sure that
89  * we have contiguous physical blocks representing the file blocks
90  *
91  * The important thing to be noted in case of inode prealloc space is that
92  * we don't modify the values associated to inode prealloc space except
93  * pa_free.
94  *
95  * If we are not able to find blocks in the inode prealloc space and if we
96  * have the group allocation flag set then we look at the locality group
97  * prealloc space. These are per CPU prealloc list represented as
98  *
99  * ext4_sb_info.s_locality_groups[smp_processor_id()]
100  *
101  * The reason for having a per cpu locality group is to reduce the contention
102  * between CPUs. It is possible to get scheduled at this point.
103  *
104  * The locality group prealloc space is used looking at whether we have
105  * enough free space (pa_free) within the prealloc space.
106  *
107  * If we can't allocate blocks via inode prealloc or/and locality group
108  * prealloc then we look at the buddy cache. The buddy cache is represented
109  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
110  * mapped to the buddy and bitmap information regarding different
111  * groups. The buddy information is attached to buddy cache inode so that
112  * we can access them through the page cache. The information regarding
113  * each group is loaded via ext4_mb_load_buddy.  The information involve
114  * block bitmap and buddy information. The information are stored in the
115  * inode as:
116  *
117  *  {                        page                        }
118  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
119  *
120  *
121  * one block each for bitmap and buddy information.  So for each group we
122  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
123  * blocksize) blocks.  So it can have information regarding groups_per_page
124  * which is blocks_per_page/2
125  *
126  * The buddy cache inode is not stored on disk. The inode is thrown
127  * away when the filesystem is unmounted.
128  *
129  * We look for count number of blocks in the buddy cache. If we were able
130  * to locate that many free blocks we return with additional information
131  * regarding rest of the contiguous physical block available
132  *
133  * Before allocating blocks via buddy cache we normalize the request
134  * blocks. This ensure we ask for more blocks that we needed. The extra
135  * blocks that we get after allocation is added to the respective prealloc
136  * list. In case of inode preallocation we follow a list of heuristics
137  * based on file size. This can be found in ext4_mb_normalize_request. If
138  * we are doing a group prealloc we try to normalize the request to
139  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
140  * dependent on the cluster size; for non-bigalloc file systems, it is
141  * 512 blocks. This can be tuned via
142  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
143  * terms of number of blocks. If we have mounted the file system with -O
144  * stripe=<value> option the group prealloc request is normalized to the
145  * the smallest multiple of the stripe value (sbi->s_stripe) which is
146  * greater than the default mb_group_prealloc.
147  *
148  * The regular allocator (using the buddy cache) supports a few tunables.
149  *
150  * /sys/fs/ext4/<partition>/mb_min_to_scan
151  * /sys/fs/ext4/<partition>/mb_max_to_scan
152  * /sys/fs/ext4/<partition>/mb_order2_req
153  *
154  * The regular allocator uses buddy scan only if the request len is power of
155  * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
156  * value of s_mb_order2_reqs can be tuned via
157  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
158  * stripe size (sbi->s_stripe), we try to search for contiguous block in
159  * stripe size. This should result in better allocation on RAID setups. If
160  * not, we search in the specific group using bitmap for best extents. The
161  * tunable min_to_scan and max_to_scan control the behaviour here.
162  * min_to_scan indicate how long the mballoc __must__ look for a best
163  * extent and max_to_scan indicates how long the mballoc __can__ look for a
164  * best extent in the found extents. Searching for the blocks starts with
165  * the group specified as the goal value in allocation context via
166  * ac_g_ex. Each group is first checked based on the criteria whether it
167  * can be used for allocation. ext4_mb_good_group explains how the groups are
168  * checked.
169  *
170  * Both the prealloc space are getting populated as above. So for the first
171  * request we will hit the buddy cache which will result in this prealloc
172  * space getting filled. The prealloc space is then later used for the
173  * subsequent request.
174  */
175 
176 /*
177  * mballoc operates on the following data:
178  *  - on-disk bitmap
179  *  - in-core buddy (actually includes buddy and bitmap)
180  *  - preallocation descriptors (PAs)
181  *
182  * there are two types of preallocations:
183  *  - inode
184  *    assiged to specific inode and can be used for this inode only.
185  *    it describes part of inode's space preallocated to specific
186  *    physical blocks. any block from that preallocated can be used
187  *    independent. the descriptor just tracks number of blocks left
188  *    unused. so, before taking some block from descriptor, one must
189  *    make sure corresponded logical block isn't allocated yet. this
190  *    also means that freeing any block within descriptor's range
191  *    must discard all preallocated blocks.
192  *  - locality group
193  *    assigned to specific locality group which does not translate to
194  *    permanent set of inodes: inode can join and leave group. space
195  *    from this type of preallocation can be used for any inode. thus
196  *    it's consumed from the beginning to the end.
197  *
198  * relation between them can be expressed as:
199  *    in-core buddy = on-disk bitmap + preallocation descriptors
200  *
201  * this mean blocks mballoc considers used are:
202  *  - allocated blocks (persistent)
203  *  - preallocated blocks (non-persistent)
204  *
205  * consistency in mballoc world means that at any time a block is either
206  * free or used in ALL structures. notice: "any time" should not be read
207  * literally -- time is discrete and delimited by locks.
208  *
209  *  to keep it simple, we don't use block numbers, instead we count number of
210  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
211  *
212  * all operations can be expressed as:
213  *  - init buddy:			buddy = on-disk + PAs
214  *  - new PA:				buddy += N; PA = N
215  *  - use inode PA:			on-disk += N; PA -= N
216  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
217  *  - use locality group PA		on-disk += N; PA -= N
218  *  - discard locality group PA		buddy -= PA; PA = 0
219  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
220  *        is used in real operation because we can't know actual used
221  *        bits from PA, only from on-disk bitmap
222  *
223  * if we follow this strict logic, then all operations above should be atomic.
224  * given some of them can block, we'd have to use something like semaphores
225  * killing performance on high-end SMP hardware. let's try to relax it using
226  * the following knowledge:
227  *  1) if buddy is referenced, it's already initialized
228  *  2) while block is used in buddy and the buddy is referenced,
229  *     nobody can re-allocate that block
230  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
231  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
232  *     on-disk bitmap if buddy has same bit set or/and PA covers corresponded
233  *     block
234  *
235  * so, now we're building a concurrency table:
236  *  - init buddy vs.
237  *    - new PA
238  *      blocks for PA are allocated in the buddy, buddy must be referenced
239  *      until PA is linked to allocation group to avoid concurrent buddy init
240  *    - use inode PA
241  *      we need to make sure that either on-disk bitmap or PA has uptodate data
242  *      given (3) we care that PA-=N operation doesn't interfere with init
243  *    - discard inode PA
244  *      the simplest way would be to have buddy initialized by the discard
245  *    - use locality group PA
246  *      again PA-=N must be serialized with init
247  *    - discard locality group PA
248  *      the simplest way would be to have buddy initialized by the discard
249  *  - new PA vs.
250  *    - use inode PA
251  *      i_data_sem serializes them
252  *    - discard inode PA
253  *      discard process must wait until PA isn't used by another process
254  *    - use locality group PA
255  *      some mutex should serialize them
256  *    - discard locality group PA
257  *      discard process must wait until PA isn't used by another process
258  *  - use inode PA
259  *    - use inode PA
260  *      i_data_sem or another mutex should serializes them
261  *    - discard inode PA
262  *      discard process must wait until PA isn't used by another process
263  *    - use locality group PA
264  *      nothing wrong here -- they're different PAs covering different blocks
265  *    - discard locality group PA
266  *      discard process must wait until PA isn't used by another process
267  *
268  * now we're ready to make few consequences:
269  *  - PA is referenced and while it is no discard is possible
270  *  - PA is referenced until block isn't marked in on-disk bitmap
271  *  - PA changes only after on-disk bitmap
272  *  - discard must not compete with init. either init is done before
273  *    any discard or they're serialized somehow
274  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
275  *
276  * a special case when we've used PA to emptiness. no need to modify buddy
277  * in this case, but we should care about concurrent init
278  *
279  */
280 
281  /*
282  * Logic in few words:
283  *
284  *  - allocation:
285  *    load group
286  *    find blocks
287  *    mark bits in on-disk bitmap
288  *    release group
289  *
290  *  - use preallocation:
291  *    find proper PA (per-inode or group)
292  *    load group
293  *    mark bits in on-disk bitmap
294  *    release group
295  *    release PA
296  *
297  *  - free:
298  *    load group
299  *    mark bits in on-disk bitmap
300  *    release group
301  *
302  *  - discard preallocations in group:
303  *    mark PAs deleted
304  *    move them onto local list
305  *    load on-disk bitmap
306  *    load group
307  *    remove PA from object (inode or locality group)
308  *    mark free blocks in-core
309  *
310  *  - discard inode's preallocations:
311  */
312 
313 /*
314  * Locking rules
315  *
316  * Locks:
317  *  - bitlock on a group	(group)
318  *  - object (inode/locality)	(object)
319  *  - per-pa lock		(pa)
320  *
321  * Paths:
322  *  - new pa
323  *    object
324  *    group
325  *
326  *  - find and use pa:
327  *    pa
328  *
329  *  - release consumed pa:
330  *    pa
331  *    group
332  *    object
333  *
334  *  - generate in-core bitmap:
335  *    group
336  *        pa
337  *
338  *  - discard all for given object (inode, locality group):
339  *    object
340  *        pa
341  *    group
342  *
343  *  - discard all for given group:
344  *    group
345  *        pa
346  *    group
347  *        object
348  *
349  */
350 static struct kmem_cache *ext4_pspace_cachep;
351 static struct kmem_cache *ext4_ac_cachep;
352 static struct kmem_cache *ext4_free_data_cachep;
353 
354 /* We create slab caches for groupinfo data structures based on the
355  * superblock block size.  There will be one per mounted filesystem for
356  * each unique s_blocksize_bits */
357 #define NR_GRPINFO_CACHES 8
358 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
359 
360 static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
361 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
362 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
363 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
364 };
365 
366 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
367 					ext4_group_t group);
368 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
369 						ext4_group_t group);
370 static void ext4_free_data_callback(struct super_block *sb,
371 				struct ext4_journal_cb_entry *jce, int rc);
372 
373 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
374 {
375 #if BITS_PER_LONG == 64
376 	*bit += ((unsigned long) addr & 7UL) << 3;
377 	addr = (void *) ((unsigned long) addr & ~7UL);
378 #elif BITS_PER_LONG == 32
379 	*bit += ((unsigned long) addr & 3UL) << 3;
380 	addr = (void *) ((unsigned long) addr & ~3UL);
381 #else
382 #error "how many bits you are?!"
383 #endif
384 	return addr;
385 }
386 
387 static inline int mb_test_bit(int bit, void *addr)
388 {
389 	/*
390 	 * ext4_test_bit on architecture like powerpc
391 	 * needs unsigned long aligned address
392 	 */
393 	addr = mb_correct_addr_and_bit(&bit, addr);
394 	return ext4_test_bit(bit, addr);
395 }
396 
397 static inline void mb_set_bit(int bit, void *addr)
398 {
399 	addr = mb_correct_addr_and_bit(&bit, addr);
400 	ext4_set_bit(bit, addr);
401 }
402 
403 static inline void mb_clear_bit(int bit, void *addr)
404 {
405 	addr = mb_correct_addr_and_bit(&bit, addr);
406 	ext4_clear_bit(bit, addr);
407 }
408 
409 static inline int mb_test_and_clear_bit(int bit, void *addr)
410 {
411 	addr = mb_correct_addr_and_bit(&bit, addr);
412 	return ext4_test_and_clear_bit(bit, addr);
413 }
414 
415 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
416 {
417 	int fix = 0, ret, tmpmax;
418 	addr = mb_correct_addr_and_bit(&fix, addr);
419 	tmpmax = max + fix;
420 	start += fix;
421 
422 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
423 	if (ret > max)
424 		return max;
425 	return ret;
426 }
427 
428 static inline int mb_find_next_bit(void *addr, int max, int start)
429 {
430 	int fix = 0, ret, tmpmax;
431 	addr = mb_correct_addr_and_bit(&fix, addr);
432 	tmpmax = max + fix;
433 	start += fix;
434 
435 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
436 	if (ret > max)
437 		return max;
438 	return ret;
439 }
440 
441 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
442 {
443 	char *bb;
444 
445 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
446 	BUG_ON(max == NULL);
447 
448 	if (order > e4b->bd_blkbits + 1) {
449 		*max = 0;
450 		return NULL;
451 	}
452 
453 	/* at order 0 we see each particular block */
454 	if (order == 0) {
455 		*max = 1 << (e4b->bd_blkbits + 3);
456 		return e4b->bd_bitmap;
457 	}
458 
459 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
460 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
461 
462 	return bb;
463 }
464 
465 #ifdef DOUBLE_CHECK
466 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
467 			   int first, int count)
468 {
469 	int i;
470 	struct super_block *sb = e4b->bd_sb;
471 
472 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
473 		return;
474 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
475 	for (i = 0; i < count; i++) {
476 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
477 			ext4_fsblk_t blocknr;
478 
479 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
480 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
481 			ext4_grp_locked_error(sb, e4b->bd_group,
482 					      inode ? inode->i_ino : 0,
483 					      blocknr,
484 					      "freeing block already freed "
485 					      "(bit %u)",
486 					      first + i);
487 		}
488 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
489 	}
490 }
491 
492 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
493 {
494 	int i;
495 
496 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
497 		return;
498 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
499 	for (i = 0; i < count; i++) {
500 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
501 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
502 	}
503 }
504 
505 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
506 {
507 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
508 		unsigned char *b1, *b2;
509 		int i;
510 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
511 		b2 = (unsigned char *) bitmap;
512 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
513 			if (b1[i] != b2[i]) {
514 				ext4_msg(e4b->bd_sb, KERN_ERR,
515 					 "corruption in group %u "
516 					 "at byte %u(%u): %x in copy != %x "
517 					 "on disk/prealloc",
518 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
519 				BUG();
520 			}
521 		}
522 	}
523 }
524 
525 #else
526 static inline void mb_free_blocks_double(struct inode *inode,
527 				struct ext4_buddy *e4b, int first, int count)
528 {
529 	return;
530 }
531 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
532 						int first, int count)
533 {
534 	return;
535 }
536 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
537 {
538 	return;
539 }
540 #endif
541 
542 #ifdef AGGRESSIVE_CHECK
543 
544 #define MB_CHECK_ASSERT(assert)						\
545 do {									\
546 	if (!(assert)) {						\
547 		printk(KERN_EMERG					\
548 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
549 			function, file, line, # assert);		\
550 		BUG();							\
551 	}								\
552 } while (0)
553 
554 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
555 				const char *function, int line)
556 {
557 	struct super_block *sb = e4b->bd_sb;
558 	int order = e4b->bd_blkbits + 1;
559 	int max;
560 	int max2;
561 	int i;
562 	int j;
563 	int k;
564 	int count;
565 	struct ext4_group_info *grp;
566 	int fragments = 0;
567 	int fstart;
568 	struct list_head *cur;
569 	void *buddy;
570 	void *buddy2;
571 
572 	{
573 		static int mb_check_counter;
574 		if (mb_check_counter++ % 100 != 0)
575 			return 0;
576 	}
577 
578 	while (order > 1) {
579 		buddy = mb_find_buddy(e4b, order, &max);
580 		MB_CHECK_ASSERT(buddy);
581 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
582 		MB_CHECK_ASSERT(buddy2);
583 		MB_CHECK_ASSERT(buddy != buddy2);
584 		MB_CHECK_ASSERT(max * 2 == max2);
585 
586 		count = 0;
587 		for (i = 0; i < max; i++) {
588 
589 			if (mb_test_bit(i, buddy)) {
590 				/* only single bit in buddy2 may be 1 */
591 				if (!mb_test_bit(i << 1, buddy2)) {
592 					MB_CHECK_ASSERT(
593 						mb_test_bit((i<<1)+1, buddy2));
594 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
595 					MB_CHECK_ASSERT(
596 						mb_test_bit(i << 1, buddy2));
597 				}
598 				continue;
599 			}
600 
601 			/* both bits in buddy2 must be 1 */
602 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
603 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
604 
605 			for (j = 0; j < (1 << order); j++) {
606 				k = (i * (1 << order)) + j;
607 				MB_CHECK_ASSERT(
608 					!mb_test_bit(k, e4b->bd_bitmap));
609 			}
610 			count++;
611 		}
612 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
613 		order--;
614 	}
615 
616 	fstart = -1;
617 	buddy = mb_find_buddy(e4b, 0, &max);
618 	for (i = 0; i < max; i++) {
619 		if (!mb_test_bit(i, buddy)) {
620 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
621 			if (fstart == -1) {
622 				fragments++;
623 				fstart = i;
624 			}
625 			continue;
626 		}
627 		fstart = -1;
628 		/* check used bits only */
629 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
630 			buddy2 = mb_find_buddy(e4b, j, &max2);
631 			k = i >> j;
632 			MB_CHECK_ASSERT(k < max2);
633 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
634 		}
635 	}
636 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
637 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
638 
639 	grp = ext4_get_group_info(sb, e4b->bd_group);
640 	list_for_each(cur, &grp->bb_prealloc_list) {
641 		ext4_group_t groupnr;
642 		struct ext4_prealloc_space *pa;
643 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
644 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
645 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
646 		for (i = 0; i < pa->pa_len; i++)
647 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
648 	}
649 	return 0;
650 }
651 #undef MB_CHECK_ASSERT
652 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
653 					__FILE__, __func__, __LINE__)
654 #else
655 #define mb_check_buddy(e4b)
656 #endif
657 
658 /*
659  * Divide blocks started from @first with length @len into
660  * smaller chunks with power of 2 blocks.
661  * Clear the bits in bitmap which the blocks of the chunk(s) covered,
662  * then increase bb_counters[] for corresponded chunk size.
663  */
664 static void ext4_mb_mark_free_simple(struct super_block *sb,
665 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
666 					struct ext4_group_info *grp)
667 {
668 	struct ext4_sb_info *sbi = EXT4_SB(sb);
669 	ext4_grpblk_t min;
670 	ext4_grpblk_t max;
671 	ext4_grpblk_t chunk;
672 	unsigned short border;
673 
674 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
675 
676 	border = 2 << sb->s_blocksize_bits;
677 
678 	while (len > 0) {
679 		/* find how many blocks can be covered since this position */
680 		max = ffs(first | border) - 1;
681 
682 		/* find how many blocks of power 2 we need to mark */
683 		min = fls(len) - 1;
684 
685 		if (max < min)
686 			min = max;
687 		chunk = 1 << min;
688 
689 		/* mark multiblock chunks only */
690 		grp->bb_counters[min]++;
691 		if (min > 0)
692 			mb_clear_bit(first >> min,
693 				     buddy + sbi->s_mb_offsets[min]);
694 
695 		len -= chunk;
696 		first += chunk;
697 	}
698 }
699 
700 /*
701  * Cache the order of the largest free extent we have available in this block
702  * group.
703  */
704 static void
705 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
706 {
707 	int i;
708 	int bits;
709 
710 	grp->bb_largest_free_order = -1; /* uninit */
711 
712 	bits = sb->s_blocksize_bits + 1;
713 	for (i = bits; i >= 0; i--) {
714 		if (grp->bb_counters[i] > 0) {
715 			grp->bb_largest_free_order = i;
716 			break;
717 		}
718 	}
719 }
720 
721 static noinline_for_stack
722 void ext4_mb_generate_buddy(struct super_block *sb,
723 				void *buddy, void *bitmap, ext4_group_t group)
724 {
725 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
726 	struct ext4_sb_info *sbi = EXT4_SB(sb);
727 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
728 	ext4_grpblk_t i = 0;
729 	ext4_grpblk_t first;
730 	ext4_grpblk_t len;
731 	unsigned free = 0;
732 	unsigned fragments = 0;
733 	unsigned long long period = get_cycles();
734 
735 	/* initialize buddy from bitmap which is aggregation
736 	 * of on-disk bitmap and preallocations */
737 	i = mb_find_next_zero_bit(bitmap, max, 0);
738 	grp->bb_first_free = i;
739 	while (i < max) {
740 		fragments++;
741 		first = i;
742 		i = mb_find_next_bit(bitmap, max, i);
743 		len = i - first;
744 		free += len;
745 		if (len > 1)
746 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
747 		else
748 			grp->bb_counters[0]++;
749 		if (i < max)
750 			i = mb_find_next_zero_bit(bitmap, max, i);
751 	}
752 	grp->bb_fragments = fragments;
753 
754 	if (free != grp->bb_free) {
755 		ext4_grp_locked_error(sb, group, 0, 0,
756 				      "block bitmap and bg descriptor "
757 				      "inconsistent: %u vs %u free clusters",
758 				      free, grp->bb_free);
759 		/*
760 		 * If we intend to continue, we consider group descriptor
761 		 * corrupt and update bb_free using bitmap value
762 		 */
763 		grp->bb_free = free;
764 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
765 			percpu_counter_sub(&sbi->s_freeclusters_counter,
766 					   grp->bb_free);
767 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
768 	}
769 	mb_set_largest_free_order(sb, grp);
770 
771 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
772 
773 	period = get_cycles() - period;
774 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
775 	EXT4_SB(sb)->s_mb_buddies_generated++;
776 	EXT4_SB(sb)->s_mb_generation_time += period;
777 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
778 }
779 
780 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
781 {
782 	int count;
783 	int order = 1;
784 	void *buddy;
785 
786 	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
787 		ext4_set_bits(buddy, 0, count);
788 	}
789 	e4b->bd_info->bb_fragments = 0;
790 	memset(e4b->bd_info->bb_counters, 0,
791 		sizeof(*e4b->bd_info->bb_counters) *
792 		(e4b->bd_sb->s_blocksize_bits + 2));
793 
794 	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
795 		e4b->bd_bitmap, e4b->bd_group);
796 }
797 
798 /* The buddy information is attached the buddy cache inode
799  * for convenience. The information regarding each group
800  * is loaded via ext4_mb_load_buddy. The information involve
801  * block bitmap and buddy information. The information are
802  * stored in the inode as
803  *
804  * {                        page                        }
805  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
806  *
807  *
808  * one block each for bitmap and buddy information.
809  * So for each group we take up 2 blocks. A page can
810  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
811  * So it can have information regarding groups_per_page which
812  * is blocks_per_page/2
813  *
814  * Locking note:  This routine takes the block group lock of all groups
815  * for this page; do not hold this lock when calling this routine!
816  */
817 
818 static int ext4_mb_init_cache(struct page *page, char *incore)
819 {
820 	ext4_group_t ngroups;
821 	int blocksize;
822 	int blocks_per_page;
823 	int groups_per_page;
824 	int err = 0;
825 	int i;
826 	ext4_group_t first_group, group;
827 	int first_block;
828 	struct super_block *sb;
829 	struct buffer_head *bhs;
830 	struct buffer_head **bh = NULL;
831 	struct inode *inode;
832 	char *data;
833 	char *bitmap;
834 	struct ext4_group_info *grinfo;
835 
836 	mb_debug(1, "init page %lu\n", page->index);
837 
838 	inode = page->mapping->host;
839 	sb = inode->i_sb;
840 	ngroups = ext4_get_groups_count(sb);
841 	blocksize = 1 << inode->i_blkbits;
842 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
843 
844 	groups_per_page = blocks_per_page >> 1;
845 	if (groups_per_page == 0)
846 		groups_per_page = 1;
847 
848 	/* allocate buffer_heads to read bitmaps */
849 	if (groups_per_page > 1) {
850 		i = sizeof(struct buffer_head *) * groups_per_page;
851 		bh = kzalloc(i, GFP_NOFS);
852 		if (bh == NULL) {
853 			err = -ENOMEM;
854 			goto out;
855 		}
856 	} else
857 		bh = &bhs;
858 
859 	first_group = page->index * blocks_per_page / 2;
860 
861 	/* read all groups the page covers into the cache */
862 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
863 		if (group >= ngroups)
864 			break;
865 
866 		grinfo = ext4_get_group_info(sb, group);
867 		/*
868 		 * If page is uptodate then we came here after online resize
869 		 * which added some new uninitialized group info structs, so
870 		 * we must skip all initialized uptodate buddies on the page,
871 		 * which may be currently in use by an allocating task.
872 		 */
873 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
874 			bh[i] = NULL;
875 			continue;
876 		}
877 		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
878 			err = -ENOMEM;
879 			goto out;
880 		}
881 		mb_debug(1, "read bitmap for group %u\n", group);
882 	}
883 
884 	/* wait for I/O completion */
885 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
886 		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i]))
887 			err = -EIO;
888 	}
889 
890 	first_block = page->index * blocks_per_page;
891 	for (i = 0; i < blocks_per_page; i++) {
892 		group = (first_block + i) >> 1;
893 		if (group >= ngroups)
894 			break;
895 
896 		if (!bh[group - first_group])
897 			/* skip initialized uptodate buddy */
898 			continue;
899 
900 		if (!buffer_verified(bh[group - first_group]))
901 			/* Skip faulty bitmaps */
902 			continue;
903 		err = 0;
904 
905 		/*
906 		 * data carry information regarding this
907 		 * particular group in the format specified
908 		 * above
909 		 *
910 		 */
911 		data = page_address(page) + (i * blocksize);
912 		bitmap = bh[group - first_group]->b_data;
913 
914 		/*
915 		 * We place the buddy block and bitmap block
916 		 * close together
917 		 */
918 		if ((first_block + i) & 1) {
919 			/* this is block of buddy */
920 			BUG_ON(incore == NULL);
921 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
922 				group, page->index, i * blocksize);
923 			trace_ext4_mb_buddy_bitmap_load(sb, group);
924 			grinfo = ext4_get_group_info(sb, group);
925 			grinfo->bb_fragments = 0;
926 			memset(grinfo->bb_counters, 0,
927 			       sizeof(*grinfo->bb_counters) *
928 				(sb->s_blocksize_bits+2));
929 			/*
930 			 * incore got set to the group block bitmap below
931 			 */
932 			ext4_lock_group(sb, group);
933 			/* init the buddy */
934 			memset(data, 0xff, blocksize);
935 			ext4_mb_generate_buddy(sb, data, incore, group);
936 			ext4_unlock_group(sb, group);
937 			incore = NULL;
938 		} else {
939 			/* this is block of bitmap */
940 			BUG_ON(incore != NULL);
941 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
942 				group, page->index, i * blocksize);
943 			trace_ext4_mb_bitmap_load(sb, group);
944 
945 			/* see comments in ext4_mb_put_pa() */
946 			ext4_lock_group(sb, group);
947 			memcpy(data, bitmap, blocksize);
948 
949 			/* mark all preallocated blks used in in-core bitmap */
950 			ext4_mb_generate_from_pa(sb, data, group);
951 			ext4_mb_generate_from_freelist(sb, data, group);
952 			ext4_unlock_group(sb, group);
953 
954 			/* set incore so that the buddy information can be
955 			 * generated using this
956 			 */
957 			incore = data;
958 		}
959 	}
960 	SetPageUptodate(page);
961 
962 out:
963 	if (bh) {
964 		for (i = 0; i < groups_per_page; i++)
965 			brelse(bh[i]);
966 		if (bh != &bhs)
967 			kfree(bh);
968 	}
969 	return err;
970 }
971 
972 /*
973  * Lock the buddy and bitmap pages. This make sure other parallel init_group
974  * on the same buddy page doesn't happen whild holding the buddy page lock.
975  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
976  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
977  */
978 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
979 		ext4_group_t group, struct ext4_buddy *e4b)
980 {
981 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
982 	int block, pnum, poff;
983 	int blocks_per_page;
984 	struct page *page;
985 
986 	e4b->bd_buddy_page = NULL;
987 	e4b->bd_bitmap_page = NULL;
988 
989 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
990 	/*
991 	 * the buddy cache inode stores the block bitmap
992 	 * and buddy information in consecutive blocks.
993 	 * So for each group we need two blocks.
994 	 */
995 	block = group * 2;
996 	pnum = block / blocks_per_page;
997 	poff = block % blocks_per_page;
998 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
999 	if (!page)
1000 		return -ENOMEM;
1001 	BUG_ON(page->mapping != inode->i_mapping);
1002 	e4b->bd_bitmap_page = page;
1003 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1004 
1005 	if (blocks_per_page >= 2) {
1006 		/* buddy and bitmap are on the same page */
1007 		return 0;
1008 	}
1009 
1010 	block++;
1011 	pnum = block / blocks_per_page;
1012 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1013 	if (!page)
1014 		return -ENOMEM;
1015 	BUG_ON(page->mapping != inode->i_mapping);
1016 	e4b->bd_buddy_page = page;
1017 	return 0;
1018 }
1019 
1020 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1021 {
1022 	if (e4b->bd_bitmap_page) {
1023 		unlock_page(e4b->bd_bitmap_page);
1024 		page_cache_release(e4b->bd_bitmap_page);
1025 	}
1026 	if (e4b->bd_buddy_page) {
1027 		unlock_page(e4b->bd_buddy_page);
1028 		page_cache_release(e4b->bd_buddy_page);
1029 	}
1030 }
1031 
1032 /*
1033  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1034  * block group lock of all groups for this page; do not hold the BG lock when
1035  * calling this routine!
1036  */
1037 static noinline_for_stack
1038 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1039 {
1040 
1041 	struct ext4_group_info *this_grp;
1042 	struct ext4_buddy e4b;
1043 	struct page *page;
1044 	int ret = 0;
1045 
1046 	might_sleep();
1047 	mb_debug(1, "init group %u\n", group);
1048 	this_grp = ext4_get_group_info(sb, group);
1049 	/*
1050 	 * This ensures that we don't reinit the buddy cache
1051 	 * page which map to the group from which we are already
1052 	 * allocating. If we are looking at the buddy cache we would
1053 	 * have taken a reference using ext4_mb_load_buddy and that
1054 	 * would have pinned buddy page to page cache.
1055 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1056 	 * page accessed.
1057 	 */
1058 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
1059 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1060 		/*
1061 		 * somebody initialized the group
1062 		 * return without doing anything
1063 		 */
1064 		goto err;
1065 	}
1066 
1067 	page = e4b.bd_bitmap_page;
1068 	ret = ext4_mb_init_cache(page, NULL);
1069 	if (ret)
1070 		goto err;
1071 	if (!PageUptodate(page)) {
1072 		ret = -EIO;
1073 		goto err;
1074 	}
1075 
1076 	if (e4b.bd_buddy_page == NULL) {
1077 		/*
1078 		 * If both the bitmap and buddy are in
1079 		 * the same page we don't need to force
1080 		 * init the buddy
1081 		 */
1082 		ret = 0;
1083 		goto err;
1084 	}
1085 	/* init buddy cache */
1086 	page = e4b.bd_buddy_page;
1087 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
1088 	if (ret)
1089 		goto err;
1090 	if (!PageUptodate(page)) {
1091 		ret = -EIO;
1092 		goto err;
1093 	}
1094 err:
1095 	ext4_mb_put_buddy_page_lock(&e4b);
1096 	return ret;
1097 }
1098 
1099 /*
1100  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1101  * block group lock of all groups for this page; do not hold the BG lock when
1102  * calling this routine!
1103  */
1104 static noinline_for_stack int
1105 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1106 					struct ext4_buddy *e4b)
1107 {
1108 	int blocks_per_page;
1109 	int block;
1110 	int pnum;
1111 	int poff;
1112 	struct page *page;
1113 	int ret;
1114 	struct ext4_group_info *grp;
1115 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1116 	struct inode *inode = sbi->s_buddy_cache;
1117 
1118 	might_sleep();
1119 	mb_debug(1, "load group %u\n", group);
1120 
1121 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1122 	grp = ext4_get_group_info(sb, group);
1123 
1124 	e4b->bd_blkbits = sb->s_blocksize_bits;
1125 	e4b->bd_info = grp;
1126 	e4b->bd_sb = sb;
1127 	e4b->bd_group = group;
1128 	e4b->bd_buddy_page = NULL;
1129 	e4b->bd_bitmap_page = NULL;
1130 
1131 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1132 		/*
1133 		 * we need full data about the group
1134 		 * to make a good selection
1135 		 */
1136 		ret = ext4_mb_init_group(sb, group);
1137 		if (ret)
1138 			return ret;
1139 	}
1140 
1141 	/*
1142 	 * the buddy cache inode stores the block bitmap
1143 	 * and buddy information in consecutive blocks.
1144 	 * So for each group we need two blocks.
1145 	 */
1146 	block = group * 2;
1147 	pnum = block / blocks_per_page;
1148 	poff = block % blocks_per_page;
1149 
1150 	/* we could use find_or_create_page(), but it locks page
1151 	 * what we'd like to avoid in fast path ... */
1152 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1153 	if (page == NULL || !PageUptodate(page)) {
1154 		if (page)
1155 			/*
1156 			 * drop the page reference and try
1157 			 * to get the page with lock. If we
1158 			 * are not uptodate that implies
1159 			 * somebody just created the page but
1160 			 * is yet to initialize the same. So
1161 			 * wait for it to initialize.
1162 			 */
1163 			page_cache_release(page);
1164 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1165 		if (page) {
1166 			BUG_ON(page->mapping != inode->i_mapping);
1167 			if (!PageUptodate(page)) {
1168 				ret = ext4_mb_init_cache(page, NULL);
1169 				if (ret) {
1170 					unlock_page(page);
1171 					goto err;
1172 				}
1173 				mb_cmp_bitmaps(e4b, page_address(page) +
1174 					       (poff * sb->s_blocksize));
1175 			}
1176 			unlock_page(page);
1177 		}
1178 	}
1179 	if (page == NULL) {
1180 		ret = -ENOMEM;
1181 		goto err;
1182 	}
1183 	if (!PageUptodate(page)) {
1184 		ret = -EIO;
1185 		goto err;
1186 	}
1187 
1188 	/* Pages marked accessed already */
1189 	e4b->bd_bitmap_page = page;
1190 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1191 
1192 	block++;
1193 	pnum = block / blocks_per_page;
1194 	poff = block % blocks_per_page;
1195 
1196 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1197 	if (page == NULL || !PageUptodate(page)) {
1198 		if (page)
1199 			page_cache_release(page);
1200 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1201 		if (page) {
1202 			BUG_ON(page->mapping != inode->i_mapping);
1203 			if (!PageUptodate(page)) {
1204 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
1205 				if (ret) {
1206 					unlock_page(page);
1207 					goto err;
1208 				}
1209 			}
1210 			unlock_page(page);
1211 		}
1212 	}
1213 	if (page == NULL) {
1214 		ret = -ENOMEM;
1215 		goto err;
1216 	}
1217 	if (!PageUptodate(page)) {
1218 		ret = -EIO;
1219 		goto err;
1220 	}
1221 
1222 	/* Pages marked accessed already */
1223 	e4b->bd_buddy_page = page;
1224 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1225 
1226 	BUG_ON(e4b->bd_bitmap_page == NULL);
1227 	BUG_ON(e4b->bd_buddy_page == NULL);
1228 
1229 	return 0;
1230 
1231 err:
1232 	if (page)
1233 		page_cache_release(page);
1234 	if (e4b->bd_bitmap_page)
1235 		page_cache_release(e4b->bd_bitmap_page);
1236 	if (e4b->bd_buddy_page)
1237 		page_cache_release(e4b->bd_buddy_page);
1238 	e4b->bd_buddy = NULL;
1239 	e4b->bd_bitmap = NULL;
1240 	return ret;
1241 }
1242 
1243 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1244 {
1245 	if (e4b->bd_bitmap_page)
1246 		page_cache_release(e4b->bd_bitmap_page);
1247 	if (e4b->bd_buddy_page)
1248 		page_cache_release(e4b->bd_buddy_page);
1249 }
1250 
1251 
1252 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1253 {
1254 	int order = 1;
1255 	void *bb;
1256 
1257 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1258 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1259 
1260 	bb = e4b->bd_buddy;
1261 	while (order <= e4b->bd_blkbits + 1) {
1262 		block = block >> 1;
1263 		if (!mb_test_bit(block, bb)) {
1264 			/* this block is part of buddy of order 'order' */
1265 			return order;
1266 		}
1267 		bb += 1 << (e4b->bd_blkbits - order);
1268 		order++;
1269 	}
1270 	return 0;
1271 }
1272 
1273 static void mb_clear_bits(void *bm, int cur, int len)
1274 {
1275 	__u32 *addr;
1276 
1277 	len = cur + len;
1278 	while (cur < len) {
1279 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1280 			/* fast path: clear whole word at once */
1281 			addr = bm + (cur >> 3);
1282 			*addr = 0;
1283 			cur += 32;
1284 			continue;
1285 		}
1286 		mb_clear_bit(cur, bm);
1287 		cur++;
1288 	}
1289 }
1290 
1291 /* clear bits in given range
1292  * will return first found zero bit if any, -1 otherwise
1293  */
1294 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1295 {
1296 	__u32 *addr;
1297 	int zero_bit = -1;
1298 
1299 	len = cur + len;
1300 	while (cur < len) {
1301 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1302 			/* fast path: clear whole word at once */
1303 			addr = bm + (cur >> 3);
1304 			if (*addr != (__u32)(-1) && zero_bit == -1)
1305 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1306 			*addr = 0;
1307 			cur += 32;
1308 			continue;
1309 		}
1310 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1311 			zero_bit = cur;
1312 		cur++;
1313 	}
1314 
1315 	return zero_bit;
1316 }
1317 
1318 void ext4_set_bits(void *bm, int cur, int len)
1319 {
1320 	__u32 *addr;
1321 
1322 	len = cur + len;
1323 	while (cur < len) {
1324 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1325 			/* fast path: set whole word at once */
1326 			addr = bm + (cur >> 3);
1327 			*addr = 0xffffffff;
1328 			cur += 32;
1329 			continue;
1330 		}
1331 		mb_set_bit(cur, bm);
1332 		cur++;
1333 	}
1334 }
1335 
1336 /*
1337  * _________________________________________________________________ */
1338 
1339 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1340 {
1341 	if (mb_test_bit(*bit + side, bitmap)) {
1342 		mb_clear_bit(*bit, bitmap);
1343 		(*bit) -= side;
1344 		return 1;
1345 	}
1346 	else {
1347 		(*bit) += side;
1348 		mb_set_bit(*bit, bitmap);
1349 		return -1;
1350 	}
1351 }
1352 
1353 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1354 {
1355 	int max;
1356 	int order = 1;
1357 	void *buddy = mb_find_buddy(e4b, order, &max);
1358 
1359 	while (buddy) {
1360 		void *buddy2;
1361 
1362 		/* Bits in range [first; last] are known to be set since
1363 		 * corresponding blocks were allocated. Bits in range
1364 		 * (first; last) will stay set because they form buddies on
1365 		 * upper layer. We just deal with borders if they don't
1366 		 * align with upper layer and then go up.
1367 		 * Releasing entire group is all about clearing
1368 		 * single bit of highest order buddy.
1369 		 */
1370 
1371 		/* Example:
1372 		 * ---------------------------------
1373 		 * |   1   |   1   |   1   |   1   |
1374 		 * ---------------------------------
1375 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1376 		 * ---------------------------------
1377 		 *   0   1   2   3   4   5   6   7
1378 		 *      \_____________________/
1379 		 *
1380 		 * Neither [1] nor [6] is aligned to above layer.
1381 		 * Left neighbour [0] is free, so mark it busy,
1382 		 * decrease bb_counters and extend range to
1383 		 * [0; 6]
1384 		 * Right neighbour [7] is busy. It can't be coaleasced with [6], so
1385 		 * mark [6] free, increase bb_counters and shrink range to
1386 		 * [0; 5].
1387 		 * Then shift range to [0; 2], go up and do the same.
1388 		 */
1389 
1390 
1391 		if (first & 1)
1392 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1393 		if (!(last & 1))
1394 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1395 		if (first > last)
1396 			break;
1397 		order++;
1398 
1399 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1400 			mb_clear_bits(buddy, first, last - first + 1);
1401 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1402 			break;
1403 		}
1404 		first >>= 1;
1405 		last >>= 1;
1406 		buddy = buddy2;
1407 	}
1408 }
1409 
1410 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1411 			   int first, int count)
1412 {
1413 	int left_is_free = 0;
1414 	int right_is_free = 0;
1415 	int block;
1416 	int last = first + count - 1;
1417 	struct super_block *sb = e4b->bd_sb;
1418 
1419 	if (WARN_ON(count == 0))
1420 		return;
1421 	BUG_ON(last >= (sb->s_blocksize << 3));
1422 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1423 	/* Don't bother if the block group is corrupt. */
1424 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1425 		return;
1426 
1427 	mb_check_buddy(e4b);
1428 	mb_free_blocks_double(inode, e4b, first, count);
1429 
1430 	e4b->bd_info->bb_free += count;
1431 	if (first < e4b->bd_info->bb_first_free)
1432 		e4b->bd_info->bb_first_free = first;
1433 
1434 	/* access memory sequentially: check left neighbour,
1435 	 * clear range and then check right neighbour
1436 	 */
1437 	if (first != 0)
1438 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1439 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1440 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1441 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1442 
1443 	if (unlikely(block != -1)) {
1444 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1445 		ext4_fsblk_t blocknr;
1446 
1447 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1448 		blocknr += EXT4_C2B(EXT4_SB(sb), block);
1449 		ext4_grp_locked_error(sb, e4b->bd_group,
1450 				      inode ? inode->i_ino : 0,
1451 				      blocknr,
1452 				      "freeing already freed block "
1453 				      "(bit %u); block bitmap corrupt.",
1454 				      block);
1455 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
1456 			percpu_counter_sub(&sbi->s_freeclusters_counter,
1457 					   e4b->bd_info->bb_free);
1458 		/* Mark the block group as corrupt. */
1459 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1460 			&e4b->bd_info->bb_state);
1461 		mb_regenerate_buddy(e4b);
1462 		goto done;
1463 	}
1464 
1465 	/* let's maintain fragments counter */
1466 	if (left_is_free && right_is_free)
1467 		e4b->bd_info->bb_fragments--;
1468 	else if (!left_is_free && !right_is_free)
1469 		e4b->bd_info->bb_fragments++;
1470 
1471 	/* buddy[0] == bd_bitmap is a special case, so handle
1472 	 * it right away and let mb_buddy_mark_free stay free of
1473 	 * zero order checks.
1474 	 * Check if neighbours are to be coaleasced,
1475 	 * adjust bitmap bb_counters and borders appropriately.
1476 	 */
1477 	if (first & 1) {
1478 		first += !left_is_free;
1479 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1480 	}
1481 	if (!(last & 1)) {
1482 		last -= !right_is_free;
1483 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1484 	}
1485 
1486 	if (first <= last)
1487 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1488 
1489 done:
1490 	mb_set_largest_free_order(sb, e4b->bd_info);
1491 	mb_check_buddy(e4b);
1492 }
1493 
1494 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1495 				int needed, struct ext4_free_extent *ex)
1496 {
1497 	int next = block;
1498 	int max, order;
1499 	void *buddy;
1500 
1501 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1502 	BUG_ON(ex == NULL);
1503 
1504 	buddy = mb_find_buddy(e4b, 0, &max);
1505 	BUG_ON(buddy == NULL);
1506 	BUG_ON(block >= max);
1507 	if (mb_test_bit(block, buddy)) {
1508 		ex->fe_len = 0;
1509 		ex->fe_start = 0;
1510 		ex->fe_group = 0;
1511 		return 0;
1512 	}
1513 
1514 	/* find actual order */
1515 	order = mb_find_order_for_block(e4b, block);
1516 	block = block >> order;
1517 
1518 	ex->fe_len = 1 << order;
1519 	ex->fe_start = block << order;
1520 	ex->fe_group = e4b->bd_group;
1521 
1522 	/* calc difference from given start */
1523 	next = next - ex->fe_start;
1524 	ex->fe_len -= next;
1525 	ex->fe_start += next;
1526 
1527 	while (needed > ex->fe_len &&
1528 	       mb_find_buddy(e4b, order, &max)) {
1529 
1530 		if (block + 1 >= max)
1531 			break;
1532 
1533 		next = (block + 1) * (1 << order);
1534 		if (mb_test_bit(next, e4b->bd_bitmap))
1535 			break;
1536 
1537 		order = mb_find_order_for_block(e4b, next);
1538 
1539 		block = next >> order;
1540 		ex->fe_len += 1 << order;
1541 	}
1542 
1543 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1544 	return ex->fe_len;
1545 }
1546 
1547 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1548 {
1549 	int ord;
1550 	int mlen = 0;
1551 	int max = 0;
1552 	int cur;
1553 	int start = ex->fe_start;
1554 	int len = ex->fe_len;
1555 	unsigned ret = 0;
1556 	int len0 = len;
1557 	void *buddy;
1558 
1559 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1560 	BUG_ON(e4b->bd_group != ex->fe_group);
1561 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1562 	mb_check_buddy(e4b);
1563 	mb_mark_used_double(e4b, start, len);
1564 
1565 	e4b->bd_info->bb_free -= len;
1566 	if (e4b->bd_info->bb_first_free == start)
1567 		e4b->bd_info->bb_first_free += len;
1568 
1569 	/* let's maintain fragments counter */
1570 	if (start != 0)
1571 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1572 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1573 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1574 	if (mlen && max)
1575 		e4b->bd_info->bb_fragments++;
1576 	else if (!mlen && !max)
1577 		e4b->bd_info->bb_fragments--;
1578 
1579 	/* let's maintain buddy itself */
1580 	while (len) {
1581 		ord = mb_find_order_for_block(e4b, start);
1582 
1583 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1584 			/* the whole chunk may be allocated at once! */
1585 			mlen = 1 << ord;
1586 			buddy = mb_find_buddy(e4b, ord, &max);
1587 			BUG_ON((start >> ord) >= max);
1588 			mb_set_bit(start >> ord, buddy);
1589 			e4b->bd_info->bb_counters[ord]--;
1590 			start += mlen;
1591 			len -= mlen;
1592 			BUG_ON(len < 0);
1593 			continue;
1594 		}
1595 
1596 		/* store for history */
1597 		if (ret == 0)
1598 			ret = len | (ord << 16);
1599 
1600 		/* we have to split large buddy */
1601 		BUG_ON(ord <= 0);
1602 		buddy = mb_find_buddy(e4b, ord, &max);
1603 		mb_set_bit(start >> ord, buddy);
1604 		e4b->bd_info->bb_counters[ord]--;
1605 
1606 		ord--;
1607 		cur = (start >> ord) & ~1U;
1608 		buddy = mb_find_buddy(e4b, ord, &max);
1609 		mb_clear_bit(cur, buddy);
1610 		mb_clear_bit(cur + 1, buddy);
1611 		e4b->bd_info->bb_counters[ord]++;
1612 		e4b->bd_info->bb_counters[ord]++;
1613 	}
1614 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1615 
1616 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1617 	mb_check_buddy(e4b);
1618 
1619 	return ret;
1620 }
1621 
1622 /*
1623  * Must be called under group lock!
1624  */
1625 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1626 					struct ext4_buddy *e4b)
1627 {
1628 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1629 	int ret;
1630 
1631 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1632 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1633 
1634 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1635 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1636 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1637 
1638 	/* preallocation can change ac_b_ex, thus we store actually
1639 	 * allocated blocks for history */
1640 	ac->ac_f_ex = ac->ac_b_ex;
1641 
1642 	ac->ac_status = AC_STATUS_FOUND;
1643 	ac->ac_tail = ret & 0xffff;
1644 	ac->ac_buddy = ret >> 16;
1645 
1646 	/*
1647 	 * take the page reference. We want the page to be pinned
1648 	 * so that we don't get a ext4_mb_init_cache_call for this
1649 	 * group until we update the bitmap. That would mean we
1650 	 * double allocate blocks. The reference is dropped
1651 	 * in ext4_mb_release_context
1652 	 */
1653 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1654 	get_page(ac->ac_bitmap_page);
1655 	ac->ac_buddy_page = e4b->bd_buddy_page;
1656 	get_page(ac->ac_buddy_page);
1657 	/* store last allocated for subsequent stream allocation */
1658 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1659 		spin_lock(&sbi->s_md_lock);
1660 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1661 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1662 		spin_unlock(&sbi->s_md_lock);
1663 	}
1664 }
1665 
1666 /*
1667  * regular allocator, for general purposes allocation
1668  */
1669 
1670 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1671 					struct ext4_buddy *e4b,
1672 					int finish_group)
1673 {
1674 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1675 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1676 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1677 	struct ext4_free_extent ex;
1678 	int max;
1679 
1680 	if (ac->ac_status == AC_STATUS_FOUND)
1681 		return;
1682 	/*
1683 	 * We don't want to scan for a whole year
1684 	 */
1685 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1686 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1687 		ac->ac_status = AC_STATUS_BREAK;
1688 		return;
1689 	}
1690 
1691 	/*
1692 	 * Haven't found good chunk so far, let's continue
1693 	 */
1694 	if (bex->fe_len < gex->fe_len)
1695 		return;
1696 
1697 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1698 			&& bex->fe_group == e4b->bd_group) {
1699 		/* recheck chunk's availability - we don't know
1700 		 * when it was found (within this lock-unlock
1701 		 * period or not) */
1702 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1703 		if (max >= gex->fe_len) {
1704 			ext4_mb_use_best_found(ac, e4b);
1705 			return;
1706 		}
1707 	}
1708 }
1709 
1710 /*
1711  * The routine checks whether found extent is good enough. If it is,
1712  * then the extent gets marked used and flag is set to the context
1713  * to stop scanning. Otherwise, the extent is compared with the
1714  * previous found extent and if new one is better, then it's stored
1715  * in the context. Later, the best found extent will be used, if
1716  * mballoc can't find good enough extent.
1717  *
1718  * FIXME: real allocation policy is to be designed yet!
1719  */
1720 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1721 					struct ext4_free_extent *ex,
1722 					struct ext4_buddy *e4b)
1723 {
1724 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1725 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1726 
1727 	BUG_ON(ex->fe_len <= 0);
1728 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1729 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1730 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1731 
1732 	ac->ac_found++;
1733 
1734 	/*
1735 	 * The special case - take what you catch first
1736 	 */
1737 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1738 		*bex = *ex;
1739 		ext4_mb_use_best_found(ac, e4b);
1740 		return;
1741 	}
1742 
1743 	/*
1744 	 * Let's check whether the chuck is good enough
1745 	 */
1746 	if (ex->fe_len == gex->fe_len) {
1747 		*bex = *ex;
1748 		ext4_mb_use_best_found(ac, e4b);
1749 		return;
1750 	}
1751 
1752 	/*
1753 	 * If this is first found extent, just store it in the context
1754 	 */
1755 	if (bex->fe_len == 0) {
1756 		*bex = *ex;
1757 		return;
1758 	}
1759 
1760 	/*
1761 	 * If new found extent is better, store it in the context
1762 	 */
1763 	if (bex->fe_len < gex->fe_len) {
1764 		/* if the request isn't satisfied, any found extent
1765 		 * larger than previous best one is better */
1766 		if (ex->fe_len > bex->fe_len)
1767 			*bex = *ex;
1768 	} else if (ex->fe_len > gex->fe_len) {
1769 		/* if the request is satisfied, then we try to find
1770 		 * an extent that still satisfy the request, but is
1771 		 * smaller than previous one */
1772 		if (ex->fe_len < bex->fe_len)
1773 			*bex = *ex;
1774 	}
1775 
1776 	ext4_mb_check_limits(ac, e4b, 0);
1777 }
1778 
1779 static noinline_for_stack
1780 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1781 					struct ext4_buddy *e4b)
1782 {
1783 	struct ext4_free_extent ex = ac->ac_b_ex;
1784 	ext4_group_t group = ex.fe_group;
1785 	int max;
1786 	int err;
1787 
1788 	BUG_ON(ex.fe_len <= 0);
1789 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1790 	if (err)
1791 		return err;
1792 
1793 	ext4_lock_group(ac->ac_sb, group);
1794 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1795 
1796 	if (max > 0) {
1797 		ac->ac_b_ex = ex;
1798 		ext4_mb_use_best_found(ac, e4b);
1799 	}
1800 
1801 	ext4_unlock_group(ac->ac_sb, group);
1802 	ext4_mb_unload_buddy(e4b);
1803 
1804 	return 0;
1805 }
1806 
1807 static noinline_for_stack
1808 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1809 				struct ext4_buddy *e4b)
1810 {
1811 	ext4_group_t group = ac->ac_g_ex.fe_group;
1812 	int max;
1813 	int err;
1814 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1815 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1816 	struct ext4_free_extent ex;
1817 
1818 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1819 		return 0;
1820 	if (grp->bb_free == 0)
1821 		return 0;
1822 
1823 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1824 	if (err)
1825 		return err;
1826 
1827 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1828 		ext4_mb_unload_buddy(e4b);
1829 		return 0;
1830 	}
1831 
1832 	ext4_lock_group(ac->ac_sb, group);
1833 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1834 			     ac->ac_g_ex.fe_len, &ex);
1835 	ex.fe_logical = 0xDEADFA11; /* debug value */
1836 
1837 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1838 		ext4_fsblk_t start;
1839 
1840 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1841 			ex.fe_start;
1842 		/* use do_div to get remainder (would be 64-bit modulo) */
1843 		if (do_div(start, sbi->s_stripe) == 0) {
1844 			ac->ac_found++;
1845 			ac->ac_b_ex = ex;
1846 			ext4_mb_use_best_found(ac, e4b);
1847 		}
1848 	} else if (max >= ac->ac_g_ex.fe_len) {
1849 		BUG_ON(ex.fe_len <= 0);
1850 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1851 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1852 		ac->ac_found++;
1853 		ac->ac_b_ex = ex;
1854 		ext4_mb_use_best_found(ac, e4b);
1855 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1856 		/* Sometimes, caller may want to merge even small
1857 		 * number of blocks to an existing extent */
1858 		BUG_ON(ex.fe_len <= 0);
1859 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1860 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1861 		ac->ac_found++;
1862 		ac->ac_b_ex = ex;
1863 		ext4_mb_use_best_found(ac, e4b);
1864 	}
1865 	ext4_unlock_group(ac->ac_sb, group);
1866 	ext4_mb_unload_buddy(e4b);
1867 
1868 	return 0;
1869 }
1870 
1871 /*
1872  * The routine scans buddy structures (not bitmap!) from given order
1873  * to max order and tries to find big enough chunk to satisfy the req
1874  */
1875 static noinline_for_stack
1876 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1877 					struct ext4_buddy *e4b)
1878 {
1879 	struct super_block *sb = ac->ac_sb;
1880 	struct ext4_group_info *grp = e4b->bd_info;
1881 	void *buddy;
1882 	int i;
1883 	int k;
1884 	int max;
1885 
1886 	BUG_ON(ac->ac_2order <= 0);
1887 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1888 		if (grp->bb_counters[i] == 0)
1889 			continue;
1890 
1891 		buddy = mb_find_buddy(e4b, i, &max);
1892 		BUG_ON(buddy == NULL);
1893 
1894 		k = mb_find_next_zero_bit(buddy, max, 0);
1895 		BUG_ON(k >= max);
1896 
1897 		ac->ac_found++;
1898 
1899 		ac->ac_b_ex.fe_len = 1 << i;
1900 		ac->ac_b_ex.fe_start = k << i;
1901 		ac->ac_b_ex.fe_group = e4b->bd_group;
1902 
1903 		ext4_mb_use_best_found(ac, e4b);
1904 
1905 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1906 
1907 		if (EXT4_SB(sb)->s_mb_stats)
1908 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1909 
1910 		break;
1911 	}
1912 }
1913 
1914 /*
1915  * The routine scans the group and measures all found extents.
1916  * In order to optimize scanning, caller must pass number of
1917  * free blocks in the group, so the routine can know upper limit.
1918  */
1919 static noinline_for_stack
1920 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1921 					struct ext4_buddy *e4b)
1922 {
1923 	struct super_block *sb = ac->ac_sb;
1924 	void *bitmap = e4b->bd_bitmap;
1925 	struct ext4_free_extent ex;
1926 	int i;
1927 	int free;
1928 
1929 	free = e4b->bd_info->bb_free;
1930 	BUG_ON(free <= 0);
1931 
1932 	i = e4b->bd_info->bb_first_free;
1933 
1934 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1935 		i = mb_find_next_zero_bit(bitmap,
1936 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1937 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1938 			/*
1939 			 * IF we have corrupt bitmap, we won't find any
1940 			 * free blocks even though group info says we
1941 			 * we have free blocks
1942 			 */
1943 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1944 					"%d free clusters as per "
1945 					"group info. But bitmap says 0",
1946 					free);
1947 			break;
1948 		}
1949 
1950 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
1951 		BUG_ON(ex.fe_len <= 0);
1952 		if (free < ex.fe_len) {
1953 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1954 					"%d free clusters as per "
1955 					"group info. But got %d blocks",
1956 					free, ex.fe_len);
1957 			/*
1958 			 * The number of free blocks differs. This mostly
1959 			 * indicate that the bitmap is corrupt. So exit
1960 			 * without claiming the space.
1961 			 */
1962 			break;
1963 		}
1964 		ex.fe_logical = 0xDEADC0DE; /* debug value */
1965 		ext4_mb_measure_extent(ac, &ex, e4b);
1966 
1967 		i += ex.fe_len;
1968 		free -= ex.fe_len;
1969 	}
1970 
1971 	ext4_mb_check_limits(ac, e4b, 1);
1972 }
1973 
1974 /*
1975  * This is a special case for storages like raid5
1976  * we try to find stripe-aligned chunks for stripe-size-multiple requests
1977  */
1978 static noinline_for_stack
1979 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1980 				 struct ext4_buddy *e4b)
1981 {
1982 	struct super_block *sb = ac->ac_sb;
1983 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1984 	void *bitmap = e4b->bd_bitmap;
1985 	struct ext4_free_extent ex;
1986 	ext4_fsblk_t first_group_block;
1987 	ext4_fsblk_t a;
1988 	ext4_grpblk_t i;
1989 	int max;
1990 
1991 	BUG_ON(sbi->s_stripe == 0);
1992 
1993 	/* find first stripe-aligned block in group */
1994 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1995 
1996 	a = first_group_block + sbi->s_stripe - 1;
1997 	do_div(a, sbi->s_stripe);
1998 	i = (a * sbi->s_stripe) - first_group_block;
1999 
2000 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2001 		if (!mb_test_bit(i, bitmap)) {
2002 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2003 			if (max >= sbi->s_stripe) {
2004 				ac->ac_found++;
2005 				ex.fe_logical = 0xDEADF00D; /* debug value */
2006 				ac->ac_b_ex = ex;
2007 				ext4_mb_use_best_found(ac, e4b);
2008 				break;
2009 			}
2010 		}
2011 		i += sbi->s_stripe;
2012 	}
2013 }
2014 
2015 /*
2016  * This is now called BEFORE we load the buddy bitmap.
2017  * Returns either 1 or 0 indicating that the group is either suitable
2018  * for the allocation or not. In addition it can also return negative
2019  * error code when something goes wrong.
2020  */
2021 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
2022 				ext4_group_t group, int cr)
2023 {
2024 	unsigned free, fragments;
2025 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2026 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2027 
2028 	BUG_ON(cr < 0 || cr >= 4);
2029 
2030 	free = grp->bb_free;
2031 	if (free == 0)
2032 		return 0;
2033 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2034 		return 0;
2035 
2036 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2037 		return 0;
2038 
2039 	/* We only do this if the grp has never been initialized */
2040 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2041 		int ret = ext4_mb_init_group(ac->ac_sb, group);
2042 		if (ret)
2043 			return ret;
2044 	}
2045 
2046 	fragments = grp->bb_fragments;
2047 	if (fragments == 0)
2048 		return 0;
2049 
2050 	switch (cr) {
2051 	case 0:
2052 		BUG_ON(ac->ac_2order == 0);
2053 
2054 		/* Avoid using the first bg of a flexgroup for data files */
2055 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2056 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2057 		    ((group % flex_size) == 0))
2058 			return 0;
2059 
2060 		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
2061 		    (free / fragments) >= ac->ac_g_ex.fe_len)
2062 			return 1;
2063 
2064 		if (grp->bb_largest_free_order < ac->ac_2order)
2065 			return 0;
2066 
2067 		return 1;
2068 	case 1:
2069 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2070 			return 1;
2071 		break;
2072 	case 2:
2073 		if (free >= ac->ac_g_ex.fe_len)
2074 			return 1;
2075 		break;
2076 	case 3:
2077 		return 1;
2078 	default:
2079 		BUG();
2080 	}
2081 
2082 	return 0;
2083 }
2084 
2085 static noinline_for_stack int
2086 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2087 {
2088 	ext4_group_t ngroups, group, i;
2089 	int cr;
2090 	int err = 0, first_err = 0;
2091 	struct ext4_sb_info *sbi;
2092 	struct super_block *sb;
2093 	struct ext4_buddy e4b;
2094 
2095 	sb = ac->ac_sb;
2096 	sbi = EXT4_SB(sb);
2097 	ngroups = ext4_get_groups_count(sb);
2098 	/* non-extent files are limited to low blocks/groups */
2099 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2100 		ngroups = sbi->s_blockfile_groups;
2101 
2102 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2103 
2104 	/* first, try the goal */
2105 	err = ext4_mb_find_by_goal(ac, &e4b);
2106 	if (err || ac->ac_status == AC_STATUS_FOUND)
2107 		goto out;
2108 
2109 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2110 		goto out;
2111 
2112 	/*
2113 	 * ac->ac2_order is set only if the fe_len is a power of 2
2114 	 * if ac2_order is set we also set criteria to 0 so that we
2115 	 * try exact allocation using buddy.
2116 	 */
2117 	i = fls(ac->ac_g_ex.fe_len);
2118 	ac->ac_2order = 0;
2119 	/*
2120 	 * We search using buddy data only if the order of the request
2121 	 * is greater than equal to the sbi_s_mb_order2_reqs
2122 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2123 	 */
2124 	if (i >= sbi->s_mb_order2_reqs) {
2125 		/*
2126 		 * This should tell if fe_len is exactly power of 2
2127 		 */
2128 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2129 			ac->ac_2order = i - 1;
2130 	}
2131 
2132 	/* if stream allocation is enabled, use global goal */
2133 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2134 		/* TBD: may be hot point */
2135 		spin_lock(&sbi->s_md_lock);
2136 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2137 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2138 		spin_unlock(&sbi->s_md_lock);
2139 	}
2140 
2141 	/* Let's just scan groups to find more-less suitable blocks */
2142 	cr = ac->ac_2order ? 0 : 1;
2143 	/*
2144 	 * cr == 0 try to get exact allocation,
2145 	 * cr == 3  try to get anything
2146 	 */
2147 repeat:
2148 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2149 		ac->ac_criteria = cr;
2150 		/*
2151 		 * searching for the right group start
2152 		 * from the goal value specified
2153 		 */
2154 		group = ac->ac_g_ex.fe_group;
2155 
2156 		for (i = 0; i < ngroups; group++, i++) {
2157 			int ret = 0;
2158 			cond_resched();
2159 			/*
2160 			 * Artificially restricted ngroups for non-extent
2161 			 * files makes group > ngroups possible on first loop.
2162 			 */
2163 			if (group >= ngroups)
2164 				group = 0;
2165 
2166 			/* This now checks without needing the buddy page */
2167 			ret = ext4_mb_good_group(ac, group, cr);
2168 			if (ret <= 0) {
2169 				if (!first_err)
2170 					first_err = ret;
2171 				continue;
2172 			}
2173 
2174 			err = ext4_mb_load_buddy(sb, group, &e4b);
2175 			if (err)
2176 				goto out;
2177 
2178 			ext4_lock_group(sb, group);
2179 
2180 			/*
2181 			 * We need to check again after locking the
2182 			 * block group
2183 			 */
2184 			ret = ext4_mb_good_group(ac, group, cr);
2185 			if (ret <= 0) {
2186 				ext4_unlock_group(sb, group);
2187 				ext4_mb_unload_buddy(&e4b);
2188 				if (!first_err)
2189 					first_err = ret;
2190 				continue;
2191 			}
2192 
2193 			ac->ac_groups_scanned++;
2194 			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
2195 				ext4_mb_simple_scan_group(ac, &e4b);
2196 			else if (cr == 1 && sbi->s_stripe &&
2197 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2198 				ext4_mb_scan_aligned(ac, &e4b);
2199 			else
2200 				ext4_mb_complex_scan_group(ac, &e4b);
2201 
2202 			ext4_unlock_group(sb, group);
2203 			ext4_mb_unload_buddy(&e4b);
2204 
2205 			if (ac->ac_status != AC_STATUS_CONTINUE)
2206 				break;
2207 		}
2208 	}
2209 
2210 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2211 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2212 		/*
2213 		 * We've been searching too long. Let's try to allocate
2214 		 * the best chunk we've found so far
2215 		 */
2216 
2217 		ext4_mb_try_best_found(ac, &e4b);
2218 		if (ac->ac_status != AC_STATUS_FOUND) {
2219 			/*
2220 			 * Someone more lucky has already allocated it.
2221 			 * The only thing we can do is just take first
2222 			 * found block(s)
2223 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2224 			 */
2225 			ac->ac_b_ex.fe_group = 0;
2226 			ac->ac_b_ex.fe_start = 0;
2227 			ac->ac_b_ex.fe_len = 0;
2228 			ac->ac_status = AC_STATUS_CONTINUE;
2229 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2230 			cr = 3;
2231 			atomic_inc(&sbi->s_mb_lost_chunks);
2232 			goto repeat;
2233 		}
2234 	}
2235 out:
2236 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2237 		err = first_err;
2238 	return err;
2239 }
2240 
2241 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2242 {
2243 	struct super_block *sb = seq->private;
2244 	ext4_group_t group;
2245 
2246 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2247 		return NULL;
2248 	group = *pos + 1;
2249 	return (void *) ((unsigned long) group);
2250 }
2251 
2252 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2253 {
2254 	struct super_block *sb = seq->private;
2255 	ext4_group_t group;
2256 
2257 	++*pos;
2258 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2259 		return NULL;
2260 	group = *pos + 1;
2261 	return (void *) ((unsigned long) group);
2262 }
2263 
2264 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2265 {
2266 	struct super_block *sb = seq->private;
2267 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2268 	int i;
2269 	int err, buddy_loaded = 0;
2270 	struct ext4_buddy e4b;
2271 	struct ext4_group_info *grinfo;
2272 	struct sg {
2273 		struct ext4_group_info info;
2274 		ext4_grpblk_t counters[16];
2275 	} sg;
2276 
2277 	group--;
2278 	if (group == 0)
2279 		seq_puts(seq, "#group: free  frags first ["
2280 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2281 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]");
2282 
2283 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2284 		sizeof(struct ext4_group_info);
2285 	grinfo = ext4_get_group_info(sb, group);
2286 	/* Load the group info in memory only if not already loaded. */
2287 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2288 		err = ext4_mb_load_buddy(sb, group, &e4b);
2289 		if (err) {
2290 			seq_printf(seq, "#%-5u: I/O error\n", group);
2291 			return 0;
2292 		}
2293 		buddy_loaded = 1;
2294 	}
2295 
2296 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2297 
2298 	if (buddy_loaded)
2299 		ext4_mb_unload_buddy(&e4b);
2300 
2301 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2302 			sg.info.bb_fragments, sg.info.bb_first_free);
2303 	for (i = 0; i <= 13; i++)
2304 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2305 				sg.info.bb_counters[i] : 0);
2306 	seq_printf(seq, " ]\n");
2307 
2308 	return 0;
2309 }
2310 
2311 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2312 {
2313 }
2314 
2315 static const struct seq_operations ext4_mb_seq_groups_ops = {
2316 	.start  = ext4_mb_seq_groups_start,
2317 	.next   = ext4_mb_seq_groups_next,
2318 	.stop   = ext4_mb_seq_groups_stop,
2319 	.show   = ext4_mb_seq_groups_show,
2320 };
2321 
2322 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2323 {
2324 	struct super_block *sb = PDE_DATA(inode);
2325 	int rc;
2326 
2327 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2328 	if (rc == 0) {
2329 		struct seq_file *m = file->private_data;
2330 		m->private = sb;
2331 	}
2332 	return rc;
2333 
2334 }
2335 
2336 static const struct file_operations ext4_mb_seq_groups_fops = {
2337 	.owner		= THIS_MODULE,
2338 	.open		= ext4_mb_seq_groups_open,
2339 	.read		= seq_read,
2340 	.llseek		= seq_lseek,
2341 	.release	= seq_release,
2342 };
2343 
2344 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2345 {
2346 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2347 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2348 
2349 	BUG_ON(!cachep);
2350 	return cachep;
2351 }
2352 
2353 /*
2354  * Allocate the top-level s_group_info array for the specified number
2355  * of groups
2356  */
2357 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2358 {
2359 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2360 	unsigned size;
2361 	struct ext4_group_info ***new_groupinfo;
2362 
2363 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2364 		EXT4_DESC_PER_BLOCK_BITS(sb);
2365 	if (size <= sbi->s_group_info_size)
2366 		return 0;
2367 
2368 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2369 	new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
2370 	if (!new_groupinfo) {
2371 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2372 		return -ENOMEM;
2373 	}
2374 	if (sbi->s_group_info) {
2375 		memcpy(new_groupinfo, sbi->s_group_info,
2376 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2377 		kvfree(sbi->s_group_info);
2378 	}
2379 	sbi->s_group_info = new_groupinfo;
2380 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2381 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2382 		   sbi->s_group_info_size);
2383 	return 0;
2384 }
2385 
2386 /* Create and initialize ext4_group_info data for the given group. */
2387 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2388 			  struct ext4_group_desc *desc)
2389 {
2390 	int i;
2391 	int metalen = 0;
2392 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2393 	struct ext4_group_info **meta_group_info;
2394 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2395 
2396 	/*
2397 	 * First check if this group is the first of a reserved block.
2398 	 * If it's true, we have to allocate a new table of pointers
2399 	 * to ext4_group_info structures
2400 	 */
2401 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2402 		metalen = sizeof(*meta_group_info) <<
2403 			EXT4_DESC_PER_BLOCK_BITS(sb);
2404 		meta_group_info = kmalloc(metalen, GFP_NOFS);
2405 		if (meta_group_info == NULL) {
2406 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2407 				 "for a buddy group");
2408 			goto exit_meta_group_info;
2409 		}
2410 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2411 			meta_group_info;
2412 	}
2413 
2414 	meta_group_info =
2415 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2416 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2417 
2418 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2419 	if (meta_group_info[i] == NULL) {
2420 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2421 		goto exit_group_info;
2422 	}
2423 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2424 		&(meta_group_info[i]->bb_state));
2425 
2426 	/*
2427 	 * initialize bb_free to be able to skip
2428 	 * empty groups without initialization
2429 	 */
2430 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2431 		meta_group_info[i]->bb_free =
2432 			ext4_free_clusters_after_init(sb, group, desc);
2433 	} else {
2434 		meta_group_info[i]->bb_free =
2435 			ext4_free_group_clusters(sb, desc);
2436 	}
2437 
2438 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2439 	init_rwsem(&meta_group_info[i]->alloc_sem);
2440 	meta_group_info[i]->bb_free_root = RB_ROOT;
2441 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2442 
2443 #ifdef DOUBLE_CHECK
2444 	{
2445 		struct buffer_head *bh;
2446 		meta_group_info[i]->bb_bitmap =
2447 			kmalloc(sb->s_blocksize, GFP_NOFS);
2448 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2449 		bh = ext4_read_block_bitmap(sb, group);
2450 		BUG_ON(bh == NULL);
2451 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2452 			sb->s_blocksize);
2453 		put_bh(bh);
2454 	}
2455 #endif
2456 
2457 	return 0;
2458 
2459 exit_group_info:
2460 	/* If a meta_group_info table has been allocated, release it now */
2461 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2462 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2463 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2464 	}
2465 exit_meta_group_info:
2466 	return -ENOMEM;
2467 } /* ext4_mb_add_groupinfo */
2468 
2469 static int ext4_mb_init_backend(struct super_block *sb)
2470 {
2471 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2472 	ext4_group_t i;
2473 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2474 	int err;
2475 	struct ext4_group_desc *desc;
2476 	struct kmem_cache *cachep;
2477 
2478 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2479 	if (err)
2480 		return err;
2481 
2482 	sbi->s_buddy_cache = new_inode(sb);
2483 	if (sbi->s_buddy_cache == NULL) {
2484 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2485 		goto err_freesgi;
2486 	}
2487 	/* To avoid potentially colliding with an valid on-disk inode number,
2488 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2489 	 * not in the inode hash, so it should never be found by iget(), but
2490 	 * this will avoid confusion if it ever shows up during debugging. */
2491 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2492 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2493 	for (i = 0; i < ngroups; i++) {
2494 		desc = ext4_get_group_desc(sb, i, NULL);
2495 		if (desc == NULL) {
2496 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2497 			goto err_freebuddy;
2498 		}
2499 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2500 			goto err_freebuddy;
2501 	}
2502 
2503 	return 0;
2504 
2505 err_freebuddy:
2506 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2507 	while (i-- > 0)
2508 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2509 	i = sbi->s_group_info_size;
2510 	while (i-- > 0)
2511 		kfree(sbi->s_group_info[i]);
2512 	iput(sbi->s_buddy_cache);
2513 err_freesgi:
2514 	kvfree(sbi->s_group_info);
2515 	return -ENOMEM;
2516 }
2517 
2518 static void ext4_groupinfo_destroy_slabs(void)
2519 {
2520 	int i;
2521 
2522 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2523 		if (ext4_groupinfo_caches[i])
2524 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2525 		ext4_groupinfo_caches[i] = NULL;
2526 	}
2527 }
2528 
2529 static int ext4_groupinfo_create_slab(size_t size)
2530 {
2531 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2532 	int slab_size;
2533 	int blocksize_bits = order_base_2(size);
2534 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2535 	struct kmem_cache *cachep;
2536 
2537 	if (cache_index >= NR_GRPINFO_CACHES)
2538 		return -EINVAL;
2539 
2540 	if (unlikely(cache_index < 0))
2541 		cache_index = 0;
2542 
2543 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2544 	if (ext4_groupinfo_caches[cache_index]) {
2545 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2546 		return 0;	/* Already created */
2547 	}
2548 
2549 	slab_size = offsetof(struct ext4_group_info,
2550 				bb_counters[blocksize_bits + 2]);
2551 
2552 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2553 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2554 					NULL);
2555 
2556 	ext4_groupinfo_caches[cache_index] = cachep;
2557 
2558 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2559 	if (!cachep) {
2560 		printk(KERN_EMERG
2561 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2562 		return -ENOMEM;
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 int ext4_mb_init(struct super_block *sb)
2569 {
2570 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2571 	unsigned i, j;
2572 	unsigned offset;
2573 	unsigned max;
2574 	int ret;
2575 
2576 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2577 
2578 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2579 	if (sbi->s_mb_offsets == NULL) {
2580 		ret = -ENOMEM;
2581 		goto out;
2582 	}
2583 
2584 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2585 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2586 	if (sbi->s_mb_maxs == NULL) {
2587 		ret = -ENOMEM;
2588 		goto out;
2589 	}
2590 
2591 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2592 	if (ret < 0)
2593 		goto out;
2594 
2595 	/* order 0 is regular bitmap */
2596 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2597 	sbi->s_mb_offsets[0] = 0;
2598 
2599 	i = 1;
2600 	offset = 0;
2601 	max = sb->s_blocksize << 2;
2602 	do {
2603 		sbi->s_mb_offsets[i] = offset;
2604 		sbi->s_mb_maxs[i] = max;
2605 		offset += 1 << (sb->s_blocksize_bits - i);
2606 		max = max >> 1;
2607 		i++;
2608 	} while (i <= sb->s_blocksize_bits + 1);
2609 
2610 	spin_lock_init(&sbi->s_md_lock);
2611 	spin_lock_init(&sbi->s_bal_lock);
2612 
2613 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2614 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2615 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2616 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2617 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2618 	/*
2619 	 * The default group preallocation is 512, which for 4k block
2620 	 * sizes translates to 2 megabytes.  However for bigalloc file
2621 	 * systems, this is probably too big (i.e, if the cluster size
2622 	 * is 1 megabyte, then group preallocation size becomes half a
2623 	 * gigabyte!).  As a default, we will keep a two megabyte
2624 	 * group pralloc size for cluster sizes up to 64k, and after
2625 	 * that, we will force a minimum group preallocation size of
2626 	 * 32 clusters.  This translates to 8 megs when the cluster
2627 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2628 	 * which seems reasonable as a default.
2629 	 */
2630 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2631 				       sbi->s_cluster_bits, 32);
2632 	/*
2633 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2634 	 * to the lowest multiple of s_stripe which is bigger than
2635 	 * the s_mb_group_prealloc as determined above. We want
2636 	 * the preallocation size to be an exact multiple of the
2637 	 * RAID stripe size so that preallocations don't fragment
2638 	 * the stripes.
2639 	 */
2640 	if (sbi->s_stripe > 1) {
2641 		sbi->s_mb_group_prealloc = roundup(
2642 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2643 	}
2644 
2645 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2646 	if (sbi->s_locality_groups == NULL) {
2647 		ret = -ENOMEM;
2648 		goto out;
2649 	}
2650 	for_each_possible_cpu(i) {
2651 		struct ext4_locality_group *lg;
2652 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2653 		mutex_init(&lg->lg_mutex);
2654 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2655 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2656 		spin_lock_init(&lg->lg_prealloc_lock);
2657 	}
2658 
2659 	/* init file for buddy data */
2660 	ret = ext4_mb_init_backend(sb);
2661 	if (ret != 0)
2662 		goto out_free_locality_groups;
2663 
2664 	if (sbi->s_proc)
2665 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2666 				 &ext4_mb_seq_groups_fops, sb);
2667 
2668 	return 0;
2669 
2670 out_free_locality_groups:
2671 	free_percpu(sbi->s_locality_groups);
2672 	sbi->s_locality_groups = NULL;
2673 out:
2674 	kfree(sbi->s_mb_offsets);
2675 	sbi->s_mb_offsets = NULL;
2676 	kfree(sbi->s_mb_maxs);
2677 	sbi->s_mb_maxs = NULL;
2678 	return ret;
2679 }
2680 
2681 /* need to called with the ext4 group lock held */
2682 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2683 {
2684 	struct ext4_prealloc_space *pa;
2685 	struct list_head *cur, *tmp;
2686 	int count = 0;
2687 
2688 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2689 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2690 		list_del(&pa->pa_group_list);
2691 		count++;
2692 		kmem_cache_free(ext4_pspace_cachep, pa);
2693 	}
2694 	if (count)
2695 		mb_debug(1, "mballoc: %u PAs left\n", count);
2696 
2697 }
2698 
2699 int ext4_mb_release(struct super_block *sb)
2700 {
2701 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2702 	ext4_group_t i;
2703 	int num_meta_group_infos;
2704 	struct ext4_group_info *grinfo;
2705 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2706 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2707 
2708 	if (sbi->s_proc)
2709 		remove_proc_entry("mb_groups", sbi->s_proc);
2710 
2711 	if (sbi->s_group_info) {
2712 		for (i = 0; i < ngroups; i++) {
2713 			grinfo = ext4_get_group_info(sb, i);
2714 #ifdef DOUBLE_CHECK
2715 			kfree(grinfo->bb_bitmap);
2716 #endif
2717 			ext4_lock_group(sb, i);
2718 			ext4_mb_cleanup_pa(grinfo);
2719 			ext4_unlock_group(sb, i);
2720 			kmem_cache_free(cachep, grinfo);
2721 		}
2722 		num_meta_group_infos = (ngroups +
2723 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2724 			EXT4_DESC_PER_BLOCK_BITS(sb);
2725 		for (i = 0; i < num_meta_group_infos; i++)
2726 			kfree(sbi->s_group_info[i]);
2727 		kvfree(sbi->s_group_info);
2728 	}
2729 	kfree(sbi->s_mb_offsets);
2730 	kfree(sbi->s_mb_maxs);
2731 	iput(sbi->s_buddy_cache);
2732 	if (sbi->s_mb_stats) {
2733 		ext4_msg(sb, KERN_INFO,
2734 		       "mballoc: %u blocks %u reqs (%u success)",
2735 				atomic_read(&sbi->s_bal_allocated),
2736 				atomic_read(&sbi->s_bal_reqs),
2737 				atomic_read(&sbi->s_bal_success));
2738 		ext4_msg(sb, KERN_INFO,
2739 		      "mballoc: %u extents scanned, %u goal hits, "
2740 				"%u 2^N hits, %u breaks, %u lost",
2741 				atomic_read(&sbi->s_bal_ex_scanned),
2742 				atomic_read(&sbi->s_bal_goals),
2743 				atomic_read(&sbi->s_bal_2orders),
2744 				atomic_read(&sbi->s_bal_breaks),
2745 				atomic_read(&sbi->s_mb_lost_chunks));
2746 		ext4_msg(sb, KERN_INFO,
2747 		       "mballoc: %lu generated and it took %Lu",
2748 				sbi->s_mb_buddies_generated,
2749 				sbi->s_mb_generation_time);
2750 		ext4_msg(sb, KERN_INFO,
2751 		       "mballoc: %u preallocated, %u discarded",
2752 				atomic_read(&sbi->s_mb_preallocated),
2753 				atomic_read(&sbi->s_mb_discarded));
2754 	}
2755 
2756 	free_percpu(sbi->s_locality_groups);
2757 
2758 	return 0;
2759 }
2760 
2761 static inline int ext4_issue_discard(struct super_block *sb,
2762 		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
2763 {
2764 	ext4_fsblk_t discard_block;
2765 
2766 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2767 			 ext4_group_first_block_no(sb, block_group));
2768 	count = EXT4_C2B(EXT4_SB(sb), count);
2769 	trace_ext4_discard_blocks(sb,
2770 			(unsigned long long) discard_block, count);
2771 	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2772 }
2773 
2774 /*
2775  * This function is called by the jbd2 layer once the commit has finished,
2776  * so we know we can free the blocks that were released with that commit.
2777  */
2778 static void ext4_free_data_callback(struct super_block *sb,
2779 				    struct ext4_journal_cb_entry *jce,
2780 				    int rc)
2781 {
2782 	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
2783 	struct ext4_buddy e4b;
2784 	struct ext4_group_info *db;
2785 	int err, count = 0, count2 = 0;
2786 
2787 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2788 		 entry->efd_count, entry->efd_group, entry);
2789 
2790 	if (test_opt(sb, DISCARD)) {
2791 		err = ext4_issue_discard(sb, entry->efd_group,
2792 					 entry->efd_start_cluster,
2793 					 entry->efd_count);
2794 		if (err && err != -EOPNOTSUPP)
2795 			ext4_msg(sb, KERN_WARNING, "discard request in"
2796 				 " group:%d block:%d count:%d failed"
2797 				 " with %d", entry->efd_group,
2798 				 entry->efd_start_cluster,
2799 				 entry->efd_count, err);
2800 	}
2801 
2802 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2803 	/* we expect to find existing buddy because it's pinned */
2804 	BUG_ON(err != 0);
2805 
2806 
2807 	db = e4b.bd_info;
2808 	/* there are blocks to put in buddy to make them really free */
2809 	count += entry->efd_count;
2810 	count2++;
2811 	ext4_lock_group(sb, entry->efd_group);
2812 	/* Take it out of per group rb tree */
2813 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2814 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2815 
2816 	/*
2817 	 * Clear the trimmed flag for the group so that the next
2818 	 * ext4_trim_fs can trim it.
2819 	 * If the volume is mounted with -o discard, online discard
2820 	 * is supported and the free blocks will be trimmed online.
2821 	 */
2822 	if (!test_opt(sb, DISCARD))
2823 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2824 
2825 	if (!db->bb_free_root.rb_node) {
2826 		/* No more items in the per group rb tree
2827 		 * balance refcounts from ext4_mb_free_metadata()
2828 		 */
2829 		page_cache_release(e4b.bd_buddy_page);
2830 		page_cache_release(e4b.bd_bitmap_page);
2831 	}
2832 	ext4_unlock_group(sb, entry->efd_group);
2833 	kmem_cache_free(ext4_free_data_cachep, entry);
2834 	ext4_mb_unload_buddy(&e4b);
2835 
2836 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2837 }
2838 
2839 int __init ext4_init_mballoc(void)
2840 {
2841 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2842 					SLAB_RECLAIM_ACCOUNT);
2843 	if (ext4_pspace_cachep == NULL)
2844 		return -ENOMEM;
2845 
2846 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2847 				    SLAB_RECLAIM_ACCOUNT);
2848 	if (ext4_ac_cachep == NULL) {
2849 		kmem_cache_destroy(ext4_pspace_cachep);
2850 		return -ENOMEM;
2851 	}
2852 
2853 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2854 					   SLAB_RECLAIM_ACCOUNT);
2855 	if (ext4_free_data_cachep == NULL) {
2856 		kmem_cache_destroy(ext4_pspace_cachep);
2857 		kmem_cache_destroy(ext4_ac_cachep);
2858 		return -ENOMEM;
2859 	}
2860 	return 0;
2861 }
2862 
2863 void ext4_exit_mballoc(void)
2864 {
2865 	/*
2866 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2867 	 * before destroying the slab cache.
2868 	 */
2869 	rcu_barrier();
2870 	kmem_cache_destroy(ext4_pspace_cachep);
2871 	kmem_cache_destroy(ext4_ac_cachep);
2872 	kmem_cache_destroy(ext4_free_data_cachep);
2873 	ext4_groupinfo_destroy_slabs();
2874 }
2875 
2876 
2877 /*
2878  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2879  * Returns 0 if success or error code
2880  */
2881 static noinline_for_stack int
2882 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2883 				handle_t *handle, unsigned int reserv_clstrs)
2884 {
2885 	struct buffer_head *bitmap_bh = NULL;
2886 	struct ext4_group_desc *gdp;
2887 	struct buffer_head *gdp_bh;
2888 	struct ext4_sb_info *sbi;
2889 	struct super_block *sb;
2890 	ext4_fsblk_t block;
2891 	int err, len;
2892 
2893 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2894 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2895 
2896 	sb = ac->ac_sb;
2897 	sbi = EXT4_SB(sb);
2898 
2899 	err = -EIO;
2900 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2901 	if (!bitmap_bh)
2902 		goto out_err;
2903 
2904 	BUFFER_TRACE(bitmap_bh, "getting write access");
2905 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2906 	if (err)
2907 		goto out_err;
2908 
2909 	err = -EIO;
2910 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2911 	if (!gdp)
2912 		goto out_err;
2913 
2914 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2915 			ext4_free_group_clusters(sb, gdp));
2916 
2917 	BUFFER_TRACE(gdp_bh, "get_write_access");
2918 	err = ext4_journal_get_write_access(handle, gdp_bh);
2919 	if (err)
2920 		goto out_err;
2921 
2922 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2923 
2924 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2925 	if (!ext4_data_block_valid(sbi, block, len)) {
2926 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2927 			   "fs metadata", block, block+len);
2928 		/* File system mounted not to panic on error
2929 		 * Fix the bitmap and repeat the block allocation
2930 		 * We leak some of the blocks here.
2931 		 */
2932 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2933 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2934 			      ac->ac_b_ex.fe_len);
2935 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2936 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2937 		if (!err)
2938 			err = -EAGAIN;
2939 		goto out_err;
2940 	}
2941 
2942 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2943 #ifdef AGGRESSIVE_CHECK
2944 	{
2945 		int i;
2946 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2947 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2948 						bitmap_bh->b_data));
2949 		}
2950 	}
2951 #endif
2952 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2953 		      ac->ac_b_ex.fe_len);
2954 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2955 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2956 		ext4_free_group_clusters_set(sb, gdp,
2957 					     ext4_free_clusters_after_init(sb,
2958 						ac->ac_b_ex.fe_group, gdp));
2959 	}
2960 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2961 	ext4_free_group_clusters_set(sb, gdp, len);
2962 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
2963 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2964 
2965 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2966 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
2967 	/*
2968 	 * Now reduce the dirty block count also. Should not go negative
2969 	 */
2970 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2971 		/* release all the reserved blocks if non delalloc */
2972 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2973 				   reserv_clstrs);
2974 
2975 	if (sbi->s_log_groups_per_flex) {
2976 		ext4_group_t flex_group = ext4_flex_group(sbi,
2977 							  ac->ac_b_ex.fe_group);
2978 		atomic64_sub(ac->ac_b_ex.fe_len,
2979 			     &sbi->s_flex_groups[flex_group].free_clusters);
2980 	}
2981 
2982 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2983 	if (err)
2984 		goto out_err;
2985 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2986 
2987 out_err:
2988 	brelse(bitmap_bh);
2989 	return err;
2990 }
2991 
2992 /*
2993  * here we normalize request for locality group
2994  * Group request are normalized to s_mb_group_prealloc, which goes to
2995  * s_strip if we set the same via mount option.
2996  * s_mb_group_prealloc can be configured via
2997  * /sys/fs/ext4/<partition>/mb_group_prealloc
2998  *
2999  * XXX: should we try to preallocate more than the group has now?
3000  */
3001 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3002 {
3003 	struct super_block *sb = ac->ac_sb;
3004 	struct ext4_locality_group *lg = ac->ac_lg;
3005 
3006 	BUG_ON(lg == NULL);
3007 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3008 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
3009 		current->pid, ac->ac_g_ex.fe_len);
3010 }
3011 
3012 /*
3013  * Normalization means making request better in terms of
3014  * size and alignment
3015  */
3016 static noinline_for_stack void
3017 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3018 				struct ext4_allocation_request *ar)
3019 {
3020 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3021 	int bsbits, max;
3022 	ext4_lblk_t end;
3023 	loff_t size, start_off;
3024 	loff_t orig_size __maybe_unused;
3025 	ext4_lblk_t start;
3026 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3027 	struct ext4_prealloc_space *pa;
3028 
3029 	/* do normalize only data requests, metadata requests
3030 	   do not need preallocation */
3031 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3032 		return;
3033 
3034 	/* sometime caller may want exact blocks */
3035 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3036 		return;
3037 
3038 	/* caller may indicate that preallocation isn't
3039 	 * required (it's a tail, for example) */
3040 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3041 		return;
3042 
3043 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3044 		ext4_mb_normalize_group_request(ac);
3045 		return ;
3046 	}
3047 
3048 	bsbits = ac->ac_sb->s_blocksize_bits;
3049 
3050 	/* first, let's learn actual file size
3051 	 * given current request is allocated */
3052 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3053 	size = size << bsbits;
3054 	if (size < i_size_read(ac->ac_inode))
3055 		size = i_size_read(ac->ac_inode);
3056 	orig_size = size;
3057 
3058 	/* max size of free chunks */
3059 	max = 2 << bsbits;
3060 
3061 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
3062 		(req <= (size) || max <= (chunk_size))
3063 
3064 	/* first, try to predict filesize */
3065 	/* XXX: should this table be tunable? */
3066 	start_off = 0;
3067 	if (size <= 16 * 1024) {
3068 		size = 16 * 1024;
3069 	} else if (size <= 32 * 1024) {
3070 		size = 32 * 1024;
3071 	} else if (size <= 64 * 1024) {
3072 		size = 64 * 1024;
3073 	} else if (size <= 128 * 1024) {
3074 		size = 128 * 1024;
3075 	} else if (size <= 256 * 1024) {
3076 		size = 256 * 1024;
3077 	} else if (size <= 512 * 1024) {
3078 		size = 512 * 1024;
3079 	} else if (size <= 1024 * 1024) {
3080 		size = 1024 * 1024;
3081 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3082 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3083 						(21 - bsbits)) << 21;
3084 		size = 2 * 1024 * 1024;
3085 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3086 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3087 							(22 - bsbits)) << 22;
3088 		size = 4 * 1024 * 1024;
3089 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3090 					(8<<20)>>bsbits, max, 8 * 1024)) {
3091 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3092 							(23 - bsbits)) << 23;
3093 		size = 8 * 1024 * 1024;
3094 	} else {
3095 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3096 		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3097 					      ac->ac_o_ex.fe_len) << bsbits;
3098 	}
3099 	size = size >> bsbits;
3100 	start = start_off >> bsbits;
3101 
3102 	/* don't cover already allocated blocks in selected range */
3103 	if (ar->pleft && start <= ar->lleft) {
3104 		size -= ar->lleft + 1 - start;
3105 		start = ar->lleft + 1;
3106 	}
3107 	if (ar->pright && start + size - 1 >= ar->lright)
3108 		size -= start + size - ar->lright;
3109 
3110 	end = start + size;
3111 
3112 	/* check we don't cross already preallocated blocks */
3113 	rcu_read_lock();
3114 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3115 		ext4_lblk_t pa_end;
3116 
3117 		if (pa->pa_deleted)
3118 			continue;
3119 		spin_lock(&pa->pa_lock);
3120 		if (pa->pa_deleted) {
3121 			spin_unlock(&pa->pa_lock);
3122 			continue;
3123 		}
3124 
3125 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3126 						  pa->pa_len);
3127 
3128 		/* PA must not overlap original request */
3129 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3130 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3131 
3132 		/* skip PAs this normalized request doesn't overlap with */
3133 		if (pa->pa_lstart >= end || pa_end <= start) {
3134 			spin_unlock(&pa->pa_lock);
3135 			continue;
3136 		}
3137 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3138 
3139 		/* adjust start or end to be adjacent to this pa */
3140 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3141 			BUG_ON(pa_end < start);
3142 			start = pa_end;
3143 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3144 			BUG_ON(pa->pa_lstart > end);
3145 			end = pa->pa_lstart;
3146 		}
3147 		spin_unlock(&pa->pa_lock);
3148 	}
3149 	rcu_read_unlock();
3150 	size = end - start;
3151 
3152 	/* XXX: extra loop to check we really don't overlap preallocations */
3153 	rcu_read_lock();
3154 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3155 		ext4_lblk_t pa_end;
3156 
3157 		spin_lock(&pa->pa_lock);
3158 		if (pa->pa_deleted == 0) {
3159 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3160 							  pa->pa_len);
3161 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3162 		}
3163 		spin_unlock(&pa->pa_lock);
3164 	}
3165 	rcu_read_unlock();
3166 
3167 	if (start + size <= ac->ac_o_ex.fe_logical &&
3168 			start > ac->ac_o_ex.fe_logical) {
3169 		ext4_msg(ac->ac_sb, KERN_ERR,
3170 			 "start %lu, size %lu, fe_logical %lu",
3171 			 (unsigned long) start, (unsigned long) size,
3172 			 (unsigned long) ac->ac_o_ex.fe_logical);
3173 		BUG();
3174 	}
3175 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3176 
3177 	/* now prepare goal request */
3178 
3179 	/* XXX: is it better to align blocks WRT to logical
3180 	 * placement or satisfy big request as is */
3181 	ac->ac_g_ex.fe_logical = start;
3182 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3183 
3184 	/* define goal start in order to merge */
3185 	if (ar->pright && (ar->lright == (start + size))) {
3186 		/* merge to the right */
3187 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3188 						&ac->ac_f_ex.fe_group,
3189 						&ac->ac_f_ex.fe_start);
3190 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3191 	}
3192 	if (ar->pleft && (ar->lleft + 1 == start)) {
3193 		/* merge to the left */
3194 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3195 						&ac->ac_f_ex.fe_group,
3196 						&ac->ac_f_ex.fe_start);
3197 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3198 	}
3199 
3200 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3201 		(unsigned) orig_size, (unsigned) start);
3202 }
3203 
3204 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3205 {
3206 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3207 
3208 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3209 		atomic_inc(&sbi->s_bal_reqs);
3210 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3211 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3212 			atomic_inc(&sbi->s_bal_success);
3213 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3214 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3215 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3216 			atomic_inc(&sbi->s_bal_goals);
3217 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3218 			atomic_inc(&sbi->s_bal_breaks);
3219 	}
3220 
3221 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3222 		trace_ext4_mballoc_alloc(ac);
3223 	else
3224 		trace_ext4_mballoc_prealloc(ac);
3225 }
3226 
3227 /*
3228  * Called on failure; free up any blocks from the inode PA for this
3229  * context.  We don't need this for MB_GROUP_PA because we only change
3230  * pa_free in ext4_mb_release_context(), but on failure, we've already
3231  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3232  */
3233 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3234 {
3235 	struct ext4_prealloc_space *pa = ac->ac_pa;
3236 	struct ext4_buddy e4b;
3237 	int err;
3238 
3239 	if (pa == NULL) {
3240 		if (ac->ac_f_ex.fe_len == 0)
3241 			return;
3242 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3243 		if (err) {
3244 			/*
3245 			 * This should never happen since we pin the
3246 			 * pages in the ext4_allocation_context so
3247 			 * ext4_mb_load_buddy() should never fail.
3248 			 */
3249 			WARN(1, "mb_load_buddy failed (%d)", err);
3250 			return;
3251 		}
3252 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3253 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3254 			       ac->ac_f_ex.fe_len);
3255 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3256 		ext4_mb_unload_buddy(&e4b);
3257 		return;
3258 	}
3259 	if (pa->pa_type == MB_INODE_PA)
3260 		pa->pa_free += ac->ac_b_ex.fe_len;
3261 }
3262 
3263 /*
3264  * use blocks preallocated to inode
3265  */
3266 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3267 				struct ext4_prealloc_space *pa)
3268 {
3269 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3270 	ext4_fsblk_t start;
3271 	ext4_fsblk_t end;
3272 	int len;
3273 
3274 	/* found preallocated blocks, use them */
3275 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3276 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3277 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3278 	len = EXT4_NUM_B2C(sbi, end - start);
3279 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3280 					&ac->ac_b_ex.fe_start);
3281 	ac->ac_b_ex.fe_len = len;
3282 	ac->ac_status = AC_STATUS_FOUND;
3283 	ac->ac_pa = pa;
3284 
3285 	BUG_ON(start < pa->pa_pstart);
3286 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3287 	BUG_ON(pa->pa_free < len);
3288 	pa->pa_free -= len;
3289 
3290 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3291 }
3292 
3293 /*
3294  * use blocks preallocated to locality group
3295  */
3296 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3297 				struct ext4_prealloc_space *pa)
3298 {
3299 	unsigned int len = ac->ac_o_ex.fe_len;
3300 
3301 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3302 					&ac->ac_b_ex.fe_group,
3303 					&ac->ac_b_ex.fe_start);
3304 	ac->ac_b_ex.fe_len = len;
3305 	ac->ac_status = AC_STATUS_FOUND;
3306 	ac->ac_pa = pa;
3307 
3308 	/* we don't correct pa_pstart or pa_plen here to avoid
3309 	 * possible race when the group is being loaded concurrently
3310 	 * instead we correct pa later, after blocks are marked
3311 	 * in on-disk bitmap -- see ext4_mb_release_context()
3312 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3313 	 */
3314 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3315 }
3316 
3317 /*
3318  * Return the prealloc space that have minimal distance
3319  * from the goal block. @cpa is the prealloc
3320  * space that is having currently known minimal distance
3321  * from the goal block.
3322  */
3323 static struct ext4_prealloc_space *
3324 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3325 			struct ext4_prealloc_space *pa,
3326 			struct ext4_prealloc_space *cpa)
3327 {
3328 	ext4_fsblk_t cur_distance, new_distance;
3329 
3330 	if (cpa == NULL) {
3331 		atomic_inc(&pa->pa_count);
3332 		return pa;
3333 	}
3334 	cur_distance = abs(goal_block - cpa->pa_pstart);
3335 	new_distance = abs(goal_block - pa->pa_pstart);
3336 
3337 	if (cur_distance <= new_distance)
3338 		return cpa;
3339 
3340 	/* drop the previous reference */
3341 	atomic_dec(&cpa->pa_count);
3342 	atomic_inc(&pa->pa_count);
3343 	return pa;
3344 }
3345 
3346 /*
3347  * search goal blocks in preallocated space
3348  */
3349 static noinline_for_stack int
3350 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3351 {
3352 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3353 	int order, i;
3354 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3355 	struct ext4_locality_group *lg;
3356 	struct ext4_prealloc_space *pa, *cpa = NULL;
3357 	ext4_fsblk_t goal_block;
3358 
3359 	/* only data can be preallocated */
3360 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3361 		return 0;
3362 
3363 	/* first, try per-file preallocation */
3364 	rcu_read_lock();
3365 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3366 
3367 		/* all fields in this condition don't change,
3368 		 * so we can skip locking for them */
3369 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3370 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3371 					       EXT4_C2B(sbi, pa->pa_len)))
3372 			continue;
3373 
3374 		/* non-extent files can't have physical blocks past 2^32 */
3375 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3376 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3377 		     EXT4_MAX_BLOCK_FILE_PHYS))
3378 			continue;
3379 
3380 		/* found preallocated blocks, use them */
3381 		spin_lock(&pa->pa_lock);
3382 		if (pa->pa_deleted == 0 && pa->pa_free) {
3383 			atomic_inc(&pa->pa_count);
3384 			ext4_mb_use_inode_pa(ac, pa);
3385 			spin_unlock(&pa->pa_lock);
3386 			ac->ac_criteria = 10;
3387 			rcu_read_unlock();
3388 			return 1;
3389 		}
3390 		spin_unlock(&pa->pa_lock);
3391 	}
3392 	rcu_read_unlock();
3393 
3394 	/* can we use group allocation? */
3395 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3396 		return 0;
3397 
3398 	/* inode may have no locality group for some reason */
3399 	lg = ac->ac_lg;
3400 	if (lg == NULL)
3401 		return 0;
3402 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3403 	if (order > PREALLOC_TB_SIZE - 1)
3404 		/* The max size of hash table is PREALLOC_TB_SIZE */
3405 		order = PREALLOC_TB_SIZE - 1;
3406 
3407 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3408 	/*
3409 	 * search for the prealloc space that is having
3410 	 * minimal distance from the goal block.
3411 	 */
3412 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3413 		rcu_read_lock();
3414 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3415 					pa_inode_list) {
3416 			spin_lock(&pa->pa_lock);
3417 			if (pa->pa_deleted == 0 &&
3418 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3419 
3420 				cpa = ext4_mb_check_group_pa(goal_block,
3421 								pa, cpa);
3422 			}
3423 			spin_unlock(&pa->pa_lock);
3424 		}
3425 		rcu_read_unlock();
3426 	}
3427 	if (cpa) {
3428 		ext4_mb_use_group_pa(ac, cpa);
3429 		ac->ac_criteria = 20;
3430 		return 1;
3431 	}
3432 	return 0;
3433 }
3434 
3435 /*
3436  * the function goes through all block freed in the group
3437  * but not yet committed and marks them used in in-core bitmap.
3438  * buddy must be generated from this bitmap
3439  * Need to be called with the ext4 group lock held
3440  */
3441 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3442 						ext4_group_t group)
3443 {
3444 	struct rb_node *n;
3445 	struct ext4_group_info *grp;
3446 	struct ext4_free_data *entry;
3447 
3448 	grp = ext4_get_group_info(sb, group);
3449 	n = rb_first(&(grp->bb_free_root));
3450 
3451 	while (n) {
3452 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3453 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3454 		n = rb_next(n);
3455 	}
3456 	return;
3457 }
3458 
3459 /*
3460  * the function goes through all preallocation in this group and marks them
3461  * used in in-core bitmap. buddy must be generated from this bitmap
3462  * Need to be called with ext4 group lock held
3463  */
3464 static noinline_for_stack
3465 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3466 					ext4_group_t group)
3467 {
3468 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3469 	struct ext4_prealloc_space *pa;
3470 	struct list_head *cur;
3471 	ext4_group_t groupnr;
3472 	ext4_grpblk_t start;
3473 	int preallocated = 0;
3474 	int len;
3475 
3476 	/* all form of preallocation discards first load group,
3477 	 * so the only competing code is preallocation use.
3478 	 * we don't need any locking here
3479 	 * notice we do NOT ignore preallocations with pa_deleted
3480 	 * otherwise we could leave used blocks available for
3481 	 * allocation in buddy when concurrent ext4_mb_put_pa()
3482 	 * is dropping preallocation
3483 	 */
3484 	list_for_each(cur, &grp->bb_prealloc_list) {
3485 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3486 		spin_lock(&pa->pa_lock);
3487 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3488 					     &groupnr, &start);
3489 		len = pa->pa_len;
3490 		spin_unlock(&pa->pa_lock);
3491 		if (unlikely(len == 0))
3492 			continue;
3493 		BUG_ON(groupnr != group);
3494 		ext4_set_bits(bitmap, start, len);
3495 		preallocated += len;
3496 	}
3497 	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
3498 }
3499 
3500 static void ext4_mb_pa_callback(struct rcu_head *head)
3501 {
3502 	struct ext4_prealloc_space *pa;
3503 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3504 
3505 	BUG_ON(atomic_read(&pa->pa_count));
3506 	BUG_ON(pa->pa_deleted == 0);
3507 	kmem_cache_free(ext4_pspace_cachep, pa);
3508 }
3509 
3510 /*
3511  * drops a reference to preallocated space descriptor
3512  * if this was the last reference and the space is consumed
3513  */
3514 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3515 			struct super_block *sb, struct ext4_prealloc_space *pa)
3516 {
3517 	ext4_group_t grp;
3518 	ext4_fsblk_t grp_blk;
3519 
3520 	/* in this short window concurrent discard can set pa_deleted */
3521 	spin_lock(&pa->pa_lock);
3522 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3523 		spin_unlock(&pa->pa_lock);
3524 		return;
3525 	}
3526 
3527 	if (pa->pa_deleted == 1) {
3528 		spin_unlock(&pa->pa_lock);
3529 		return;
3530 	}
3531 
3532 	pa->pa_deleted = 1;
3533 	spin_unlock(&pa->pa_lock);
3534 
3535 	grp_blk = pa->pa_pstart;
3536 	/*
3537 	 * If doing group-based preallocation, pa_pstart may be in the
3538 	 * next group when pa is used up
3539 	 */
3540 	if (pa->pa_type == MB_GROUP_PA)
3541 		grp_blk--;
3542 
3543 	grp = ext4_get_group_number(sb, grp_blk);
3544 
3545 	/*
3546 	 * possible race:
3547 	 *
3548 	 *  P1 (buddy init)			P2 (regular allocation)
3549 	 *					find block B in PA
3550 	 *  copy on-disk bitmap to buddy
3551 	 *  					mark B in on-disk bitmap
3552 	 *					drop PA from group
3553 	 *  mark all PAs in buddy
3554 	 *
3555 	 * thus, P1 initializes buddy with B available. to prevent this
3556 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3557 	 * against that pair
3558 	 */
3559 	ext4_lock_group(sb, grp);
3560 	list_del(&pa->pa_group_list);
3561 	ext4_unlock_group(sb, grp);
3562 
3563 	spin_lock(pa->pa_obj_lock);
3564 	list_del_rcu(&pa->pa_inode_list);
3565 	spin_unlock(pa->pa_obj_lock);
3566 
3567 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3568 }
3569 
3570 /*
3571  * creates new preallocated space for given inode
3572  */
3573 static noinline_for_stack int
3574 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3575 {
3576 	struct super_block *sb = ac->ac_sb;
3577 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3578 	struct ext4_prealloc_space *pa;
3579 	struct ext4_group_info *grp;
3580 	struct ext4_inode_info *ei;
3581 
3582 	/* preallocate only when found space is larger then requested */
3583 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3584 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3585 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3586 
3587 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3588 	if (pa == NULL)
3589 		return -ENOMEM;
3590 
3591 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3592 		int winl;
3593 		int wins;
3594 		int win;
3595 		int offs;
3596 
3597 		/* we can't allocate as much as normalizer wants.
3598 		 * so, found space must get proper lstart
3599 		 * to cover original request */
3600 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3601 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3602 
3603 		/* we're limited by original request in that
3604 		 * logical block must be covered any way
3605 		 * winl is window we can move our chunk within */
3606 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3607 
3608 		/* also, we should cover whole original request */
3609 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3610 
3611 		/* the smallest one defines real window */
3612 		win = min(winl, wins);
3613 
3614 		offs = ac->ac_o_ex.fe_logical %
3615 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3616 		if (offs && offs < win)
3617 			win = offs;
3618 
3619 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3620 			EXT4_NUM_B2C(sbi, win);
3621 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3622 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3623 	}
3624 
3625 	/* preallocation can change ac_b_ex, thus we store actually
3626 	 * allocated blocks for history */
3627 	ac->ac_f_ex = ac->ac_b_ex;
3628 
3629 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3630 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3631 	pa->pa_len = ac->ac_b_ex.fe_len;
3632 	pa->pa_free = pa->pa_len;
3633 	atomic_set(&pa->pa_count, 1);
3634 	spin_lock_init(&pa->pa_lock);
3635 	INIT_LIST_HEAD(&pa->pa_inode_list);
3636 	INIT_LIST_HEAD(&pa->pa_group_list);
3637 	pa->pa_deleted = 0;
3638 	pa->pa_type = MB_INODE_PA;
3639 
3640 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3641 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3642 	trace_ext4_mb_new_inode_pa(ac, pa);
3643 
3644 	ext4_mb_use_inode_pa(ac, pa);
3645 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3646 
3647 	ei = EXT4_I(ac->ac_inode);
3648 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3649 
3650 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3651 	pa->pa_inode = ac->ac_inode;
3652 
3653 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3654 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3655 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3656 
3657 	spin_lock(pa->pa_obj_lock);
3658 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3659 	spin_unlock(pa->pa_obj_lock);
3660 
3661 	return 0;
3662 }
3663 
3664 /*
3665  * creates new preallocated space for locality group inodes belongs to
3666  */
3667 static noinline_for_stack int
3668 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3669 {
3670 	struct super_block *sb = ac->ac_sb;
3671 	struct ext4_locality_group *lg;
3672 	struct ext4_prealloc_space *pa;
3673 	struct ext4_group_info *grp;
3674 
3675 	/* preallocate only when found space is larger then requested */
3676 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3677 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3678 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3679 
3680 	BUG_ON(ext4_pspace_cachep == NULL);
3681 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3682 	if (pa == NULL)
3683 		return -ENOMEM;
3684 
3685 	/* preallocation can change ac_b_ex, thus we store actually
3686 	 * allocated blocks for history */
3687 	ac->ac_f_ex = ac->ac_b_ex;
3688 
3689 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3690 	pa->pa_lstart = pa->pa_pstart;
3691 	pa->pa_len = ac->ac_b_ex.fe_len;
3692 	pa->pa_free = pa->pa_len;
3693 	atomic_set(&pa->pa_count, 1);
3694 	spin_lock_init(&pa->pa_lock);
3695 	INIT_LIST_HEAD(&pa->pa_inode_list);
3696 	INIT_LIST_HEAD(&pa->pa_group_list);
3697 	pa->pa_deleted = 0;
3698 	pa->pa_type = MB_GROUP_PA;
3699 
3700 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3701 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3702 	trace_ext4_mb_new_group_pa(ac, pa);
3703 
3704 	ext4_mb_use_group_pa(ac, pa);
3705 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3706 
3707 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3708 	lg = ac->ac_lg;
3709 	BUG_ON(lg == NULL);
3710 
3711 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3712 	pa->pa_inode = NULL;
3713 
3714 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3715 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3716 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3717 
3718 	/*
3719 	 * We will later add the new pa to the right bucket
3720 	 * after updating the pa_free in ext4_mb_release_context
3721 	 */
3722 	return 0;
3723 }
3724 
3725 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3726 {
3727 	int err;
3728 
3729 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3730 		err = ext4_mb_new_group_pa(ac);
3731 	else
3732 		err = ext4_mb_new_inode_pa(ac);
3733 	return err;
3734 }
3735 
3736 /*
3737  * finds all unused blocks in on-disk bitmap, frees them in
3738  * in-core bitmap and buddy.
3739  * @pa must be unlinked from inode and group lists, so that
3740  * nobody else can find/use it.
3741  * the caller MUST hold group/inode locks.
3742  * TODO: optimize the case when there are no in-core structures yet
3743  */
3744 static noinline_for_stack int
3745 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3746 			struct ext4_prealloc_space *pa)
3747 {
3748 	struct super_block *sb = e4b->bd_sb;
3749 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3750 	unsigned int end;
3751 	unsigned int next;
3752 	ext4_group_t group;
3753 	ext4_grpblk_t bit;
3754 	unsigned long long grp_blk_start;
3755 	int err = 0;
3756 	int free = 0;
3757 
3758 	BUG_ON(pa->pa_deleted == 0);
3759 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3760 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3761 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3762 	end = bit + pa->pa_len;
3763 
3764 	while (bit < end) {
3765 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3766 		if (bit >= end)
3767 			break;
3768 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3769 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3770 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3771 			 (unsigned) next - bit, (unsigned) group);
3772 		free += next - bit;
3773 
3774 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3775 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3776 						    EXT4_C2B(sbi, bit)),
3777 					       next - bit);
3778 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3779 		bit = next + 1;
3780 	}
3781 	if (free != pa->pa_free) {
3782 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3783 			 "pa %p: logic %lu, phys. %lu, len %lu",
3784 			 pa, (unsigned long) pa->pa_lstart,
3785 			 (unsigned long) pa->pa_pstart,
3786 			 (unsigned long) pa->pa_len);
3787 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3788 					free, pa->pa_free);
3789 		/*
3790 		 * pa is already deleted so we use the value obtained
3791 		 * from the bitmap and continue.
3792 		 */
3793 	}
3794 	atomic_add(free, &sbi->s_mb_discarded);
3795 
3796 	return err;
3797 }
3798 
3799 static noinline_for_stack int
3800 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3801 				struct ext4_prealloc_space *pa)
3802 {
3803 	struct super_block *sb = e4b->bd_sb;
3804 	ext4_group_t group;
3805 	ext4_grpblk_t bit;
3806 
3807 	trace_ext4_mb_release_group_pa(sb, pa);
3808 	BUG_ON(pa->pa_deleted == 0);
3809 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3810 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3811 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3812 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3813 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3814 
3815 	return 0;
3816 }
3817 
3818 /*
3819  * releases all preallocations in given group
3820  *
3821  * first, we need to decide discard policy:
3822  * - when do we discard
3823  *   1) ENOSPC
3824  * - how many do we discard
3825  *   1) how many requested
3826  */
3827 static noinline_for_stack int
3828 ext4_mb_discard_group_preallocations(struct super_block *sb,
3829 					ext4_group_t group, int needed)
3830 {
3831 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3832 	struct buffer_head *bitmap_bh = NULL;
3833 	struct ext4_prealloc_space *pa, *tmp;
3834 	struct list_head list;
3835 	struct ext4_buddy e4b;
3836 	int err;
3837 	int busy = 0;
3838 	int free = 0;
3839 
3840 	mb_debug(1, "discard preallocation for group %u\n", group);
3841 
3842 	if (list_empty(&grp->bb_prealloc_list))
3843 		return 0;
3844 
3845 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3846 	if (bitmap_bh == NULL) {
3847 		ext4_error(sb, "Error reading block bitmap for %u", group);
3848 		return 0;
3849 	}
3850 
3851 	err = ext4_mb_load_buddy(sb, group, &e4b);
3852 	if (err) {
3853 		ext4_error(sb, "Error loading buddy information for %u", group);
3854 		put_bh(bitmap_bh);
3855 		return 0;
3856 	}
3857 
3858 	if (needed == 0)
3859 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
3860 
3861 	INIT_LIST_HEAD(&list);
3862 repeat:
3863 	ext4_lock_group(sb, group);
3864 	list_for_each_entry_safe(pa, tmp,
3865 				&grp->bb_prealloc_list, pa_group_list) {
3866 		spin_lock(&pa->pa_lock);
3867 		if (atomic_read(&pa->pa_count)) {
3868 			spin_unlock(&pa->pa_lock);
3869 			busy = 1;
3870 			continue;
3871 		}
3872 		if (pa->pa_deleted) {
3873 			spin_unlock(&pa->pa_lock);
3874 			continue;
3875 		}
3876 
3877 		/* seems this one can be freed ... */
3878 		pa->pa_deleted = 1;
3879 
3880 		/* we can trust pa_free ... */
3881 		free += pa->pa_free;
3882 
3883 		spin_unlock(&pa->pa_lock);
3884 
3885 		list_del(&pa->pa_group_list);
3886 		list_add(&pa->u.pa_tmp_list, &list);
3887 	}
3888 
3889 	/* if we still need more blocks and some PAs were used, try again */
3890 	if (free < needed && busy) {
3891 		busy = 0;
3892 		ext4_unlock_group(sb, group);
3893 		cond_resched();
3894 		goto repeat;
3895 	}
3896 
3897 	/* found anything to free? */
3898 	if (list_empty(&list)) {
3899 		BUG_ON(free != 0);
3900 		goto out;
3901 	}
3902 
3903 	/* now free all selected PAs */
3904 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3905 
3906 		/* remove from object (inode or locality group) */
3907 		spin_lock(pa->pa_obj_lock);
3908 		list_del_rcu(&pa->pa_inode_list);
3909 		spin_unlock(pa->pa_obj_lock);
3910 
3911 		if (pa->pa_type == MB_GROUP_PA)
3912 			ext4_mb_release_group_pa(&e4b, pa);
3913 		else
3914 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3915 
3916 		list_del(&pa->u.pa_tmp_list);
3917 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3918 	}
3919 
3920 out:
3921 	ext4_unlock_group(sb, group);
3922 	ext4_mb_unload_buddy(&e4b);
3923 	put_bh(bitmap_bh);
3924 	return free;
3925 }
3926 
3927 /*
3928  * releases all non-used preallocated blocks for given inode
3929  *
3930  * It's important to discard preallocations under i_data_sem
3931  * We don't want another block to be served from the prealloc
3932  * space when we are discarding the inode prealloc space.
3933  *
3934  * FIXME!! Make sure it is valid at all the call sites
3935  */
3936 void ext4_discard_preallocations(struct inode *inode)
3937 {
3938 	struct ext4_inode_info *ei = EXT4_I(inode);
3939 	struct super_block *sb = inode->i_sb;
3940 	struct buffer_head *bitmap_bh = NULL;
3941 	struct ext4_prealloc_space *pa, *tmp;
3942 	ext4_group_t group = 0;
3943 	struct list_head list;
3944 	struct ext4_buddy e4b;
3945 	int err;
3946 
3947 	if (!S_ISREG(inode->i_mode)) {
3948 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3949 		return;
3950 	}
3951 
3952 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3953 	trace_ext4_discard_preallocations(inode);
3954 
3955 	INIT_LIST_HEAD(&list);
3956 
3957 repeat:
3958 	/* first, collect all pa's in the inode */
3959 	spin_lock(&ei->i_prealloc_lock);
3960 	while (!list_empty(&ei->i_prealloc_list)) {
3961 		pa = list_entry(ei->i_prealloc_list.next,
3962 				struct ext4_prealloc_space, pa_inode_list);
3963 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3964 		spin_lock(&pa->pa_lock);
3965 		if (atomic_read(&pa->pa_count)) {
3966 			/* this shouldn't happen often - nobody should
3967 			 * use preallocation while we're discarding it */
3968 			spin_unlock(&pa->pa_lock);
3969 			spin_unlock(&ei->i_prealloc_lock);
3970 			ext4_msg(sb, KERN_ERR,
3971 				 "uh-oh! used pa while discarding");
3972 			WARN_ON(1);
3973 			schedule_timeout_uninterruptible(HZ);
3974 			goto repeat;
3975 
3976 		}
3977 		if (pa->pa_deleted == 0) {
3978 			pa->pa_deleted = 1;
3979 			spin_unlock(&pa->pa_lock);
3980 			list_del_rcu(&pa->pa_inode_list);
3981 			list_add(&pa->u.pa_tmp_list, &list);
3982 			continue;
3983 		}
3984 
3985 		/* someone is deleting pa right now */
3986 		spin_unlock(&pa->pa_lock);
3987 		spin_unlock(&ei->i_prealloc_lock);
3988 
3989 		/* we have to wait here because pa_deleted
3990 		 * doesn't mean pa is already unlinked from
3991 		 * the list. as we might be called from
3992 		 * ->clear_inode() the inode will get freed
3993 		 * and concurrent thread which is unlinking
3994 		 * pa from inode's list may access already
3995 		 * freed memory, bad-bad-bad */
3996 
3997 		/* XXX: if this happens too often, we can
3998 		 * add a flag to force wait only in case
3999 		 * of ->clear_inode(), but not in case of
4000 		 * regular truncate */
4001 		schedule_timeout_uninterruptible(HZ);
4002 		goto repeat;
4003 	}
4004 	spin_unlock(&ei->i_prealloc_lock);
4005 
4006 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4007 		BUG_ON(pa->pa_type != MB_INODE_PA);
4008 		group = ext4_get_group_number(sb, pa->pa_pstart);
4009 
4010 		err = ext4_mb_load_buddy(sb, group, &e4b);
4011 		if (err) {
4012 			ext4_error(sb, "Error loading buddy information for %u",
4013 					group);
4014 			continue;
4015 		}
4016 
4017 		bitmap_bh = ext4_read_block_bitmap(sb, group);
4018 		if (bitmap_bh == NULL) {
4019 			ext4_error(sb, "Error reading block bitmap for %u",
4020 					group);
4021 			ext4_mb_unload_buddy(&e4b);
4022 			continue;
4023 		}
4024 
4025 		ext4_lock_group(sb, group);
4026 		list_del(&pa->pa_group_list);
4027 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4028 		ext4_unlock_group(sb, group);
4029 
4030 		ext4_mb_unload_buddy(&e4b);
4031 		put_bh(bitmap_bh);
4032 
4033 		list_del(&pa->u.pa_tmp_list);
4034 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4035 	}
4036 }
4037 
4038 #ifdef CONFIG_EXT4_DEBUG
4039 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4040 {
4041 	struct super_block *sb = ac->ac_sb;
4042 	ext4_group_t ngroups, i;
4043 
4044 	if (!ext4_mballoc_debug ||
4045 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
4046 		return;
4047 
4048 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
4049 			" Allocation context details:");
4050 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
4051 			ac->ac_status, ac->ac_flags);
4052 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
4053 		 	"goal %lu/%lu/%lu@%lu, "
4054 			"best %lu/%lu/%lu@%lu cr %d",
4055 			(unsigned long)ac->ac_o_ex.fe_group,
4056 			(unsigned long)ac->ac_o_ex.fe_start,
4057 			(unsigned long)ac->ac_o_ex.fe_len,
4058 			(unsigned long)ac->ac_o_ex.fe_logical,
4059 			(unsigned long)ac->ac_g_ex.fe_group,
4060 			(unsigned long)ac->ac_g_ex.fe_start,
4061 			(unsigned long)ac->ac_g_ex.fe_len,
4062 			(unsigned long)ac->ac_g_ex.fe_logical,
4063 			(unsigned long)ac->ac_b_ex.fe_group,
4064 			(unsigned long)ac->ac_b_ex.fe_start,
4065 			(unsigned long)ac->ac_b_ex.fe_len,
4066 			(unsigned long)ac->ac_b_ex.fe_logical,
4067 			(int)ac->ac_criteria);
4068 	ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
4069 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
4070 	ngroups = ext4_get_groups_count(sb);
4071 	for (i = 0; i < ngroups; i++) {
4072 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4073 		struct ext4_prealloc_space *pa;
4074 		ext4_grpblk_t start;
4075 		struct list_head *cur;
4076 		ext4_lock_group(sb, i);
4077 		list_for_each(cur, &grp->bb_prealloc_list) {
4078 			pa = list_entry(cur, struct ext4_prealloc_space,
4079 					pa_group_list);
4080 			spin_lock(&pa->pa_lock);
4081 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4082 						     NULL, &start);
4083 			spin_unlock(&pa->pa_lock);
4084 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
4085 			       start, pa->pa_len);
4086 		}
4087 		ext4_unlock_group(sb, i);
4088 
4089 		if (grp->bb_free == 0)
4090 			continue;
4091 		printk(KERN_ERR "%u: %d/%d \n",
4092 		       i, grp->bb_free, grp->bb_fragments);
4093 	}
4094 	printk(KERN_ERR "\n");
4095 }
4096 #else
4097 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4098 {
4099 	return;
4100 }
4101 #endif
4102 
4103 /*
4104  * We use locality group preallocation for small size file. The size of the
4105  * file is determined by the current size or the resulting size after
4106  * allocation which ever is larger
4107  *
4108  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4109  */
4110 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4111 {
4112 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4113 	int bsbits = ac->ac_sb->s_blocksize_bits;
4114 	loff_t size, isize;
4115 
4116 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4117 		return;
4118 
4119 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4120 		return;
4121 
4122 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4123 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4124 		>> bsbits;
4125 
4126 	if ((size == isize) &&
4127 	    !ext4_fs_is_busy(sbi) &&
4128 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4129 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4130 		return;
4131 	}
4132 
4133 	if (sbi->s_mb_group_prealloc <= 0) {
4134 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4135 		return;
4136 	}
4137 
4138 	/* don't use group allocation for large files */
4139 	size = max(size, isize);
4140 	if (size > sbi->s_mb_stream_request) {
4141 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4142 		return;
4143 	}
4144 
4145 	BUG_ON(ac->ac_lg != NULL);
4146 	/*
4147 	 * locality group prealloc space are per cpu. The reason for having
4148 	 * per cpu locality group is to reduce the contention between block
4149 	 * request from multiple CPUs.
4150 	 */
4151 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4152 
4153 	/* we're going to use group allocation */
4154 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4155 
4156 	/* serialize all allocations in the group */
4157 	mutex_lock(&ac->ac_lg->lg_mutex);
4158 }
4159 
4160 static noinline_for_stack int
4161 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4162 				struct ext4_allocation_request *ar)
4163 {
4164 	struct super_block *sb = ar->inode->i_sb;
4165 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4166 	struct ext4_super_block *es = sbi->s_es;
4167 	ext4_group_t group;
4168 	unsigned int len;
4169 	ext4_fsblk_t goal;
4170 	ext4_grpblk_t block;
4171 
4172 	/* we can't allocate > group size */
4173 	len = ar->len;
4174 
4175 	/* just a dirty hack to filter too big requests  */
4176 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4177 		len = EXT4_CLUSTERS_PER_GROUP(sb);
4178 
4179 	/* start searching from the goal */
4180 	goal = ar->goal;
4181 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4182 			goal >= ext4_blocks_count(es))
4183 		goal = le32_to_cpu(es->s_first_data_block);
4184 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4185 
4186 	/* set up allocation goals */
4187 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4188 	ac->ac_status = AC_STATUS_CONTINUE;
4189 	ac->ac_sb = sb;
4190 	ac->ac_inode = ar->inode;
4191 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4192 	ac->ac_o_ex.fe_group = group;
4193 	ac->ac_o_ex.fe_start = block;
4194 	ac->ac_o_ex.fe_len = len;
4195 	ac->ac_g_ex = ac->ac_o_ex;
4196 	ac->ac_flags = ar->flags;
4197 
4198 	/* we have to define context: we'll we work with a file or
4199 	 * locality group. this is a policy, actually */
4200 	ext4_mb_group_or_file(ac);
4201 
4202 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4203 			"left: %u/%u, right %u/%u to %swritable\n",
4204 			(unsigned) ar->len, (unsigned) ar->logical,
4205 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4206 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4207 			(unsigned) ar->lright, (unsigned) ar->pright,
4208 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4209 	return 0;
4210 
4211 }
4212 
4213 static noinline_for_stack void
4214 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4215 					struct ext4_locality_group *lg,
4216 					int order, int total_entries)
4217 {
4218 	ext4_group_t group = 0;
4219 	struct ext4_buddy e4b;
4220 	struct list_head discard_list;
4221 	struct ext4_prealloc_space *pa, *tmp;
4222 
4223 	mb_debug(1, "discard locality group preallocation\n");
4224 
4225 	INIT_LIST_HEAD(&discard_list);
4226 
4227 	spin_lock(&lg->lg_prealloc_lock);
4228 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4229 						pa_inode_list) {
4230 		spin_lock(&pa->pa_lock);
4231 		if (atomic_read(&pa->pa_count)) {
4232 			/*
4233 			 * This is the pa that we just used
4234 			 * for block allocation. So don't
4235 			 * free that
4236 			 */
4237 			spin_unlock(&pa->pa_lock);
4238 			continue;
4239 		}
4240 		if (pa->pa_deleted) {
4241 			spin_unlock(&pa->pa_lock);
4242 			continue;
4243 		}
4244 		/* only lg prealloc space */
4245 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4246 
4247 		/* seems this one can be freed ... */
4248 		pa->pa_deleted = 1;
4249 		spin_unlock(&pa->pa_lock);
4250 
4251 		list_del_rcu(&pa->pa_inode_list);
4252 		list_add(&pa->u.pa_tmp_list, &discard_list);
4253 
4254 		total_entries--;
4255 		if (total_entries <= 5) {
4256 			/*
4257 			 * we want to keep only 5 entries
4258 			 * allowing it to grow to 8. This
4259 			 * mak sure we don't call discard
4260 			 * soon for this list.
4261 			 */
4262 			break;
4263 		}
4264 	}
4265 	spin_unlock(&lg->lg_prealloc_lock);
4266 
4267 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4268 
4269 		group = ext4_get_group_number(sb, pa->pa_pstart);
4270 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4271 			ext4_error(sb, "Error loading buddy information for %u",
4272 					group);
4273 			continue;
4274 		}
4275 		ext4_lock_group(sb, group);
4276 		list_del(&pa->pa_group_list);
4277 		ext4_mb_release_group_pa(&e4b, pa);
4278 		ext4_unlock_group(sb, group);
4279 
4280 		ext4_mb_unload_buddy(&e4b);
4281 		list_del(&pa->u.pa_tmp_list);
4282 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4283 	}
4284 }
4285 
4286 /*
4287  * We have incremented pa_count. So it cannot be freed at this
4288  * point. Also we hold lg_mutex. So no parallel allocation is
4289  * possible from this lg. That means pa_free cannot be updated.
4290  *
4291  * A parallel ext4_mb_discard_group_preallocations is possible.
4292  * which can cause the lg_prealloc_list to be updated.
4293  */
4294 
4295 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4296 {
4297 	int order, added = 0, lg_prealloc_count = 1;
4298 	struct super_block *sb = ac->ac_sb;
4299 	struct ext4_locality_group *lg = ac->ac_lg;
4300 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4301 
4302 	order = fls(pa->pa_free) - 1;
4303 	if (order > PREALLOC_TB_SIZE - 1)
4304 		/* The max size of hash table is PREALLOC_TB_SIZE */
4305 		order = PREALLOC_TB_SIZE - 1;
4306 	/* Add the prealloc space to lg */
4307 	spin_lock(&lg->lg_prealloc_lock);
4308 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4309 						pa_inode_list) {
4310 		spin_lock(&tmp_pa->pa_lock);
4311 		if (tmp_pa->pa_deleted) {
4312 			spin_unlock(&tmp_pa->pa_lock);
4313 			continue;
4314 		}
4315 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4316 			/* Add to the tail of the previous entry */
4317 			list_add_tail_rcu(&pa->pa_inode_list,
4318 						&tmp_pa->pa_inode_list);
4319 			added = 1;
4320 			/*
4321 			 * we want to count the total
4322 			 * number of entries in the list
4323 			 */
4324 		}
4325 		spin_unlock(&tmp_pa->pa_lock);
4326 		lg_prealloc_count++;
4327 	}
4328 	if (!added)
4329 		list_add_tail_rcu(&pa->pa_inode_list,
4330 					&lg->lg_prealloc_list[order]);
4331 	spin_unlock(&lg->lg_prealloc_lock);
4332 
4333 	/* Now trim the list to be not more than 8 elements */
4334 	if (lg_prealloc_count > 8) {
4335 		ext4_mb_discard_lg_preallocations(sb, lg,
4336 						  order, lg_prealloc_count);
4337 		return;
4338 	}
4339 	return ;
4340 }
4341 
4342 /*
4343  * release all resource we used in allocation
4344  */
4345 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4346 {
4347 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4348 	struct ext4_prealloc_space *pa = ac->ac_pa;
4349 	if (pa) {
4350 		if (pa->pa_type == MB_GROUP_PA) {
4351 			/* see comment in ext4_mb_use_group_pa() */
4352 			spin_lock(&pa->pa_lock);
4353 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4354 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4355 			pa->pa_free -= ac->ac_b_ex.fe_len;
4356 			pa->pa_len -= ac->ac_b_ex.fe_len;
4357 			spin_unlock(&pa->pa_lock);
4358 		}
4359 	}
4360 	if (pa) {
4361 		/*
4362 		 * We want to add the pa to the right bucket.
4363 		 * Remove it from the list and while adding
4364 		 * make sure the list to which we are adding
4365 		 * doesn't grow big.
4366 		 */
4367 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4368 			spin_lock(pa->pa_obj_lock);
4369 			list_del_rcu(&pa->pa_inode_list);
4370 			spin_unlock(pa->pa_obj_lock);
4371 			ext4_mb_add_n_trim(ac);
4372 		}
4373 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4374 	}
4375 	if (ac->ac_bitmap_page)
4376 		page_cache_release(ac->ac_bitmap_page);
4377 	if (ac->ac_buddy_page)
4378 		page_cache_release(ac->ac_buddy_page);
4379 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4380 		mutex_unlock(&ac->ac_lg->lg_mutex);
4381 	ext4_mb_collect_stats(ac);
4382 	return 0;
4383 }
4384 
4385 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4386 {
4387 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4388 	int ret;
4389 	int freed = 0;
4390 
4391 	trace_ext4_mb_discard_preallocations(sb, needed);
4392 	for (i = 0; i < ngroups && needed > 0; i++) {
4393 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4394 		freed += ret;
4395 		needed -= ret;
4396 	}
4397 
4398 	return freed;
4399 }
4400 
4401 /*
4402  * Main entry point into mballoc to allocate blocks
4403  * it tries to use preallocation first, then falls back
4404  * to usual allocation
4405  */
4406 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4407 				struct ext4_allocation_request *ar, int *errp)
4408 {
4409 	int freed;
4410 	struct ext4_allocation_context *ac = NULL;
4411 	struct ext4_sb_info *sbi;
4412 	struct super_block *sb;
4413 	ext4_fsblk_t block = 0;
4414 	unsigned int inquota = 0;
4415 	unsigned int reserv_clstrs = 0;
4416 
4417 	might_sleep();
4418 	sb = ar->inode->i_sb;
4419 	sbi = EXT4_SB(sb);
4420 
4421 	trace_ext4_request_blocks(ar);
4422 
4423 	/* Allow to use superuser reservation for quota file */
4424 	if (IS_NOQUOTA(ar->inode))
4425 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4426 
4427 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
4428 		/* Without delayed allocation we need to verify
4429 		 * there is enough free blocks to do block allocation
4430 		 * and verify allocation doesn't exceed the quota limits.
4431 		 */
4432 		while (ar->len &&
4433 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4434 
4435 			/* let others to free the space */
4436 			cond_resched();
4437 			ar->len = ar->len >> 1;
4438 		}
4439 		if (!ar->len) {
4440 			*errp = -ENOSPC;
4441 			return 0;
4442 		}
4443 		reserv_clstrs = ar->len;
4444 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4445 			dquot_alloc_block_nofail(ar->inode,
4446 						 EXT4_C2B(sbi, ar->len));
4447 		} else {
4448 			while (ar->len &&
4449 				dquot_alloc_block(ar->inode,
4450 						  EXT4_C2B(sbi, ar->len))) {
4451 
4452 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4453 				ar->len--;
4454 			}
4455 		}
4456 		inquota = ar->len;
4457 		if (ar->len == 0) {
4458 			*errp = -EDQUOT;
4459 			goto out;
4460 		}
4461 	}
4462 
4463 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4464 	if (!ac) {
4465 		ar->len = 0;
4466 		*errp = -ENOMEM;
4467 		goto out;
4468 	}
4469 
4470 	*errp = ext4_mb_initialize_context(ac, ar);
4471 	if (*errp) {
4472 		ar->len = 0;
4473 		goto out;
4474 	}
4475 
4476 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4477 	if (!ext4_mb_use_preallocated(ac)) {
4478 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4479 		ext4_mb_normalize_request(ac, ar);
4480 repeat:
4481 		/* allocate space in core */
4482 		*errp = ext4_mb_regular_allocator(ac);
4483 		if (*errp)
4484 			goto discard_and_exit;
4485 
4486 		/* as we've just preallocated more space than
4487 		 * user requested originally, we store allocated
4488 		 * space in a special descriptor */
4489 		if (ac->ac_status == AC_STATUS_FOUND &&
4490 		    ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4491 			*errp = ext4_mb_new_preallocation(ac);
4492 		if (*errp) {
4493 		discard_and_exit:
4494 			ext4_discard_allocated_blocks(ac);
4495 			goto errout;
4496 		}
4497 	}
4498 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4499 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4500 		if (*errp == -EAGAIN) {
4501 			/*
4502 			 * drop the reference that we took
4503 			 * in ext4_mb_use_best_found
4504 			 */
4505 			ext4_mb_release_context(ac);
4506 			ac->ac_b_ex.fe_group = 0;
4507 			ac->ac_b_ex.fe_start = 0;
4508 			ac->ac_b_ex.fe_len = 0;
4509 			ac->ac_status = AC_STATUS_CONTINUE;
4510 			goto repeat;
4511 		} else if (*errp) {
4512 			ext4_discard_allocated_blocks(ac);
4513 			goto errout;
4514 		} else {
4515 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4516 			ar->len = ac->ac_b_ex.fe_len;
4517 		}
4518 	} else {
4519 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4520 		if (freed)
4521 			goto repeat;
4522 		*errp = -ENOSPC;
4523 	}
4524 
4525 errout:
4526 	if (*errp) {
4527 		ac->ac_b_ex.fe_len = 0;
4528 		ar->len = 0;
4529 		ext4_mb_show_ac(ac);
4530 	}
4531 	ext4_mb_release_context(ac);
4532 out:
4533 	if (ac)
4534 		kmem_cache_free(ext4_ac_cachep, ac);
4535 	if (inquota && ar->len < inquota)
4536 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4537 	if (!ar->len) {
4538 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
4539 			/* release all the reserved blocks if non delalloc */
4540 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4541 						reserv_clstrs);
4542 	}
4543 
4544 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4545 
4546 	return block;
4547 }
4548 
4549 /*
4550  * We can merge two free data extents only if the physical blocks
4551  * are contiguous, AND the extents were freed by the same transaction,
4552  * AND the blocks are associated with the same group.
4553  */
4554 static int can_merge(struct ext4_free_data *entry1,
4555 			struct ext4_free_data *entry2)
4556 {
4557 	if ((entry1->efd_tid == entry2->efd_tid) &&
4558 	    (entry1->efd_group == entry2->efd_group) &&
4559 	    ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4560 		return 1;
4561 	return 0;
4562 }
4563 
4564 static noinline_for_stack int
4565 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4566 		      struct ext4_free_data *new_entry)
4567 {
4568 	ext4_group_t group = e4b->bd_group;
4569 	ext4_grpblk_t cluster;
4570 	struct ext4_free_data *entry;
4571 	struct ext4_group_info *db = e4b->bd_info;
4572 	struct super_block *sb = e4b->bd_sb;
4573 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4574 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4575 	struct rb_node *parent = NULL, *new_node;
4576 
4577 	BUG_ON(!ext4_handle_valid(handle));
4578 	BUG_ON(e4b->bd_bitmap_page == NULL);
4579 	BUG_ON(e4b->bd_buddy_page == NULL);
4580 
4581 	new_node = &new_entry->efd_node;
4582 	cluster = new_entry->efd_start_cluster;
4583 
4584 	if (!*n) {
4585 		/* first free block exent. We need to
4586 		   protect buddy cache from being freed,
4587 		 * otherwise we'll refresh it from
4588 		 * on-disk bitmap and lose not-yet-available
4589 		 * blocks */
4590 		page_cache_get(e4b->bd_buddy_page);
4591 		page_cache_get(e4b->bd_bitmap_page);
4592 	}
4593 	while (*n) {
4594 		parent = *n;
4595 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4596 		if (cluster < entry->efd_start_cluster)
4597 			n = &(*n)->rb_left;
4598 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4599 			n = &(*n)->rb_right;
4600 		else {
4601 			ext4_grp_locked_error(sb, group, 0,
4602 				ext4_group_first_block_no(sb, group) +
4603 				EXT4_C2B(sbi, cluster),
4604 				"Block already on to-be-freed list");
4605 			return 0;
4606 		}
4607 	}
4608 
4609 	rb_link_node(new_node, parent, n);
4610 	rb_insert_color(new_node, &db->bb_free_root);
4611 
4612 	/* Now try to see the extent can be merged to left and right */
4613 	node = rb_prev(new_node);
4614 	if (node) {
4615 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4616 		if (can_merge(entry, new_entry) &&
4617 		    ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4618 			new_entry->efd_start_cluster = entry->efd_start_cluster;
4619 			new_entry->efd_count += entry->efd_count;
4620 			rb_erase(node, &(db->bb_free_root));
4621 			kmem_cache_free(ext4_free_data_cachep, entry);
4622 		}
4623 	}
4624 
4625 	node = rb_next(new_node);
4626 	if (node) {
4627 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4628 		if (can_merge(new_entry, entry) &&
4629 		    ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4630 			new_entry->efd_count += entry->efd_count;
4631 			rb_erase(node, &(db->bb_free_root));
4632 			kmem_cache_free(ext4_free_data_cachep, entry);
4633 		}
4634 	}
4635 	/* Add the extent to transaction's private list */
4636 	ext4_journal_callback_add(handle, ext4_free_data_callback,
4637 				  &new_entry->efd_jce);
4638 	return 0;
4639 }
4640 
4641 /**
4642  * ext4_free_blocks() -- Free given blocks and update quota
4643  * @handle:		handle for this transaction
4644  * @inode:		inode
4645  * @block:		start physical block to free
4646  * @count:		number of blocks to count
4647  * @flags:		flags used by ext4_free_blocks
4648  */
4649 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4650 		      struct buffer_head *bh, ext4_fsblk_t block,
4651 		      unsigned long count, int flags)
4652 {
4653 	struct buffer_head *bitmap_bh = NULL;
4654 	struct super_block *sb = inode->i_sb;
4655 	struct ext4_group_desc *gdp;
4656 	unsigned int overflow;
4657 	ext4_grpblk_t bit;
4658 	struct buffer_head *gd_bh;
4659 	ext4_group_t block_group;
4660 	struct ext4_sb_info *sbi;
4661 	struct ext4_buddy e4b;
4662 	unsigned int count_clusters;
4663 	int err = 0;
4664 	int ret;
4665 
4666 	might_sleep();
4667 	if (bh) {
4668 		if (block)
4669 			BUG_ON(block != bh->b_blocknr);
4670 		else
4671 			block = bh->b_blocknr;
4672 	}
4673 
4674 	sbi = EXT4_SB(sb);
4675 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4676 	    !ext4_data_block_valid(sbi, block, count)) {
4677 		ext4_error(sb, "Freeing blocks not in datazone - "
4678 			   "block = %llu, count = %lu", block, count);
4679 		goto error_return;
4680 	}
4681 
4682 	ext4_debug("freeing block %llu\n", block);
4683 	trace_ext4_free_blocks(inode, block, count, flags);
4684 
4685 	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4686 		struct buffer_head *tbh = bh;
4687 		int i;
4688 
4689 		BUG_ON(bh && (count > 1));
4690 
4691 		for (i = 0; i < count; i++) {
4692 			cond_resched();
4693 			if (!bh)
4694 				tbh = sb_find_get_block(inode->i_sb,
4695 							block + i);
4696 			if (!tbh)
4697 				continue;
4698 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4699 				    inode, tbh, block + i);
4700 		}
4701 	}
4702 
4703 	/*
4704 	 * We need to make sure we don't reuse the freed block until
4705 	 * after the transaction is committed, which we can do by
4706 	 * treating the block as metadata, below.  We make an
4707 	 * exception if the inode is to be written in writeback mode
4708 	 * since writeback mode has weak data consistency guarantees.
4709 	 */
4710 	if (!ext4_should_writeback_data(inode))
4711 		flags |= EXT4_FREE_BLOCKS_METADATA;
4712 
4713 	/*
4714 	 * If the extent to be freed does not begin on a cluster
4715 	 * boundary, we need to deal with partial clusters at the
4716 	 * beginning and end of the extent.  Normally we will free
4717 	 * blocks at the beginning or the end unless we are explicitly
4718 	 * requested to avoid doing so.
4719 	 */
4720 	overflow = EXT4_PBLK_COFF(sbi, block);
4721 	if (overflow) {
4722 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4723 			overflow = sbi->s_cluster_ratio - overflow;
4724 			block += overflow;
4725 			if (count > overflow)
4726 				count -= overflow;
4727 			else
4728 				return;
4729 		} else {
4730 			block -= overflow;
4731 			count += overflow;
4732 		}
4733 	}
4734 	overflow = EXT4_LBLK_COFF(sbi, count);
4735 	if (overflow) {
4736 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4737 			if (count > overflow)
4738 				count -= overflow;
4739 			else
4740 				return;
4741 		} else
4742 			count += sbi->s_cluster_ratio - overflow;
4743 	}
4744 
4745 do_more:
4746 	overflow = 0;
4747 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4748 
4749 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
4750 			ext4_get_group_info(sb, block_group))))
4751 		return;
4752 
4753 	/*
4754 	 * Check to see if we are freeing blocks across a group
4755 	 * boundary.
4756 	 */
4757 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4758 		overflow = EXT4_C2B(sbi, bit) + count -
4759 			EXT4_BLOCKS_PER_GROUP(sb);
4760 		count -= overflow;
4761 	}
4762 	count_clusters = EXT4_NUM_B2C(sbi, count);
4763 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4764 	if (!bitmap_bh) {
4765 		err = -EIO;
4766 		goto error_return;
4767 	}
4768 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4769 	if (!gdp) {
4770 		err = -EIO;
4771 		goto error_return;
4772 	}
4773 
4774 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4775 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4776 	    in_range(block, ext4_inode_table(sb, gdp),
4777 		     EXT4_SB(sb)->s_itb_per_group) ||
4778 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4779 		     EXT4_SB(sb)->s_itb_per_group)) {
4780 
4781 		ext4_error(sb, "Freeing blocks in system zone - "
4782 			   "Block = %llu, count = %lu", block, count);
4783 		/* err = 0. ext4_std_error should be a no op */
4784 		goto error_return;
4785 	}
4786 
4787 	BUFFER_TRACE(bitmap_bh, "getting write access");
4788 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4789 	if (err)
4790 		goto error_return;
4791 
4792 	/*
4793 	 * We are about to modify some metadata.  Call the journal APIs
4794 	 * to unshare ->b_data if a currently-committing transaction is
4795 	 * using it
4796 	 */
4797 	BUFFER_TRACE(gd_bh, "get_write_access");
4798 	err = ext4_journal_get_write_access(handle, gd_bh);
4799 	if (err)
4800 		goto error_return;
4801 #ifdef AGGRESSIVE_CHECK
4802 	{
4803 		int i;
4804 		for (i = 0; i < count_clusters; i++)
4805 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4806 	}
4807 #endif
4808 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4809 
4810 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4811 	if (err)
4812 		goto error_return;
4813 
4814 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4815 		struct ext4_free_data *new_entry;
4816 		/*
4817 		 * blocks being freed are metadata. these blocks shouldn't
4818 		 * be used until this transaction is committed
4819 		 *
4820 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
4821 		 * to fail.
4822 		 */
4823 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
4824 				GFP_NOFS|__GFP_NOFAIL);
4825 		new_entry->efd_start_cluster = bit;
4826 		new_entry->efd_group = block_group;
4827 		new_entry->efd_count = count_clusters;
4828 		new_entry->efd_tid = handle->h_transaction->t_tid;
4829 
4830 		ext4_lock_group(sb, block_group);
4831 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4832 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4833 	} else {
4834 		/* need to update group_info->bb_free and bitmap
4835 		 * with group lock held. generate_buddy look at
4836 		 * them with group lock_held
4837 		 */
4838 		if (test_opt(sb, DISCARD)) {
4839 			err = ext4_issue_discard(sb, block_group, bit, count);
4840 			if (err && err != -EOPNOTSUPP)
4841 				ext4_msg(sb, KERN_WARNING, "discard request in"
4842 					 " group:%d block:%d count:%lu failed"
4843 					 " with %d", block_group, bit, count,
4844 					 err);
4845 		} else
4846 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
4847 
4848 		ext4_lock_group(sb, block_group);
4849 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4850 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4851 	}
4852 
4853 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4854 	ext4_free_group_clusters_set(sb, gdp, ret);
4855 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4856 	ext4_group_desc_csum_set(sb, block_group, gdp);
4857 	ext4_unlock_group(sb, block_group);
4858 
4859 	if (sbi->s_log_groups_per_flex) {
4860 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4861 		atomic64_add(count_clusters,
4862 			     &sbi->s_flex_groups[flex_group].free_clusters);
4863 	}
4864 
4865 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4866 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4867 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4868 
4869 	ext4_mb_unload_buddy(&e4b);
4870 
4871 	/* We dirtied the bitmap block */
4872 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4873 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4874 
4875 	/* And the group descriptor block */
4876 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4877 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4878 	if (!err)
4879 		err = ret;
4880 
4881 	if (overflow && !err) {
4882 		block += count;
4883 		count = overflow;
4884 		put_bh(bitmap_bh);
4885 		goto do_more;
4886 	}
4887 error_return:
4888 	brelse(bitmap_bh);
4889 	ext4_std_error(sb, err);
4890 	return;
4891 }
4892 
4893 /**
4894  * ext4_group_add_blocks() -- Add given blocks to an existing group
4895  * @handle:			handle to this transaction
4896  * @sb:				super block
4897  * @block:			start physical block to add to the block group
4898  * @count:			number of blocks to free
4899  *
4900  * This marks the blocks as free in the bitmap and buddy.
4901  */
4902 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4903 			 ext4_fsblk_t block, unsigned long count)
4904 {
4905 	struct buffer_head *bitmap_bh = NULL;
4906 	struct buffer_head *gd_bh;
4907 	ext4_group_t block_group;
4908 	ext4_grpblk_t bit;
4909 	unsigned int i;
4910 	struct ext4_group_desc *desc;
4911 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4912 	struct ext4_buddy e4b;
4913 	int err = 0, ret, blk_free_count;
4914 	ext4_grpblk_t blocks_freed;
4915 
4916 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4917 
4918 	if (count == 0)
4919 		return 0;
4920 
4921 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4922 	/*
4923 	 * Check to see if we are freeing blocks across a group
4924 	 * boundary.
4925 	 */
4926 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4927 		ext4_warning(sb, "too much blocks added to group %u\n",
4928 			     block_group);
4929 		err = -EINVAL;
4930 		goto error_return;
4931 	}
4932 
4933 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4934 	if (!bitmap_bh) {
4935 		err = -EIO;
4936 		goto error_return;
4937 	}
4938 
4939 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4940 	if (!desc) {
4941 		err = -EIO;
4942 		goto error_return;
4943 	}
4944 
4945 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4946 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4947 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4948 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4949 		     sbi->s_itb_per_group)) {
4950 		ext4_error(sb, "Adding blocks in system zones - "
4951 			   "Block = %llu, count = %lu",
4952 			   block, count);
4953 		err = -EINVAL;
4954 		goto error_return;
4955 	}
4956 
4957 	BUFFER_TRACE(bitmap_bh, "getting write access");
4958 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4959 	if (err)
4960 		goto error_return;
4961 
4962 	/*
4963 	 * We are about to modify some metadata.  Call the journal APIs
4964 	 * to unshare ->b_data if a currently-committing transaction is
4965 	 * using it
4966 	 */
4967 	BUFFER_TRACE(gd_bh, "get_write_access");
4968 	err = ext4_journal_get_write_access(handle, gd_bh);
4969 	if (err)
4970 		goto error_return;
4971 
4972 	for (i = 0, blocks_freed = 0; i < count; i++) {
4973 		BUFFER_TRACE(bitmap_bh, "clear bit");
4974 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4975 			ext4_error(sb, "bit already cleared for block %llu",
4976 				   (ext4_fsblk_t)(block + i));
4977 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4978 		} else {
4979 			blocks_freed++;
4980 		}
4981 	}
4982 
4983 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4984 	if (err)
4985 		goto error_return;
4986 
4987 	/*
4988 	 * need to update group_info->bb_free and bitmap
4989 	 * with group lock held. generate_buddy look at
4990 	 * them with group lock_held
4991 	 */
4992 	ext4_lock_group(sb, block_group);
4993 	mb_clear_bits(bitmap_bh->b_data, bit, count);
4994 	mb_free_blocks(NULL, &e4b, bit, count);
4995 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4996 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
4997 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
4998 	ext4_group_desc_csum_set(sb, block_group, desc);
4999 	ext4_unlock_group(sb, block_group);
5000 	percpu_counter_add(&sbi->s_freeclusters_counter,
5001 			   EXT4_NUM_B2C(sbi, blocks_freed));
5002 
5003 	if (sbi->s_log_groups_per_flex) {
5004 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5005 		atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
5006 			     &sbi->s_flex_groups[flex_group].free_clusters);
5007 	}
5008 
5009 	ext4_mb_unload_buddy(&e4b);
5010 
5011 	/* We dirtied the bitmap block */
5012 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5013 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5014 
5015 	/* And the group descriptor block */
5016 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5017 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5018 	if (!err)
5019 		err = ret;
5020 
5021 error_return:
5022 	brelse(bitmap_bh);
5023 	ext4_std_error(sb, err);
5024 	return err;
5025 }
5026 
5027 /**
5028  * ext4_trim_extent -- function to TRIM one single free extent in the group
5029  * @sb:		super block for the file system
5030  * @start:	starting block of the free extent in the alloc. group
5031  * @count:	number of blocks to TRIM
5032  * @group:	alloc. group we are working with
5033  * @e4b:	ext4 buddy for the group
5034  *
5035  * Trim "count" blocks starting at "start" in the "group". To assure that no
5036  * one will allocate those blocks, mark it as used in buddy bitmap. This must
5037  * be called with under the group lock.
5038  */
5039 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5040 			     ext4_group_t group, struct ext4_buddy *e4b)
5041 __releases(bitlock)
5042 __acquires(bitlock)
5043 {
5044 	struct ext4_free_extent ex;
5045 	int ret = 0;
5046 
5047 	trace_ext4_trim_extent(sb, group, start, count);
5048 
5049 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
5050 
5051 	ex.fe_start = start;
5052 	ex.fe_group = group;
5053 	ex.fe_len = count;
5054 
5055 	/*
5056 	 * Mark blocks used, so no one can reuse them while
5057 	 * being trimmed.
5058 	 */
5059 	mb_mark_used(e4b, &ex);
5060 	ext4_unlock_group(sb, group);
5061 	ret = ext4_issue_discard(sb, group, start, count);
5062 	ext4_lock_group(sb, group);
5063 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
5064 	return ret;
5065 }
5066 
5067 /**
5068  * ext4_trim_all_free -- function to trim all free space in alloc. group
5069  * @sb:			super block for file system
5070  * @group:		group to be trimmed
5071  * @start:		first group block to examine
5072  * @max:		last group block to examine
5073  * @minblocks:		minimum extent block count
5074  *
5075  * ext4_trim_all_free walks through group's buddy bitmap searching for free
5076  * extents. When the free block is found, ext4_trim_extent is called to TRIM
5077  * the extent.
5078  *
5079  *
5080  * ext4_trim_all_free walks through group's block bitmap searching for free
5081  * extents. When the free extent is found, mark it as used in group buddy
5082  * bitmap. Then issue a TRIM command on this extent and free the extent in
5083  * the group buddy bitmap. This is done until whole group is scanned.
5084  */
5085 static ext4_grpblk_t
5086 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5087 		   ext4_grpblk_t start, ext4_grpblk_t max,
5088 		   ext4_grpblk_t minblocks)
5089 {
5090 	void *bitmap;
5091 	ext4_grpblk_t next, count = 0, free_count = 0;
5092 	struct ext4_buddy e4b;
5093 	int ret = 0;
5094 
5095 	trace_ext4_trim_all_free(sb, group, start, max);
5096 
5097 	ret = ext4_mb_load_buddy(sb, group, &e4b);
5098 	if (ret) {
5099 		ext4_error(sb, "Error in loading buddy "
5100 				"information for %u", group);
5101 		return ret;
5102 	}
5103 	bitmap = e4b.bd_bitmap;
5104 
5105 	ext4_lock_group(sb, group);
5106 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5107 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5108 		goto out;
5109 
5110 	start = (e4b.bd_info->bb_first_free > start) ?
5111 		e4b.bd_info->bb_first_free : start;
5112 
5113 	while (start <= max) {
5114 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
5115 		if (start > max)
5116 			break;
5117 		next = mb_find_next_bit(bitmap, max + 1, start);
5118 
5119 		if ((next - start) >= minblocks) {
5120 			ret = ext4_trim_extent(sb, start,
5121 					       next - start, group, &e4b);
5122 			if (ret && ret != -EOPNOTSUPP)
5123 				break;
5124 			ret = 0;
5125 			count += next - start;
5126 		}
5127 		free_count += next - start;
5128 		start = next + 1;
5129 
5130 		if (fatal_signal_pending(current)) {
5131 			count = -ERESTARTSYS;
5132 			break;
5133 		}
5134 
5135 		if (need_resched()) {
5136 			ext4_unlock_group(sb, group);
5137 			cond_resched();
5138 			ext4_lock_group(sb, group);
5139 		}
5140 
5141 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
5142 			break;
5143 	}
5144 
5145 	if (!ret) {
5146 		ret = count;
5147 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5148 	}
5149 out:
5150 	ext4_unlock_group(sb, group);
5151 	ext4_mb_unload_buddy(&e4b);
5152 
5153 	ext4_debug("trimmed %d blocks in the group %d\n",
5154 		count, group);
5155 
5156 	return ret;
5157 }
5158 
5159 /**
5160  * ext4_trim_fs() -- trim ioctl handle function
5161  * @sb:			superblock for filesystem
5162  * @range:		fstrim_range structure
5163  *
5164  * start:	First Byte to trim
5165  * len:		number of Bytes to trim from start
5166  * minlen:	minimum extent length in Bytes
5167  * ext4_trim_fs goes through all allocation groups containing Bytes from
5168  * start to start+len. For each such a group ext4_trim_all_free function
5169  * is invoked to trim all free space.
5170  */
5171 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5172 {
5173 	struct ext4_group_info *grp;
5174 	ext4_group_t group, first_group, last_group;
5175 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5176 	uint64_t start, end, minlen, trimmed = 0;
5177 	ext4_fsblk_t first_data_blk =
5178 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5179 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5180 	int ret = 0;
5181 
5182 	start = range->start >> sb->s_blocksize_bits;
5183 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
5184 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5185 			      range->minlen >> sb->s_blocksize_bits);
5186 
5187 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5188 	    start >= max_blks ||
5189 	    range->len < sb->s_blocksize)
5190 		return -EINVAL;
5191 	if (end >= max_blks)
5192 		end = max_blks - 1;
5193 	if (end <= first_data_blk)
5194 		goto out;
5195 	if (start < first_data_blk)
5196 		start = first_data_blk;
5197 
5198 	/* Determine first and last group to examine based on start and end */
5199 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5200 				     &first_group, &first_cluster);
5201 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5202 				     &last_group, &last_cluster);
5203 
5204 	/* end now represents the last cluster to discard in this group */
5205 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5206 
5207 	for (group = first_group; group <= last_group; group++) {
5208 		grp = ext4_get_group_info(sb, group);
5209 		/* We only do this if the grp has never been initialized */
5210 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5211 			ret = ext4_mb_init_group(sb, group);
5212 			if (ret)
5213 				break;
5214 		}
5215 
5216 		/*
5217 		 * For all the groups except the last one, last cluster will
5218 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5219 		 * change it for the last group, note that last_cluster is
5220 		 * already computed earlier by ext4_get_group_no_and_offset()
5221 		 */
5222 		if (group == last_group)
5223 			end = last_cluster;
5224 
5225 		if (grp->bb_free >= minlen) {
5226 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5227 						end, minlen);
5228 			if (cnt < 0) {
5229 				ret = cnt;
5230 				break;
5231 			}
5232 			trimmed += cnt;
5233 		}
5234 
5235 		/*
5236 		 * For every group except the first one, we are sure
5237 		 * that the first cluster to discard will be cluster #0.
5238 		 */
5239 		first_cluster = 0;
5240 	}
5241 
5242 	if (!ret)
5243 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5244 
5245 out:
5246 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5247 	return ret;
5248 }
5249