xref: /openbmc/linux/fs/ext4/mballoc.c (revision 6b5fc336)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "ext4_jbd2.h"
25 #include "mballoc.h"
26 #include <linux/log2.h>
27 #include <linux/module.h>
28 #include <linux/slab.h>
29 #include <linux/backing-dev.h>
30 #include <trace/events/ext4.h>
31 
32 #ifdef CONFIG_EXT4_DEBUG
33 ushort ext4_mballoc_debug __read_mostly;
34 
35 module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
36 MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
37 #endif
38 
39 /*
40  * MUSTDO:
41  *   - test ext4_ext_search_left() and ext4_ext_search_right()
42  *   - search for metadata in few groups
43  *
44  * TODO v4:
45  *   - normalization should take into account whether file is still open
46  *   - discard preallocations if no free space left (policy?)
47  *   - don't normalize tails
48  *   - quota
49  *   - reservation for superuser
50  *
51  * TODO v3:
52  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
53  *   - track min/max extents in each group for better group selection
54  *   - mb_mark_used() may allocate chunk right after splitting buddy
55  *   - tree of groups sorted by number of free blocks
56  *   - error handling
57  */
58 
59 /*
60  * The allocation request involve request for multiple number of blocks
61  * near to the goal(block) value specified.
62  *
63  * During initialization phase of the allocator we decide to use the
64  * group preallocation or inode preallocation depending on the size of
65  * the file. The size of the file could be the resulting file size we
66  * would have after allocation, or the current file size, which ever
67  * is larger. If the size is less than sbi->s_mb_stream_request we
68  * select to use the group preallocation. The default value of
69  * s_mb_stream_request is 16 blocks. This can also be tuned via
70  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
71  * terms of number of blocks.
72  *
73  * The main motivation for having small file use group preallocation is to
74  * ensure that we have small files closer together on the disk.
75  *
76  * First stage the allocator looks at the inode prealloc list,
77  * ext4_inode_info->i_prealloc_list, which contains list of prealloc
78  * spaces for this particular inode. The inode prealloc space is
79  * represented as:
80  *
81  * pa_lstart -> the logical start block for this prealloc space
82  * pa_pstart -> the physical start block for this prealloc space
83  * pa_len    -> length for this prealloc space (in clusters)
84  * pa_free   ->  free space available in this prealloc space (in clusters)
85  *
86  * The inode preallocation space is used looking at the _logical_ start
87  * block. If only the logical file block falls within the range of prealloc
88  * space we will consume the particular prealloc space. This makes sure that
89  * we have contiguous physical blocks representing the file blocks
90  *
91  * The important thing to be noted in case of inode prealloc space is that
92  * we don't modify the values associated to inode prealloc space except
93  * pa_free.
94  *
95  * If we are not able to find blocks in the inode prealloc space and if we
96  * have the group allocation flag set then we look at the locality group
97  * prealloc space. These are per CPU prealloc list represented as
98  *
99  * ext4_sb_info.s_locality_groups[smp_processor_id()]
100  *
101  * The reason for having a per cpu locality group is to reduce the contention
102  * between CPUs. It is possible to get scheduled at this point.
103  *
104  * The locality group prealloc space is used looking at whether we have
105  * enough free space (pa_free) within the prealloc space.
106  *
107  * If we can't allocate blocks via inode prealloc or/and locality group
108  * prealloc then we look at the buddy cache. The buddy cache is represented
109  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
110  * mapped to the buddy and bitmap information regarding different
111  * groups. The buddy information is attached to buddy cache inode so that
112  * we can access them through the page cache. The information regarding
113  * each group is loaded via ext4_mb_load_buddy.  The information involve
114  * block bitmap and buddy information. The information are stored in the
115  * inode as:
116  *
117  *  {                        page                        }
118  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
119  *
120  *
121  * one block each for bitmap and buddy information.  So for each group we
122  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
123  * blocksize) blocks.  So it can have information regarding groups_per_page
124  * which is blocks_per_page/2
125  *
126  * The buddy cache inode is not stored on disk. The inode is thrown
127  * away when the filesystem is unmounted.
128  *
129  * We look for count number of blocks in the buddy cache. If we were able
130  * to locate that many free blocks we return with additional information
131  * regarding rest of the contiguous physical block available
132  *
133  * Before allocating blocks via buddy cache we normalize the request
134  * blocks. This ensure we ask for more blocks that we needed. The extra
135  * blocks that we get after allocation is added to the respective prealloc
136  * list. In case of inode preallocation we follow a list of heuristics
137  * based on file size. This can be found in ext4_mb_normalize_request. If
138  * we are doing a group prealloc we try to normalize the request to
139  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
140  * dependent on the cluster size; for non-bigalloc file systems, it is
141  * 512 blocks. This can be tuned via
142  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
143  * terms of number of blocks. If we have mounted the file system with -O
144  * stripe=<value> option the group prealloc request is normalized to the
145  * the smallest multiple of the stripe value (sbi->s_stripe) which is
146  * greater than the default mb_group_prealloc.
147  *
148  * The regular allocator (using the buddy cache) supports a few tunables.
149  *
150  * /sys/fs/ext4/<partition>/mb_min_to_scan
151  * /sys/fs/ext4/<partition>/mb_max_to_scan
152  * /sys/fs/ext4/<partition>/mb_order2_req
153  *
154  * The regular allocator uses buddy scan only if the request len is power of
155  * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
156  * value of s_mb_order2_reqs can be tuned via
157  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
158  * stripe size (sbi->s_stripe), we try to search for contiguous block in
159  * stripe size. This should result in better allocation on RAID setups. If
160  * not, we search in the specific group using bitmap for best extents. The
161  * tunable min_to_scan and max_to_scan control the behaviour here.
162  * min_to_scan indicate how long the mballoc __must__ look for a best
163  * extent and max_to_scan indicates how long the mballoc __can__ look for a
164  * best extent in the found extents. Searching for the blocks starts with
165  * the group specified as the goal value in allocation context via
166  * ac_g_ex. Each group is first checked based on the criteria whether it
167  * can be used for allocation. ext4_mb_good_group explains how the groups are
168  * checked.
169  *
170  * Both the prealloc space are getting populated as above. So for the first
171  * request we will hit the buddy cache which will result in this prealloc
172  * space getting filled. The prealloc space is then later used for the
173  * subsequent request.
174  */
175 
176 /*
177  * mballoc operates on the following data:
178  *  - on-disk bitmap
179  *  - in-core buddy (actually includes buddy and bitmap)
180  *  - preallocation descriptors (PAs)
181  *
182  * there are two types of preallocations:
183  *  - inode
184  *    assiged to specific inode and can be used for this inode only.
185  *    it describes part of inode's space preallocated to specific
186  *    physical blocks. any block from that preallocated can be used
187  *    independent. the descriptor just tracks number of blocks left
188  *    unused. so, before taking some block from descriptor, one must
189  *    make sure corresponded logical block isn't allocated yet. this
190  *    also means that freeing any block within descriptor's range
191  *    must discard all preallocated blocks.
192  *  - locality group
193  *    assigned to specific locality group which does not translate to
194  *    permanent set of inodes: inode can join and leave group. space
195  *    from this type of preallocation can be used for any inode. thus
196  *    it's consumed from the beginning to the end.
197  *
198  * relation between them can be expressed as:
199  *    in-core buddy = on-disk bitmap + preallocation descriptors
200  *
201  * this mean blocks mballoc considers used are:
202  *  - allocated blocks (persistent)
203  *  - preallocated blocks (non-persistent)
204  *
205  * consistency in mballoc world means that at any time a block is either
206  * free or used in ALL structures. notice: "any time" should not be read
207  * literally -- time is discrete and delimited by locks.
208  *
209  *  to keep it simple, we don't use block numbers, instead we count number of
210  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
211  *
212  * all operations can be expressed as:
213  *  - init buddy:			buddy = on-disk + PAs
214  *  - new PA:				buddy += N; PA = N
215  *  - use inode PA:			on-disk += N; PA -= N
216  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
217  *  - use locality group PA		on-disk += N; PA -= N
218  *  - discard locality group PA		buddy -= PA; PA = 0
219  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
220  *        is used in real operation because we can't know actual used
221  *        bits from PA, only from on-disk bitmap
222  *
223  * if we follow this strict logic, then all operations above should be atomic.
224  * given some of them can block, we'd have to use something like semaphores
225  * killing performance on high-end SMP hardware. let's try to relax it using
226  * the following knowledge:
227  *  1) if buddy is referenced, it's already initialized
228  *  2) while block is used in buddy and the buddy is referenced,
229  *     nobody can re-allocate that block
230  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
231  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
232  *     on-disk bitmap if buddy has same bit set or/and PA covers corresponded
233  *     block
234  *
235  * so, now we're building a concurrency table:
236  *  - init buddy vs.
237  *    - new PA
238  *      blocks for PA are allocated in the buddy, buddy must be referenced
239  *      until PA is linked to allocation group to avoid concurrent buddy init
240  *    - use inode PA
241  *      we need to make sure that either on-disk bitmap or PA has uptodate data
242  *      given (3) we care that PA-=N operation doesn't interfere with init
243  *    - discard inode PA
244  *      the simplest way would be to have buddy initialized by the discard
245  *    - use locality group PA
246  *      again PA-=N must be serialized with init
247  *    - discard locality group PA
248  *      the simplest way would be to have buddy initialized by the discard
249  *  - new PA vs.
250  *    - use inode PA
251  *      i_data_sem serializes them
252  *    - discard inode PA
253  *      discard process must wait until PA isn't used by another process
254  *    - use locality group PA
255  *      some mutex should serialize them
256  *    - discard locality group PA
257  *      discard process must wait until PA isn't used by another process
258  *  - use inode PA
259  *    - use inode PA
260  *      i_data_sem or another mutex should serializes them
261  *    - discard inode PA
262  *      discard process must wait until PA isn't used by another process
263  *    - use locality group PA
264  *      nothing wrong here -- they're different PAs covering different blocks
265  *    - discard locality group PA
266  *      discard process must wait until PA isn't used by another process
267  *
268  * now we're ready to make few consequences:
269  *  - PA is referenced and while it is no discard is possible
270  *  - PA is referenced until block isn't marked in on-disk bitmap
271  *  - PA changes only after on-disk bitmap
272  *  - discard must not compete with init. either init is done before
273  *    any discard or they're serialized somehow
274  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
275  *
276  * a special case when we've used PA to emptiness. no need to modify buddy
277  * in this case, but we should care about concurrent init
278  *
279  */
280 
281  /*
282  * Logic in few words:
283  *
284  *  - allocation:
285  *    load group
286  *    find blocks
287  *    mark bits in on-disk bitmap
288  *    release group
289  *
290  *  - use preallocation:
291  *    find proper PA (per-inode or group)
292  *    load group
293  *    mark bits in on-disk bitmap
294  *    release group
295  *    release PA
296  *
297  *  - free:
298  *    load group
299  *    mark bits in on-disk bitmap
300  *    release group
301  *
302  *  - discard preallocations in group:
303  *    mark PAs deleted
304  *    move them onto local list
305  *    load on-disk bitmap
306  *    load group
307  *    remove PA from object (inode or locality group)
308  *    mark free blocks in-core
309  *
310  *  - discard inode's preallocations:
311  */
312 
313 /*
314  * Locking rules
315  *
316  * Locks:
317  *  - bitlock on a group	(group)
318  *  - object (inode/locality)	(object)
319  *  - per-pa lock		(pa)
320  *
321  * Paths:
322  *  - new pa
323  *    object
324  *    group
325  *
326  *  - find and use pa:
327  *    pa
328  *
329  *  - release consumed pa:
330  *    pa
331  *    group
332  *    object
333  *
334  *  - generate in-core bitmap:
335  *    group
336  *        pa
337  *
338  *  - discard all for given object (inode, locality group):
339  *    object
340  *        pa
341  *    group
342  *
343  *  - discard all for given group:
344  *    group
345  *        pa
346  *    group
347  *        object
348  *
349  */
350 static struct kmem_cache *ext4_pspace_cachep;
351 static struct kmem_cache *ext4_ac_cachep;
352 static struct kmem_cache *ext4_free_data_cachep;
353 
354 /* We create slab caches for groupinfo data structures based on the
355  * superblock block size.  There will be one per mounted filesystem for
356  * each unique s_blocksize_bits */
357 #define NR_GRPINFO_CACHES 8
358 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
359 
360 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
361 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
362 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
363 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
364 };
365 
366 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
367 					ext4_group_t group);
368 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
369 						ext4_group_t group);
370 
371 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
372 {
373 #if BITS_PER_LONG == 64
374 	*bit += ((unsigned long) addr & 7UL) << 3;
375 	addr = (void *) ((unsigned long) addr & ~7UL);
376 #elif BITS_PER_LONG == 32
377 	*bit += ((unsigned long) addr & 3UL) << 3;
378 	addr = (void *) ((unsigned long) addr & ~3UL);
379 #else
380 #error "how many bits you are?!"
381 #endif
382 	return addr;
383 }
384 
385 static inline int mb_test_bit(int bit, void *addr)
386 {
387 	/*
388 	 * ext4_test_bit on architecture like powerpc
389 	 * needs unsigned long aligned address
390 	 */
391 	addr = mb_correct_addr_and_bit(&bit, addr);
392 	return ext4_test_bit(bit, addr);
393 }
394 
395 static inline void mb_set_bit(int bit, void *addr)
396 {
397 	addr = mb_correct_addr_and_bit(&bit, addr);
398 	ext4_set_bit(bit, addr);
399 }
400 
401 static inline void mb_clear_bit(int bit, void *addr)
402 {
403 	addr = mb_correct_addr_and_bit(&bit, addr);
404 	ext4_clear_bit(bit, addr);
405 }
406 
407 static inline int mb_test_and_clear_bit(int bit, void *addr)
408 {
409 	addr = mb_correct_addr_and_bit(&bit, addr);
410 	return ext4_test_and_clear_bit(bit, addr);
411 }
412 
413 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
414 {
415 	int fix = 0, ret, tmpmax;
416 	addr = mb_correct_addr_and_bit(&fix, addr);
417 	tmpmax = max + fix;
418 	start += fix;
419 
420 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
421 	if (ret > max)
422 		return max;
423 	return ret;
424 }
425 
426 static inline int mb_find_next_bit(void *addr, int max, int start)
427 {
428 	int fix = 0, ret, tmpmax;
429 	addr = mb_correct_addr_and_bit(&fix, addr);
430 	tmpmax = max + fix;
431 	start += fix;
432 
433 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
434 	if (ret > max)
435 		return max;
436 	return ret;
437 }
438 
439 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
440 {
441 	char *bb;
442 
443 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
444 	BUG_ON(max == NULL);
445 
446 	if (order > e4b->bd_blkbits + 1) {
447 		*max = 0;
448 		return NULL;
449 	}
450 
451 	/* at order 0 we see each particular block */
452 	if (order == 0) {
453 		*max = 1 << (e4b->bd_blkbits + 3);
454 		return e4b->bd_bitmap;
455 	}
456 
457 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
458 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
459 
460 	return bb;
461 }
462 
463 #ifdef DOUBLE_CHECK
464 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
465 			   int first, int count)
466 {
467 	int i;
468 	struct super_block *sb = e4b->bd_sb;
469 
470 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
471 		return;
472 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
473 	for (i = 0; i < count; i++) {
474 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
475 			ext4_fsblk_t blocknr;
476 
477 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
478 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
479 			ext4_grp_locked_error(sb, e4b->bd_group,
480 					      inode ? inode->i_ino : 0,
481 					      blocknr,
482 					      "freeing block already freed "
483 					      "(bit %u)",
484 					      first + i);
485 		}
486 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
487 	}
488 }
489 
490 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
491 {
492 	int i;
493 
494 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
495 		return;
496 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
497 	for (i = 0; i < count; i++) {
498 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
499 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
500 	}
501 }
502 
503 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
504 {
505 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
506 		unsigned char *b1, *b2;
507 		int i;
508 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
509 		b2 = (unsigned char *) bitmap;
510 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
511 			if (b1[i] != b2[i]) {
512 				ext4_msg(e4b->bd_sb, KERN_ERR,
513 					 "corruption in group %u "
514 					 "at byte %u(%u): %x in copy != %x "
515 					 "on disk/prealloc",
516 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
517 				BUG();
518 			}
519 		}
520 	}
521 }
522 
523 #else
524 static inline void mb_free_blocks_double(struct inode *inode,
525 				struct ext4_buddy *e4b, int first, int count)
526 {
527 	return;
528 }
529 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
530 						int first, int count)
531 {
532 	return;
533 }
534 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
535 {
536 	return;
537 }
538 #endif
539 
540 #ifdef AGGRESSIVE_CHECK
541 
542 #define MB_CHECK_ASSERT(assert)						\
543 do {									\
544 	if (!(assert)) {						\
545 		printk(KERN_EMERG					\
546 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
547 			function, file, line, # assert);		\
548 		BUG();							\
549 	}								\
550 } while (0)
551 
552 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
553 				const char *function, int line)
554 {
555 	struct super_block *sb = e4b->bd_sb;
556 	int order = e4b->bd_blkbits + 1;
557 	int max;
558 	int max2;
559 	int i;
560 	int j;
561 	int k;
562 	int count;
563 	struct ext4_group_info *grp;
564 	int fragments = 0;
565 	int fstart;
566 	struct list_head *cur;
567 	void *buddy;
568 	void *buddy2;
569 
570 	{
571 		static int mb_check_counter;
572 		if (mb_check_counter++ % 100 != 0)
573 			return 0;
574 	}
575 
576 	while (order > 1) {
577 		buddy = mb_find_buddy(e4b, order, &max);
578 		MB_CHECK_ASSERT(buddy);
579 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
580 		MB_CHECK_ASSERT(buddy2);
581 		MB_CHECK_ASSERT(buddy != buddy2);
582 		MB_CHECK_ASSERT(max * 2 == max2);
583 
584 		count = 0;
585 		for (i = 0; i < max; i++) {
586 
587 			if (mb_test_bit(i, buddy)) {
588 				/* only single bit in buddy2 may be 1 */
589 				if (!mb_test_bit(i << 1, buddy2)) {
590 					MB_CHECK_ASSERT(
591 						mb_test_bit((i<<1)+1, buddy2));
592 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
593 					MB_CHECK_ASSERT(
594 						mb_test_bit(i << 1, buddy2));
595 				}
596 				continue;
597 			}
598 
599 			/* both bits in buddy2 must be 1 */
600 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
601 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
602 
603 			for (j = 0; j < (1 << order); j++) {
604 				k = (i * (1 << order)) + j;
605 				MB_CHECK_ASSERT(
606 					!mb_test_bit(k, e4b->bd_bitmap));
607 			}
608 			count++;
609 		}
610 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
611 		order--;
612 	}
613 
614 	fstart = -1;
615 	buddy = mb_find_buddy(e4b, 0, &max);
616 	for (i = 0; i < max; i++) {
617 		if (!mb_test_bit(i, buddy)) {
618 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
619 			if (fstart == -1) {
620 				fragments++;
621 				fstart = i;
622 			}
623 			continue;
624 		}
625 		fstart = -1;
626 		/* check used bits only */
627 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
628 			buddy2 = mb_find_buddy(e4b, j, &max2);
629 			k = i >> j;
630 			MB_CHECK_ASSERT(k < max2);
631 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
632 		}
633 	}
634 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
635 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
636 
637 	grp = ext4_get_group_info(sb, e4b->bd_group);
638 	list_for_each(cur, &grp->bb_prealloc_list) {
639 		ext4_group_t groupnr;
640 		struct ext4_prealloc_space *pa;
641 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
642 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
643 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
644 		for (i = 0; i < pa->pa_len; i++)
645 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
646 	}
647 	return 0;
648 }
649 #undef MB_CHECK_ASSERT
650 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
651 					__FILE__, __func__, __LINE__)
652 #else
653 #define mb_check_buddy(e4b)
654 #endif
655 
656 /*
657  * Divide blocks started from @first with length @len into
658  * smaller chunks with power of 2 blocks.
659  * Clear the bits in bitmap which the blocks of the chunk(s) covered,
660  * then increase bb_counters[] for corresponded chunk size.
661  */
662 static void ext4_mb_mark_free_simple(struct super_block *sb,
663 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
664 					struct ext4_group_info *grp)
665 {
666 	struct ext4_sb_info *sbi = EXT4_SB(sb);
667 	ext4_grpblk_t min;
668 	ext4_grpblk_t max;
669 	ext4_grpblk_t chunk;
670 	unsigned int border;
671 
672 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
673 
674 	border = 2 << sb->s_blocksize_bits;
675 
676 	while (len > 0) {
677 		/* find how many blocks can be covered since this position */
678 		max = ffs(first | border) - 1;
679 
680 		/* find how many blocks of power 2 we need to mark */
681 		min = fls(len) - 1;
682 
683 		if (max < min)
684 			min = max;
685 		chunk = 1 << min;
686 
687 		/* mark multiblock chunks only */
688 		grp->bb_counters[min]++;
689 		if (min > 0)
690 			mb_clear_bit(first >> min,
691 				     buddy + sbi->s_mb_offsets[min]);
692 
693 		len -= chunk;
694 		first += chunk;
695 	}
696 }
697 
698 /*
699  * Cache the order of the largest free extent we have available in this block
700  * group.
701  */
702 static void
703 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
704 {
705 	int i;
706 	int bits;
707 
708 	grp->bb_largest_free_order = -1; /* uninit */
709 
710 	bits = sb->s_blocksize_bits + 1;
711 	for (i = bits; i >= 0; i--) {
712 		if (grp->bb_counters[i] > 0) {
713 			grp->bb_largest_free_order = i;
714 			break;
715 		}
716 	}
717 }
718 
719 static noinline_for_stack
720 void ext4_mb_generate_buddy(struct super_block *sb,
721 				void *buddy, void *bitmap, ext4_group_t group)
722 {
723 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
724 	struct ext4_sb_info *sbi = EXT4_SB(sb);
725 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
726 	ext4_grpblk_t i = 0;
727 	ext4_grpblk_t first;
728 	ext4_grpblk_t len;
729 	unsigned free = 0;
730 	unsigned fragments = 0;
731 	unsigned long long period = get_cycles();
732 
733 	/* initialize buddy from bitmap which is aggregation
734 	 * of on-disk bitmap and preallocations */
735 	i = mb_find_next_zero_bit(bitmap, max, 0);
736 	grp->bb_first_free = i;
737 	while (i < max) {
738 		fragments++;
739 		first = i;
740 		i = mb_find_next_bit(bitmap, max, i);
741 		len = i - first;
742 		free += len;
743 		if (len > 1)
744 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
745 		else
746 			grp->bb_counters[0]++;
747 		if (i < max)
748 			i = mb_find_next_zero_bit(bitmap, max, i);
749 	}
750 	grp->bb_fragments = fragments;
751 
752 	if (free != grp->bb_free) {
753 		ext4_grp_locked_error(sb, group, 0, 0,
754 				      "block bitmap and bg descriptor "
755 				      "inconsistent: %u vs %u free clusters",
756 				      free, grp->bb_free);
757 		/*
758 		 * If we intend to continue, we consider group descriptor
759 		 * corrupt and update bb_free using bitmap value
760 		 */
761 		grp->bb_free = free;
762 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
763 			percpu_counter_sub(&sbi->s_freeclusters_counter,
764 					   grp->bb_free);
765 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
766 	}
767 	mb_set_largest_free_order(sb, grp);
768 
769 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
770 
771 	period = get_cycles() - period;
772 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
773 	EXT4_SB(sb)->s_mb_buddies_generated++;
774 	EXT4_SB(sb)->s_mb_generation_time += period;
775 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
776 }
777 
778 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
779 {
780 	int count;
781 	int order = 1;
782 	void *buddy;
783 
784 	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
785 		ext4_set_bits(buddy, 0, count);
786 	}
787 	e4b->bd_info->bb_fragments = 0;
788 	memset(e4b->bd_info->bb_counters, 0,
789 		sizeof(*e4b->bd_info->bb_counters) *
790 		(e4b->bd_sb->s_blocksize_bits + 2));
791 
792 	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
793 		e4b->bd_bitmap, e4b->bd_group);
794 }
795 
796 /* The buddy information is attached the buddy cache inode
797  * for convenience. The information regarding each group
798  * is loaded via ext4_mb_load_buddy. The information involve
799  * block bitmap and buddy information. The information are
800  * stored in the inode as
801  *
802  * {                        page                        }
803  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
804  *
805  *
806  * one block each for bitmap and buddy information.
807  * So for each group we take up 2 blocks. A page can
808  * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
809  * So it can have information regarding groups_per_page which
810  * is blocks_per_page/2
811  *
812  * Locking note:  This routine takes the block group lock of all groups
813  * for this page; do not hold this lock when calling this routine!
814  */
815 
816 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
817 {
818 	ext4_group_t ngroups;
819 	int blocksize;
820 	int blocks_per_page;
821 	int groups_per_page;
822 	int err = 0;
823 	int i;
824 	ext4_group_t first_group, group;
825 	int first_block;
826 	struct super_block *sb;
827 	struct buffer_head *bhs;
828 	struct buffer_head **bh = NULL;
829 	struct inode *inode;
830 	char *data;
831 	char *bitmap;
832 	struct ext4_group_info *grinfo;
833 
834 	mb_debug(1, "init page %lu\n", page->index);
835 
836 	inode = page->mapping->host;
837 	sb = inode->i_sb;
838 	ngroups = ext4_get_groups_count(sb);
839 	blocksize = i_blocksize(inode);
840 	blocks_per_page = PAGE_SIZE / blocksize;
841 
842 	groups_per_page = blocks_per_page >> 1;
843 	if (groups_per_page == 0)
844 		groups_per_page = 1;
845 
846 	/* allocate buffer_heads to read bitmaps */
847 	if (groups_per_page > 1) {
848 		i = sizeof(struct buffer_head *) * groups_per_page;
849 		bh = kzalloc(i, gfp);
850 		if (bh == NULL) {
851 			err = -ENOMEM;
852 			goto out;
853 		}
854 	} else
855 		bh = &bhs;
856 
857 	first_group = page->index * blocks_per_page / 2;
858 
859 	/* read all groups the page covers into the cache */
860 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
861 		if (group >= ngroups)
862 			break;
863 
864 		grinfo = ext4_get_group_info(sb, group);
865 		/*
866 		 * If page is uptodate then we came here after online resize
867 		 * which added some new uninitialized group info structs, so
868 		 * we must skip all initialized uptodate buddies on the page,
869 		 * which may be currently in use by an allocating task.
870 		 */
871 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
872 			bh[i] = NULL;
873 			continue;
874 		}
875 		bh[i] = ext4_read_block_bitmap_nowait(sb, group);
876 		if (IS_ERR(bh[i])) {
877 			err = PTR_ERR(bh[i]);
878 			bh[i] = NULL;
879 			goto out;
880 		}
881 		mb_debug(1, "read bitmap for group %u\n", group);
882 	}
883 
884 	/* wait for I/O completion */
885 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
886 		int err2;
887 
888 		if (!bh[i])
889 			continue;
890 		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
891 		if (!err)
892 			err = err2;
893 	}
894 
895 	first_block = page->index * blocks_per_page;
896 	for (i = 0; i < blocks_per_page; i++) {
897 		group = (first_block + i) >> 1;
898 		if (group >= ngroups)
899 			break;
900 
901 		if (!bh[group - first_group])
902 			/* skip initialized uptodate buddy */
903 			continue;
904 
905 		if (!buffer_verified(bh[group - first_group]))
906 			/* Skip faulty bitmaps */
907 			continue;
908 		err = 0;
909 
910 		/*
911 		 * data carry information regarding this
912 		 * particular group in the format specified
913 		 * above
914 		 *
915 		 */
916 		data = page_address(page) + (i * blocksize);
917 		bitmap = bh[group - first_group]->b_data;
918 
919 		/*
920 		 * We place the buddy block and bitmap block
921 		 * close together
922 		 */
923 		if ((first_block + i) & 1) {
924 			/* this is block of buddy */
925 			BUG_ON(incore == NULL);
926 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
927 				group, page->index, i * blocksize);
928 			trace_ext4_mb_buddy_bitmap_load(sb, group);
929 			grinfo = ext4_get_group_info(sb, group);
930 			grinfo->bb_fragments = 0;
931 			memset(grinfo->bb_counters, 0,
932 			       sizeof(*grinfo->bb_counters) *
933 				(sb->s_blocksize_bits+2));
934 			/*
935 			 * incore got set to the group block bitmap below
936 			 */
937 			ext4_lock_group(sb, group);
938 			/* init the buddy */
939 			memset(data, 0xff, blocksize);
940 			ext4_mb_generate_buddy(sb, data, incore, group);
941 			ext4_unlock_group(sb, group);
942 			incore = NULL;
943 		} else {
944 			/* this is block of bitmap */
945 			BUG_ON(incore != NULL);
946 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
947 				group, page->index, i * blocksize);
948 			trace_ext4_mb_bitmap_load(sb, group);
949 
950 			/* see comments in ext4_mb_put_pa() */
951 			ext4_lock_group(sb, group);
952 			memcpy(data, bitmap, blocksize);
953 
954 			/* mark all preallocated blks used in in-core bitmap */
955 			ext4_mb_generate_from_pa(sb, data, group);
956 			ext4_mb_generate_from_freelist(sb, data, group);
957 			ext4_unlock_group(sb, group);
958 
959 			/* set incore so that the buddy information can be
960 			 * generated using this
961 			 */
962 			incore = data;
963 		}
964 	}
965 	SetPageUptodate(page);
966 
967 out:
968 	if (bh) {
969 		for (i = 0; i < groups_per_page; i++)
970 			brelse(bh[i]);
971 		if (bh != &bhs)
972 			kfree(bh);
973 	}
974 	return err;
975 }
976 
977 /*
978  * Lock the buddy and bitmap pages. This make sure other parallel init_group
979  * on the same buddy page doesn't happen whild holding the buddy page lock.
980  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
981  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
982  */
983 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
984 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
985 {
986 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
987 	int block, pnum, poff;
988 	int blocks_per_page;
989 	struct page *page;
990 
991 	e4b->bd_buddy_page = NULL;
992 	e4b->bd_bitmap_page = NULL;
993 
994 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
995 	/*
996 	 * the buddy cache inode stores the block bitmap
997 	 * and buddy information in consecutive blocks.
998 	 * So for each group we need two blocks.
999 	 */
1000 	block = group * 2;
1001 	pnum = block / blocks_per_page;
1002 	poff = block % blocks_per_page;
1003 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1004 	if (!page)
1005 		return -ENOMEM;
1006 	BUG_ON(page->mapping != inode->i_mapping);
1007 	e4b->bd_bitmap_page = page;
1008 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1009 
1010 	if (blocks_per_page >= 2) {
1011 		/* buddy and bitmap are on the same page */
1012 		return 0;
1013 	}
1014 
1015 	block++;
1016 	pnum = block / blocks_per_page;
1017 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1018 	if (!page)
1019 		return -ENOMEM;
1020 	BUG_ON(page->mapping != inode->i_mapping);
1021 	e4b->bd_buddy_page = page;
1022 	return 0;
1023 }
1024 
1025 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1026 {
1027 	if (e4b->bd_bitmap_page) {
1028 		unlock_page(e4b->bd_bitmap_page);
1029 		put_page(e4b->bd_bitmap_page);
1030 	}
1031 	if (e4b->bd_buddy_page) {
1032 		unlock_page(e4b->bd_buddy_page);
1033 		put_page(e4b->bd_buddy_page);
1034 	}
1035 }
1036 
1037 /*
1038  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1039  * block group lock of all groups for this page; do not hold the BG lock when
1040  * calling this routine!
1041  */
1042 static noinline_for_stack
1043 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1044 {
1045 
1046 	struct ext4_group_info *this_grp;
1047 	struct ext4_buddy e4b;
1048 	struct page *page;
1049 	int ret = 0;
1050 
1051 	might_sleep();
1052 	mb_debug(1, "init group %u\n", group);
1053 	this_grp = ext4_get_group_info(sb, group);
1054 	/*
1055 	 * This ensures that we don't reinit the buddy cache
1056 	 * page which map to the group from which we are already
1057 	 * allocating. If we are looking at the buddy cache we would
1058 	 * have taken a reference using ext4_mb_load_buddy and that
1059 	 * would have pinned buddy page to page cache.
1060 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1061 	 * page accessed.
1062 	 */
1063 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1064 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1065 		/*
1066 		 * somebody initialized the group
1067 		 * return without doing anything
1068 		 */
1069 		goto err;
1070 	}
1071 
1072 	page = e4b.bd_bitmap_page;
1073 	ret = ext4_mb_init_cache(page, NULL, gfp);
1074 	if (ret)
1075 		goto err;
1076 	if (!PageUptodate(page)) {
1077 		ret = -EIO;
1078 		goto err;
1079 	}
1080 
1081 	if (e4b.bd_buddy_page == NULL) {
1082 		/*
1083 		 * If both the bitmap and buddy are in
1084 		 * the same page we don't need to force
1085 		 * init the buddy
1086 		 */
1087 		ret = 0;
1088 		goto err;
1089 	}
1090 	/* init buddy cache */
1091 	page = e4b.bd_buddy_page;
1092 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1093 	if (ret)
1094 		goto err;
1095 	if (!PageUptodate(page)) {
1096 		ret = -EIO;
1097 		goto err;
1098 	}
1099 err:
1100 	ext4_mb_put_buddy_page_lock(&e4b);
1101 	return ret;
1102 }
1103 
1104 /*
1105  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1106  * block group lock of all groups for this page; do not hold the BG lock when
1107  * calling this routine!
1108  */
1109 static noinline_for_stack int
1110 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1111 		       struct ext4_buddy *e4b, gfp_t gfp)
1112 {
1113 	int blocks_per_page;
1114 	int block;
1115 	int pnum;
1116 	int poff;
1117 	struct page *page;
1118 	int ret;
1119 	struct ext4_group_info *grp;
1120 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1121 	struct inode *inode = sbi->s_buddy_cache;
1122 
1123 	might_sleep();
1124 	mb_debug(1, "load group %u\n", group);
1125 
1126 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1127 	grp = ext4_get_group_info(sb, group);
1128 
1129 	e4b->bd_blkbits = sb->s_blocksize_bits;
1130 	e4b->bd_info = grp;
1131 	e4b->bd_sb = sb;
1132 	e4b->bd_group = group;
1133 	e4b->bd_buddy_page = NULL;
1134 	e4b->bd_bitmap_page = NULL;
1135 
1136 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1137 		/*
1138 		 * we need full data about the group
1139 		 * to make a good selection
1140 		 */
1141 		ret = ext4_mb_init_group(sb, group, gfp);
1142 		if (ret)
1143 			return ret;
1144 	}
1145 
1146 	/*
1147 	 * the buddy cache inode stores the block bitmap
1148 	 * and buddy information in consecutive blocks.
1149 	 * So for each group we need two blocks.
1150 	 */
1151 	block = group * 2;
1152 	pnum = block / blocks_per_page;
1153 	poff = block % blocks_per_page;
1154 
1155 	/* we could use find_or_create_page(), but it locks page
1156 	 * what we'd like to avoid in fast path ... */
1157 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1158 	if (page == NULL || !PageUptodate(page)) {
1159 		if (page)
1160 			/*
1161 			 * drop the page reference and try
1162 			 * to get the page with lock. If we
1163 			 * are not uptodate that implies
1164 			 * somebody just created the page but
1165 			 * is yet to initialize the same. So
1166 			 * wait for it to initialize.
1167 			 */
1168 			put_page(page);
1169 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1170 		if (page) {
1171 			BUG_ON(page->mapping != inode->i_mapping);
1172 			if (!PageUptodate(page)) {
1173 				ret = ext4_mb_init_cache(page, NULL, gfp);
1174 				if (ret) {
1175 					unlock_page(page);
1176 					goto err;
1177 				}
1178 				mb_cmp_bitmaps(e4b, page_address(page) +
1179 					       (poff * sb->s_blocksize));
1180 			}
1181 			unlock_page(page);
1182 		}
1183 	}
1184 	if (page == NULL) {
1185 		ret = -ENOMEM;
1186 		goto err;
1187 	}
1188 	if (!PageUptodate(page)) {
1189 		ret = -EIO;
1190 		goto err;
1191 	}
1192 
1193 	/* Pages marked accessed already */
1194 	e4b->bd_bitmap_page = page;
1195 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1196 
1197 	block++;
1198 	pnum = block / blocks_per_page;
1199 	poff = block % blocks_per_page;
1200 
1201 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1202 	if (page == NULL || !PageUptodate(page)) {
1203 		if (page)
1204 			put_page(page);
1205 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1206 		if (page) {
1207 			BUG_ON(page->mapping != inode->i_mapping);
1208 			if (!PageUptodate(page)) {
1209 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1210 							 gfp);
1211 				if (ret) {
1212 					unlock_page(page);
1213 					goto err;
1214 				}
1215 			}
1216 			unlock_page(page);
1217 		}
1218 	}
1219 	if (page == NULL) {
1220 		ret = -ENOMEM;
1221 		goto err;
1222 	}
1223 	if (!PageUptodate(page)) {
1224 		ret = -EIO;
1225 		goto err;
1226 	}
1227 
1228 	/* Pages marked accessed already */
1229 	e4b->bd_buddy_page = page;
1230 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1231 
1232 	BUG_ON(e4b->bd_bitmap_page == NULL);
1233 	BUG_ON(e4b->bd_buddy_page == NULL);
1234 
1235 	return 0;
1236 
1237 err:
1238 	if (page)
1239 		put_page(page);
1240 	if (e4b->bd_bitmap_page)
1241 		put_page(e4b->bd_bitmap_page);
1242 	if (e4b->bd_buddy_page)
1243 		put_page(e4b->bd_buddy_page);
1244 	e4b->bd_buddy = NULL;
1245 	e4b->bd_bitmap = NULL;
1246 	return ret;
1247 }
1248 
1249 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1250 			      struct ext4_buddy *e4b)
1251 {
1252 	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1253 }
1254 
1255 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1256 {
1257 	if (e4b->bd_bitmap_page)
1258 		put_page(e4b->bd_bitmap_page);
1259 	if (e4b->bd_buddy_page)
1260 		put_page(e4b->bd_buddy_page);
1261 }
1262 
1263 
1264 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1265 {
1266 	int order = 1;
1267 	int bb_incr = 1 << (e4b->bd_blkbits - 1);
1268 	void *bb;
1269 
1270 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1271 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1272 
1273 	bb = e4b->bd_buddy;
1274 	while (order <= e4b->bd_blkbits + 1) {
1275 		block = block >> 1;
1276 		if (!mb_test_bit(block, bb)) {
1277 			/* this block is part of buddy of order 'order' */
1278 			return order;
1279 		}
1280 		bb += bb_incr;
1281 		bb_incr >>= 1;
1282 		order++;
1283 	}
1284 	return 0;
1285 }
1286 
1287 static void mb_clear_bits(void *bm, int cur, int len)
1288 {
1289 	__u32 *addr;
1290 
1291 	len = cur + len;
1292 	while (cur < len) {
1293 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1294 			/* fast path: clear whole word at once */
1295 			addr = bm + (cur >> 3);
1296 			*addr = 0;
1297 			cur += 32;
1298 			continue;
1299 		}
1300 		mb_clear_bit(cur, bm);
1301 		cur++;
1302 	}
1303 }
1304 
1305 /* clear bits in given range
1306  * will return first found zero bit if any, -1 otherwise
1307  */
1308 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1309 {
1310 	__u32 *addr;
1311 	int zero_bit = -1;
1312 
1313 	len = cur + len;
1314 	while (cur < len) {
1315 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1316 			/* fast path: clear whole word at once */
1317 			addr = bm + (cur >> 3);
1318 			if (*addr != (__u32)(-1) && zero_bit == -1)
1319 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1320 			*addr = 0;
1321 			cur += 32;
1322 			continue;
1323 		}
1324 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1325 			zero_bit = cur;
1326 		cur++;
1327 	}
1328 
1329 	return zero_bit;
1330 }
1331 
1332 void ext4_set_bits(void *bm, int cur, int len)
1333 {
1334 	__u32 *addr;
1335 
1336 	len = cur + len;
1337 	while (cur < len) {
1338 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1339 			/* fast path: set whole word at once */
1340 			addr = bm + (cur >> 3);
1341 			*addr = 0xffffffff;
1342 			cur += 32;
1343 			continue;
1344 		}
1345 		mb_set_bit(cur, bm);
1346 		cur++;
1347 	}
1348 }
1349 
1350 /*
1351  * _________________________________________________________________ */
1352 
1353 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1354 {
1355 	if (mb_test_bit(*bit + side, bitmap)) {
1356 		mb_clear_bit(*bit, bitmap);
1357 		(*bit) -= side;
1358 		return 1;
1359 	}
1360 	else {
1361 		(*bit) += side;
1362 		mb_set_bit(*bit, bitmap);
1363 		return -1;
1364 	}
1365 }
1366 
1367 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1368 {
1369 	int max;
1370 	int order = 1;
1371 	void *buddy = mb_find_buddy(e4b, order, &max);
1372 
1373 	while (buddy) {
1374 		void *buddy2;
1375 
1376 		/* Bits in range [first; last] are known to be set since
1377 		 * corresponding blocks were allocated. Bits in range
1378 		 * (first; last) will stay set because they form buddies on
1379 		 * upper layer. We just deal with borders if they don't
1380 		 * align with upper layer and then go up.
1381 		 * Releasing entire group is all about clearing
1382 		 * single bit of highest order buddy.
1383 		 */
1384 
1385 		/* Example:
1386 		 * ---------------------------------
1387 		 * |   1   |   1   |   1   |   1   |
1388 		 * ---------------------------------
1389 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1390 		 * ---------------------------------
1391 		 *   0   1   2   3   4   5   6   7
1392 		 *      \_____________________/
1393 		 *
1394 		 * Neither [1] nor [6] is aligned to above layer.
1395 		 * Left neighbour [0] is free, so mark it busy,
1396 		 * decrease bb_counters and extend range to
1397 		 * [0; 6]
1398 		 * Right neighbour [7] is busy. It can't be coaleasced with [6], so
1399 		 * mark [6] free, increase bb_counters and shrink range to
1400 		 * [0; 5].
1401 		 * Then shift range to [0; 2], go up and do the same.
1402 		 */
1403 
1404 
1405 		if (first & 1)
1406 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1407 		if (!(last & 1))
1408 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1409 		if (first > last)
1410 			break;
1411 		order++;
1412 
1413 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1414 			mb_clear_bits(buddy, first, last - first + 1);
1415 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1416 			break;
1417 		}
1418 		first >>= 1;
1419 		last >>= 1;
1420 		buddy = buddy2;
1421 	}
1422 }
1423 
1424 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1425 			   int first, int count)
1426 {
1427 	int left_is_free = 0;
1428 	int right_is_free = 0;
1429 	int block;
1430 	int last = first + count - 1;
1431 	struct super_block *sb = e4b->bd_sb;
1432 
1433 	if (WARN_ON(count == 0))
1434 		return;
1435 	BUG_ON(last >= (sb->s_blocksize << 3));
1436 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1437 	/* Don't bother if the block group is corrupt. */
1438 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1439 		return;
1440 
1441 	mb_check_buddy(e4b);
1442 	mb_free_blocks_double(inode, e4b, first, count);
1443 
1444 	e4b->bd_info->bb_free += count;
1445 	if (first < e4b->bd_info->bb_first_free)
1446 		e4b->bd_info->bb_first_free = first;
1447 
1448 	/* access memory sequentially: check left neighbour,
1449 	 * clear range and then check right neighbour
1450 	 */
1451 	if (first != 0)
1452 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1453 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1454 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1455 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1456 
1457 	if (unlikely(block != -1)) {
1458 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1459 		ext4_fsblk_t blocknr;
1460 
1461 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1462 		blocknr += EXT4_C2B(EXT4_SB(sb), block);
1463 		ext4_grp_locked_error(sb, e4b->bd_group,
1464 				      inode ? inode->i_ino : 0,
1465 				      blocknr,
1466 				      "freeing already freed block "
1467 				      "(bit %u); block bitmap corrupt.",
1468 				      block);
1469 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
1470 			percpu_counter_sub(&sbi->s_freeclusters_counter,
1471 					   e4b->bd_info->bb_free);
1472 		/* Mark the block group as corrupt. */
1473 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1474 			&e4b->bd_info->bb_state);
1475 		mb_regenerate_buddy(e4b);
1476 		goto done;
1477 	}
1478 
1479 	/* let's maintain fragments counter */
1480 	if (left_is_free && right_is_free)
1481 		e4b->bd_info->bb_fragments--;
1482 	else if (!left_is_free && !right_is_free)
1483 		e4b->bd_info->bb_fragments++;
1484 
1485 	/* buddy[0] == bd_bitmap is a special case, so handle
1486 	 * it right away and let mb_buddy_mark_free stay free of
1487 	 * zero order checks.
1488 	 * Check if neighbours are to be coaleasced,
1489 	 * adjust bitmap bb_counters and borders appropriately.
1490 	 */
1491 	if (first & 1) {
1492 		first += !left_is_free;
1493 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1494 	}
1495 	if (!(last & 1)) {
1496 		last -= !right_is_free;
1497 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1498 	}
1499 
1500 	if (first <= last)
1501 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1502 
1503 done:
1504 	mb_set_largest_free_order(sb, e4b->bd_info);
1505 	mb_check_buddy(e4b);
1506 }
1507 
1508 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1509 				int needed, struct ext4_free_extent *ex)
1510 {
1511 	int next = block;
1512 	int max, order;
1513 	void *buddy;
1514 
1515 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1516 	BUG_ON(ex == NULL);
1517 
1518 	buddy = mb_find_buddy(e4b, 0, &max);
1519 	BUG_ON(buddy == NULL);
1520 	BUG_ON(block >= max);
1521 	if (mb_test_bit(block, buddy)) {
1522 		ex->fe_len = 0;
1523 		ex->fe_start = 0;
1524 		ex->fe_group = 0;
1525 		return 0;
1526 	}
1527 
1528 	/* find actual order */
1529 	order = mb_find_order_for_block(e4b, block);
1530 	block = block >> order;
1531 
1532 	ex->fe_len = 1 << order;
1533 	ex->fe_start = block << order;
1534 	ex->fe_group = e4b->bd_group;
1535 
1536 	/* calc difference from given start */
1537 	next = next - ex->fe_start;
1538 	ex->fe_len -= next;
1539 	ex->fe_start += next;
1540 
1541 	while (needed > ex->fe_len &&
1542 	       mb_find_buddy(e4b, order, &max)) {
1543 
1544 		if (block + 1 >= max)
1545 			break;
1546 
1547 		next = (block + 1) * (1 << order);
1548 		if (mb_test_bit(next, e4b->bd_bitmap))
1549 			break;
1550 
1551 		order = mb_find_order_for_block(e4b, next);
1552 
1553 		block = next >> order;
1554 		ex->fe_len += 1 << order;
1555 	}
1556 
1557 	if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
1558 		/* Should never happen! (but apparently sometimes does?!?) */
1559 		WARN_ON(1);
1560 		ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
1561 			   "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1562 			   block, order, needed, ex->fe_group, ex->fe_start,
1563 			   ex->fe_len, ex->fe_logical);
1564 		ex->fe_len = 0;
1565 		ex->fe_start = 0;
1566 		ex->fe_group = 0;
1567 	}
1568 	return ex->fe_len;
1569 }
1570 
1571 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1572 {
1573 	int ord;
1574 	int mlen = 0;
1575 	int max = 0;
1576 	int cur;
1577 	int start = ex->fe_start;
1578 	int len = ex->fe_len;
1579 	unsigned ret = 0;
1580 	int len0 = len;
1581 	void *buddy;
1582 
1583 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1584 	BUG_ON(e4b->bd_group != ex->fe_group);
1585 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1586 	mb_check_buddy(e4b);
1587 	mb_mark_used_double(e4b, start, len);
1588 
1589 	e4b->bd_info->bb_free -= len;
1590 	if (e4b->bd_info->bb_first_free == start)
1591 		e4b->bd_info->bb_first_free += len;
1592 
1593 	/* let's maintain fragments counter */
1594 	if (start != 0)
1595 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1596 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1597 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1598 	if (mlen && max)
1599 		e4b->bd_info->bb_fragments++;
1600 	else if (!mlen && !max)
1601 		e4b->bd_info->bb_fragments--;
1602 
1603 	/* let's maintain buddy itself */
1604 	while (len) {
1605 		ord = mb_find_order_for_block(e4b, start);
1606 
1607 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1608 			/* the whole chunk may be allocated at once! */
1609 			mlen = 1 << ord;
1610 			buddy = mb_find_buddy(e4b, ord, &max);
1611 			BUG_ON((start >> ord) >= max);
1612 			mb_set_bit(start >> ord, buddy);
1613 			e4b->bd_info->bb_counters[ord]--;
1614 			start += mlen;
1615 			len -= mlen;
1616 			BUG_ON(len < 0);
1617 			continue;
1618 		}
1619 
1620 		/* store for history */
1621 		if (ret == 0)
1622 			ret = len | (ord << 16);
1623 
1624 		/* we have to split large buddy */
1625 		BUG_ON(ord <= 0);
1626 		buddy = mb_find_buddy(e4b, ord, &max);
1627 		mb_set_bit(start >> ord, buddy);
1628 		e4b->bd_info->bb_counters[ord]--;
1629 
1630 		ord--;
1631 		cur = (start >> ord) & ~1U;
1632 		buddy = mb_find_buddy(e4b, ord, &max);
1633 		mb_clear_bit(cur, buddy);
1634 		mb_clear_bit(cur + 1, buddy);
1635 		e4b->bd_info->bb_counters[ord]++;
1636 		e4b->bd_info->bb_counters[ord]++;
1637 	}
1638 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1639 
1640 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1641 	mb_check_buddy(e4b);
1642 
1643 	return ret;
1644 }
1645 
1646 /*
1647  * Must be called under group lock!
1648  */
1649 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1650 					struct ext4_buddy *e4b)
1651 {
1652 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1653 	int ret;
1654 
1655 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1656 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1657 
1658 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1659 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1660 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1661 
1662 	/* preallocation can change ac_b_ex, thus we store actually
1663 	 * allocated blocks for history */
1664 	ac->ac_f_ex = ac->ac_b_ex;
1665 
1666 	ac->ac_status = AC_STATUS_FOUND;
1667 	ac->ac_tail = ret & 0xffff;
1668 	ac->ac_buddy = ret >> 16;
1669 
1670 	/*
1671 	 * take the page reference. We want the page to be pinned
1672 	 * so that we don't get a ext4_mb_init_cache_call for this
1673 	 * group until we update the bitmap. That would mean we
1674 	 * double allocate blocks. The reference is dropped
1675 	 * in ext4_mb_release_context
1676 	 */
1677 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1678 	get_page(ac->ac_bitmap_page);
1679 	ac->ac_buddy_page = e4b->bd_buddy_page;
1680 	get_page(ac->ac_buddy_page);
1681 	/* store last allocated for subsequent stream allocation */
1682 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1683 		spin_lock(&sbi->s_md_lock);
1684 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1685 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1686 		spin_unlock(&sbi->s_md_lock);
1687 	}
1688 }
1689 
1690 /*
1691  * regular allocator, for general purposes allocation
1692  */
1693 
1694 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1695 					struct ext4_buddy *e4b,
1696 					int finish_group)
1697 {
1698 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1699 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1700 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1701 	struct ext4_free_extent ex;
1702 	int max;
1703 
1704 	if (ac->ac_status == AC_STATUS_FOUND)
1705 		return;
1706 	/*
1707 	 * We don't want to scan for a whole year
1708 	 */
1709 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1710 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1711 		ac->ac_status = AC_STATUS_BREAK;
1712 		return;
1713 	}
1714 
1715 	/*
1716 	 * Haven't found good chunk so far, let's continue
1717 	 */
1718 	if (bex->fe_len < gex->fe_len)
1719 		return;
1720 
1721 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1722 			&& bex->fe_group == e4b->bd_group) {
1723 		/* recheck chunk's availability - we don't know
1724 		 * when it was found (within this lock-unlock
1725 		 * period or not) */
1726 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1727 		if (max >= gex->fe_len) {
1728 			ext4_mb_use_best_found(ac, e4b);
1729 			return;
1730 		}
1731 	}
1732 }
1733 
1734 /*
1735  * The routine checks whether found extent is good enough. If it is,
1736  * then the extent gets marked used and flag is set to the context
1737  * to stop scanning. Otherwise, the extent is compared with the
1738  * previous found extent and if new one is better, then it's stored
1739  * in the context. Later, the best found extent will be used, if
1740  * mballoc can't find good enough extent.
1741  *
1742  * FIXME: real allocation policy is to be designed yet!
1743  */
1744 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1745 					struct ext4_free_extent *ex,
1746 					struct ext4_buddy *e4b)
1747 {
1748 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1749 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1750 
1751 	BUG_ON(ex->fe_len <= 0);
1752 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1753 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1754 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1755 
1756 	ac->ac_found++;
1757 
1758 	/*
1759 	 * The special case - take what you catch first
1760 	 */
1761 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1762 		*bex = *ex;
1763 		ext4_mb_use_best_found(ac, e4b);
1764 		return;
1765 	}
1766 
1767 	/*
1768 	 * Let's check whether the chuck is good enough
1769 	 */
1770 	if (ex->fe_len == gex->fe_len) {
1771 		*bex = *ex;
1772 		ext4_mb_use_best_found(ac, e4b);
1773 		return;
1774 	}
1775 
1776 	/*
1777 	 * If this is first found extent, just store it in the context
1778 	 */
1779 	if (bex->fe_len == 0) {
1780 		*bex = *ex;
1781 		return;
1782 	}
1783 
1784 	/*
1785 	 * If new found extent is better, store it in the context
1786 	 */
1787 	if (bex->fe_len < gex->fe_len) {
1788 		/* if the request isn't satisfied, any found extent
1789 		 * larger than previous best one is better */
1790 		if (ex->fe_len > bex->fe_len)
1791 			*bex = *ex;
1792 	} else if (ex->fe_len > gex->fe_len) {
1793 		/* if the request is satisfied, then we try to find
1794 		 * an extent that still satisfy the request, but is
1795 		 * smaller than previous one */
1796 		if (ex->fe_len < bex->fe_len)
1797 			*bex = *ex;
1798 	}
1799 
1800 	ext4_mb_check_limits(ac, e4b, 0);
1801 }
1802 
1803 static noinline_for_stack
1804 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1805 					struct ext4_buddy *e4b)
1806 {
1807 	struct ext4_free_extent ex = ac->ac_b_ex;
1808 	ext4_group_t group = ex.fe_group;
1809 	int max;
1810 	int err;
1811 
1812 	BUG_ON(ex.fe_len <= 0);
1813 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1814 	if (err)
1815 		return err;
1816 
1817 	ext4_lock_group(ac->ac_sb, group);
1818 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1819 
1820 	if (max > 0) {
1821 		ac->ac_b_ex = ex;
1822 		ext4_mb_use_best_found(ac, e4b);
1823 	}
1824 
1825 	ext4_unlock_group(ac->ac_sb, group);
1826 	ext4_mb_unload_buddy(e4b);
1827 
1828 	return 0;
1829 }
1830 
1831 static noinline_for_stack
1832 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1833 				struct ext4_buddy *e4b)
1834 {
1835 	ext4_group_t group = ac->ac_g_ex.fe_group;
1836 	int max;
1837 	int err;
1838 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1839 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1840 	struct ext4_free_extent ex;
1841 
1842 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1843 		return 0;
1844 	if (grp->bb_free == 0)
1845 		return 0;
1846 
1847 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1848 	if (err)
1849 		return err;
1850 
1851 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1852 		ext4_mb_unload_buddy(e4b);
1853 		return 0;
1854 	}
1855 
1856 	ext4_lock_group(ac->ac_sb, group);
1857 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1858 			     ac->ac_g_ex.fe_len, &ex);
1859 	ex.fe_logical = 0xDEADFA11; /* debug value */
1860 
1861 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1862 		ext4_fsblk_t start;
1863 
1864 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1865 			ex.fe_start;
1866 		/* use do_div to get remainder (would be 64-bit modulo) */
1867 		if (do_div(start, sbi->s_stripe) == 0) {
1868 			ac->ac_found++;
1869 			ac->ac_b_ex = ex;
1870 			ext4_mb_use_best_found(ac, e4b);
1871 		}
1872 	} else if (max >= ac->ac_g_ex.fe_len) {
1873 		BUG_ON(ex.fe_len <= 0);
1874 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1875 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1876 		ac->ac_found++;
1877 		ac->ac_b_ex = ex;
1878 		ext4_mb_use_best_found(ac, e4b);
1879 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1880 		/* Sometimes, caller may want to merge even small
1881 		 * number of blocks to an existing extent */
1882 		BUG_ON(ex.fe_len <= 0);
1883 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1884 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1885 		ac->ac_found++;
1886 		ac->ac_b_ex = ex;
1887 		ext4_mb_use_best_found(ac, e4b);
1888 	}
1889 	ext4_unlock_group(ac->ac_sb, group);
1890 	ext4_mb_unload_buddy(e4b);
1891 
1892 	return 0;
1893 }
1894 
1895 /*
1896  * The routine scans buddy structures (not bitmap!) from given order
1897  * to max order and tries to find big enough chunk to satisfy the req
1898  */
1899 static noinline_for_stack
1900 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1901 					struct ext4_buddy *e4b)
1902 {
1903 	struct super_block *sb = ac->ac_sb;
1904 	struct ext4_group_info *grp = e4b->bd_info;
1905 	void *buddy;
1906 	int i;
1907 	int k;
1908 	int max;
1909 
1910 	BUG_ON(ac->ac_2order <= 0);
1911 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1912 		if (grp->bb_counters[i] == 0)
1913 			continue;
1914 
1915 		buddy = mb_find_buddy(e4b, i, &max);
1916 		BUG_ON(buddy == NULL);
1917 
1918 		k = mb_find_next_zero_bit(buddy, max, 0);
1919 		BUG_ON(k >= max);
1920 
1921 		ac->ac_found++;
1922 
1923 		ac->ac_b_ex.fe_len = 1 << i;
1924 		ac->ac_b_ex.fe_start = k << i;
1925 		ac->ac_b_ex.fe_group = e4b->bd_group;
1926 
1927 		ext4_mb_use_best_found(ac, e4b);
1928 
1929 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1930 
1931 		if (EXT4_SB(sb)->s_mb_stats)
1932 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1933 
1934 		break;
1935 	}
1936 }
1937 
1938 /*
1939  * The routine scans the group and measures all found extents.
1940  * In order to optimize scanning, caller must pass number of
1941  * free blocks in the group, so the routine can know upper limit.
1942  */
1943 static noinline_for_stack
1944 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1945 					struct ext4_buddy *e4b)
1946 {
1947 	struct super_block *sb = ac->ac_sb;
1948 	void *bitmap = e4b->bd_bitmap;
1949 	struct ext4_free_extent ex;
1950 	int i;
1951 	int free;
1952 
1953 	free = e4b->bd_info->bb_free;
1954 	BUG_ON(free <= 0);
1955 
1956 	i = e4b->bd_info->bb_first_free;
1957 
1958 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1959 		i = mb_find_next_zero_bit(bitmap,
1960 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1961 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1962 			/*
1963 			 * IF we have corrupt bitmap, we won't find any
1964 			 * free blocks even though group info says we
1965 			 * we have free blocks
1966 			 */
1967 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1968 					"%d free clusters as per "
1969 					"group info. But bitmap says 0",
1970 					free);
1971 			break;
1972 		}
1973 
1974 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
1975 		BUG_ON(ex.fe_len <= 0);
1976 		if (free < ex.fe_len) {
1977 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1978 					"%d free clusters as per "
1979 					"group info. But got %d blocks",
1980 					free, ex.fe_len);
1981 			/*
1982 			 * The number of free blocks differs. This mostly
1983 			 * indicate that the bitmap is corrupt. So exit
1984 			 * without claiming the space.
1985 			 */
1986 			break;
1987 		}
1988 		ex.fe_logical = 0xDEADC0DE; /* debug value */
1989 		ext4_mb_measure_extent(ac, &ex, e4b);
1990 
1991 		i += ex.fe_len;
1992 		free -= ex.fe_len;
1993 	}
1994 
1995 	ext4_mb_check_limits(ac, e4b, 1);
1996 }
1997 
1998 /*
1999  * This is a special case for storages like raid5
2000  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2001  */
2002 static noinline_for_stack
2003 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2004 				 struct ext4_buddy *e4b)
2005 {
2006 	struct super_block *sb = ac->ac_sb;
2007 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2008 	void *bitmap = e4b->bd_bitmap;
2009 	struct ext4_free_extent ex;
2010 	ext4_fsblk_t first_group_block;
2011 	ext4_fsblk_t a;
2012 	ext4_grpblk_t i;
2013 	int max;
2014 
2015 	BUG_ON(sbi->s_stripe == 0);
2016 
2017 	/* find first stripe-aligned block in group */
2018 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2019 
2020 	a = first_group_block + sbi->s_stripe - 1;
2021 	do_div(a, sbi->s_stripe);
2022 	i = (a * sbi->s_stripe) - first_group_block;
2023 
2024 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2025 		if (!mb_test_bit(i, bitmap)) {
2026 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2027 			if (max >= sbi->s_stripe) {
2028 				ac->ac_found++;
2029 				ex.fe_logical = 0xDEADF00D; /* debug value */
2030 				ac->ac_b_ex = ex;
2031 				ext4_mb_use_best_found(ac, e4b);
2032 				break;
2033 			}
2034 		}
2035 		i += sbi->s_stripe;
2036 	}
2037 }
2038 
2039 /*
2040  * This is now called BEFORE we load the buddy bitmap.
2041  * Returns either 1 or 0 indicating that the group is either suitable
2042  * for the allocation or not. In addition it can also return negative
2043  * error code when something goes wrong.
2044  */
2045 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
2046 				ext4_group_t group, int cr)
2047 {
2048 	unsigned free, fragments;
2049 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2050 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2051 
2052 	BUG_ON(cr < 0 || cr >= 4);
2053 
2054 	free = grp->bb_free;
2055 	if (free == 0)
2056 		return 0;
2057 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2058 		return 0;
2059 
2060 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2061 		return 0;
2062 
2063 	/* We only do this if the grp has never been initialized */
2064 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2065 		int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
2066 		if (ret)
2067 			return ret;
2068 	}
2069 
2070 	fragments = grp->bb_fragments;
2071 	if (fragments == 0)
2072 		return 0;
2073 
2074 	switch (cr) {
2075 	case 0:
2076 		BUG_ON(ac->ac_2order == 0);
2077 
2078 		/* Avoid using the first bg of a flexgroup for data files */
2079 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2080 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2081 		    ((group % flex_size) == 0))
2082 			return 0;
2083 
2084 		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
2085 		    (free / fragments) >= ac->ac_g_ex.fe_len)
2086 			return 1;
2087 
2088 		if (grp->bb_largest_free_order < ac->ac_2order)
2089 			return 0;
2090 
2091 		return 1;
2092 	case 1:
2093 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2094 			return 1;
2095 		break;
2096 	case 2:
2097 		if (free >= ac->ac_g_ex.fe_len)
2098 			return 1;
2099 		break;
2100 	case 3:
2101 		return 1;
2102 	default:
2103 		BUG();
2104 	}
2105 
2106 	return 0;
2107 }
2108 
2109 static noinline_for_stack int
2110 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2111 {
2112 	ext4_group_t ngroups, group, i;
2113 	int cr;
2114 	int err = 0, first_err = 0;
2115 	struct ext4_sb_info *sbi;
2116 	struct super_block *sb;
2117 	struct ext4_buddy e4b;
2118 
2119 	sb = ac->ac_sb;
2120 	sbi = EXT4_SB(sb);
2121 	ngroups = ext4_get_groups_count(sb);
2122 	/* non-extent files are limited to low blocks/groups */
2123 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2124 		ngroups = sbi->s_blockfile_groups;
2125 
2126 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2127 
2128 	/* first, try the goal */
2129 	err = ext4_mb_find_by_goal(ac, &e4b);
2130 	if (err || ac->ac_status == AC_STATUS_FOUND)
2131 		goto out;
2132 
2133 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2134 		goto out;
2135 
2136 	/*
2137 	 * ac->ac2_order is set only if the fe_len is a power of 2
2138 	 * if ac2_order is set we also set criteria to 0 so that we
2139 	 * try exact allocation using buddy.
2140 	 */
2141 	i = fls(ac->ac_g_ex.fe_len);
2142 	ac->ac_2order = 0;
2143 	/*
2144 	 * We search using buddy data only if the order of the request
2145 	 * is greater than equal to the sbi_s_mb_order2_reqs
2146 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2147 	 * We also support searching for power-of-two requests only for
2148 	 * requests upto maximum buddy size we have constructed.
2149 	 */
2150 	if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
2151 		/*
2152 		 * This should tell if fe_len is exactly power of 2
2153 		 */
2154 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2155 			ac->ac_2order = i - 1;
2156 	}
2157 
2158 	/* if stream allocation is enabled, use global goal */
2159 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2160 		/* TBD: may be hot point */
2161 		spin_lock(&sbi->s_md_lock);
2162 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2163 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2164 		spin_unlock(&sbi->s_md_lock);
2165 	}
2166 
2167 	/* Let's just scan groups to find more-less suitable blocks */
2168 	cr = ac->ac_2order ? 0 : 1;
2169 	/*
2170 	 * cr == 0 try to get exact allocation,
2171 	 * cr == 3  try to get anything
2172 	 */
2173 repeat:
2174 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2175 		ac->ac_criteria = cr;
2176 		/*
2177 		 * searching for the right group start
2178 		 * from the goal value specified
2179 		 */
2180 		group = ac->ac_g_ex.fe_group;
2181 
2182 		for (i = 0; i < ngroups; group++, i++) {
2183 			int ret = 0;
2184 			cond_resched();
2185 			/*
2186 			 * Artificially restricted ngroups for non-extent
2187 			 * files makes group > ngroups possible on first loop.
2188 			 */
2189 			if (group >= ngroups)
2190 				group = 0;
2191 
2192 			/* This now checks without needing the buddy page */
2193 			ret = ext4_mb_good_group(ac, group, cr);
2194 			if (ret <= 0) {
2195 				if (!first_err)
2196 					first_err = ret;
2197 				continue;
2198 			}
2199 
2200 			err = ext4_mb_load_buddy(sb, group, &e4b);
2201 			if (err)
2202 				goto out;
2203 
2204 			ext4_lock_group(sb, group);
2205 
2206 			/*
2207 			 * We need to check again after locking the
2208 			 * block group
2209 			 */
2210 			ret = ext4_mb_good_group(ac, group, cr);
2211 			if (ret <= 0) {
2212 				ext4_unlock_group(sb, group);
2213 				ext4_mb_unload_buddy(&e4b);
2214 				if (!first_err)
2215 					first_err = ret;
2216 				continue;
2217 			}
2218 
2219 			ac->ac_groups_scanned++;
2220 			if (cr == 0)
2221 				ext4_mb_simple_scan_group(ac, &e4b);
2222 			else if (cr == 1 && sbi->s_stripe &&
2223 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2224 				ext4_mb_scan_aligned(ac, &e4b);
2225 			else
2226 				ext4_mb_complex_scan_group(ac, &e4b);
2227 
2228 			ext4_unlock_group(sb, group);
2229 			ext4_mb_unload_buddy(&e4b);
2230 
2231 			if (ac->ac_status != AC_STATUS_CONTINUE)
2232 				break;
2233 		}
2234 	}
2235 
2236 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2237 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2238 		/*
2239 		 * We've been searching too long. Let's try to allocate
2240 		 * the best chunk we've found so far
2241 		 */
2242 
2243 		ext4_mb_try_best_found(ac, &e4b);
2244 		if (ac->ac_status != AC_STATUS_FOUND) {
2245 			/*
2246 			 * Someone more lucky has already allocated it.
2247 			 * The only thing we can do is just take first
2248 			 * found block(s)
2249 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2250 			 */
2251 			ac->ac_b_ex.fe_group = 0;
2252 			ac->ac_b_ex.fe_start = 0;
2253 			ac->ac_b_ex.fe_len = 0;
2254 			ac->ac_status = AC_STATUS_CONTINUE;
2255 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2256 			cr = 3;
2257 			atomic_inc(&sbi->s_mb_lost_chunks);
2258 			goto repeat;
2259 		}
2260 	}
2261 out:
2262 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2263 		err = first_err;
2264 	return err;
2265 }
2266 
2267 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2268 {
2269 	struct super_block *sb = seq->private;
2270 	ext4_group_t group;
2271 
2272 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2273 		return NULL;
2274 	group = *pos + 1;
2275 	return (void *) ((unsigned long) group);
2276 }
2277 
2278 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2279 {
2280 	struct super_block *sb = seq->private;
2281 	ext4_group_t group;
2282 
2283 	++*pos;
2284 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2285 		return NULL;
2286 	group = *pos + 1;
2287 	return (void *) ((unsigned long) group);
2288 }
2289 
2290 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2291 {
2292 	struct super_block *sb = seq->private;
2293 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2294 	int i;
2295 	int err, buddy_loaded = 0;
2296 	struct ext4_buddy e4b;
2297 	struct ext4_group_info *grinfo;
2298 	unsigned char blocksize_bits = min_t(unsigned char,
2299 					     sb->s_blocksize_bits,
2300 					     EXT4_MAX_BLOCK_LOG_SIZE);
2301 	struct sg {
2302 		struct ext4_group_info info;
2303 		ext4_grpblk_t counters[blocksize_bits + 2];
2304 	} sg;
2305 
2306 	group--;
2307 	if (group == 0)
2308 		seq_puts(seq, "#group: free  frags first ["
2309 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2310 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2311 
2312 	grinfo = ext4_get_group_info(sb, group);
2313 	/* Load the group info in memory only if not already loaded. */
2314 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2315 		err = ext4_mb_load_buddy(sb, group, &e4b);
2316 		if (err) {
2317 			seq_printf(seq, "#%-5u: I/O error\n", group);
2318 			return 0;
2319 		}
2320 		buddy_loaded = 1;
2321 	}
2322 
2323 	memcpy(&sg, ext4_get_group_info(sb, group), sizeof(sg));
2324 
2325 	if (buddy_loaded)
2326 		ext4_mb_unload_buddy(&e4b);
2327 
2328 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2329 			sg.info.bb_fragments, sg.info.bb_first_free);
2330 	for (i = 0; i <= 13; i++)
2331 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2332 				sg.info.bb_counters[i] : 0);
2333 	seq_printf(seq, " ]\n");
2334 
2335 	return 0;
2336 }
2337 
2338 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2339 {
2340 }
2341 
2342 static const struct seq_operations ext4_mb_seq_groups_ops = {
2343 	.start  = ext4_mb_seq_groups_start,
2344 	.next   = ext4_mb_seq_groups_next,
2345 	.stop   = ext4_mb_seq_groups_stop,
2346 	.show   = ext4_mb_seq_groups_show,
2347 };
2348 
2349 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2350 {
2351 	struct super_block *sb = PDE_DATA(inode);
2352 	int rc;
2353 
2354 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2355 	if (rc == 0) {
2356 		struct seq_file *m = file->private_data;
2357 		m->private = sb;
2358 	}
2359 	return rc;
2360 
2361 }
2362 
2363 const struct file_operations ext4_seq_mb_groups_fops = {
2364 	.open		= ext4_mb_seq_groups_open,
2365 	.read		= seq_read,
2366 	.llseek		= seq_lseek,
2367 	.release	= seq_release,
2368 };
2369 
2370 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2371 {
2372 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2373 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2374 
2375 	BUG_ON(!cachep);
2376 	return cachep;
2377 }
2378 
2379 /*
2380  * Allocate the top-level s_group_info array for the specified number
2381  * of groups
2382  */
2383 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2384 {
2385 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2386 	unsigned size;
2387 	struct ext4_group_info ***new_groupinfo;
2388 
2389 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2390 		EXT4_DESC_PER_BLOCK_BITS(sb);
2391 	if (size <= sbi->s_group_info_size)
2392 		return 0;
2393 
2394 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2395 	new_groupinfo = kvzalloc(size, GFP_KERNEL);
2396 	if (!new_groupinfo) {
2397 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2398 		return -ENOMEM;
2399 	}
2400 	if (sbi->s_group_info) {
2401 		memcpy(new_groupinfo, sbi->s_group_info,
2402 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2403 		kvfree(sbi->s_group_info);
2404 	}
2405 	sbi->s_group_info = new_groupinfo;
2406 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2407 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2408 		   sbi->s_group_info_size);
2409 	return 0;
2410 }
2411 
2412 /* Create and initialize ext4_group_info data for the given group. */
2413 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2414 			  struct ext4_group_desc *desc)
2415 {
2416 	int i;
2417 	int metalen = 0;
2418 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2419 	struct ext4_group_info **meta_group_info;
2420 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2421 
2422 	/*
2423 	 * First check if this group is the first of a reserved block.
2424 	 * If it's true, we have to allocate a new table of pointers
2425 	 * to ext4_group_info structures
2426 	 */
2427 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2428 		metalen = sizeof(*meta_group_info) <<
2429 			EXT4_DESC_PER_BLOCK_BITS(sb);
2430 		meta_group_info = kmalloc(metalen, GFP_NOFS);
2431 		if (meta_group_info == NULL) {
2432 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2433 				 "for a buddy group");
2434 			goto exit_meta_group_info;
2435 		}
2436 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2437 			meta_group_info;
2438 	}
2439 
2440 	meta_group_info =
2441 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2442 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2443 
2444 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2445 	if (meta_group_info[i] == NULL) {
2446 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2447 		goto exit_group_info;
2448 	}
2449 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2450 		&(meta_group_info[i]->bb_state));
2451 
2452 	/*
2453 	 * initialize bb_free to be able to skip
2454 	 * empty groups without initialization
2455 	 */
2456 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2457 		meta_group_info[i]->bb_free =
2458 			ext4_free_clusters_after_init(sb, group, desc);
2459 	} else {
2460 		meta_group_info[i]->bb_free =
2461 			ext4_free_group_clusters(sb, desc);
2462 	}
2463 
2464 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2465 	init_rwsem(&meta_group_info[i]->alloc_sem);
2466 	meta_group_info[i]->bb_free_root = RB_ROOT;
2467 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2468 
2469 #ifdef DOUBLE_CHECK
2470 	{
2471 		struct buffer_head *bh;
2472 		meta_group_info[i]->bb_bitmap =
2473 			kmalloc(sb->s_blocksize, GFP_NOFS);
2474 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2475 		bh = ext4_read_block_bitmap(sb, group);
2476 		BUG_ON(IS_ERR_OR_NULL(bh));
2477 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2478 			sb->s_blocksize);
2479 		put_bh(bh);
2480 	}
2481 #endif
2482 
2483 	return 0;
2484 
2485 exit_group_info:
2486 	/* If a meta_group_info table has been allocated, release it now */
2487 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2488 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2489 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2490 	}
2491 exit_meta_group_info:
2492 	return -ENOMEM;
2493 } /* ext4_mb_add_groupinfo */
2494 
2495 static int ext4_mb_init_backend(struct super_block *sb)
2496 {
2497 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2498 	ext4_group_t i;
2499 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2500 	int err;
2501 	struct ext4_group_desc *desc;
2502 	struct kmem_cache *cachep;
2503 
2504 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2505 	if (err)
2506 		return err;
2507 
2508 	sbi->s_buddy_cache = new_inode(sb);
2509 	if (sbi->s_buddy_cache == NULL) {
2510 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2511 		goto err_freesgi;
2512 	}
2513 	/* To avoid potentially colliding with an valid on-disk inode number,
2514 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2515 	 * not in the inode hash, so it should never be found by iget(), but
2516 	 * this will avoid confusion if it ever shows up during debugging. */
2517 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2518 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2519 	for (i = 0; i < ngroups; i++) {
2520 		desc = ext4_get_group_desc(sb, i, NULL);
2521 		if (desc == NULL) {
2522 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2523 			goto err_freebuddy;
2524 		}
2525 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2526 			goto err_freebuddy;
2527 	}
2528 
2529 	return 0;
2530 
2531 err_freebuddy:
2532 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2533 	while (i-- > 0)
2534 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2535 	i = sbi->s_group_info_size;
2536 	while (i-- > 0)
2537 		kfree(sbi->s_group_info[i]);
2538 	iput(sbi->s_buddy_cache);
2539 err_freesgi:
2540 	kvfree(sbi->s_group_info);
2541 	return -ENOMEM;
2542 }
2543 
2544 static void ext4_groupinfo_destroy_slabs(void)
2545 {
2546 	int i;
2547 
2548 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2549 		if (ext4_groupinfo_caches[i])
2550 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2551 		ext4_groupinfo_caches[i] = NULL;
2552 	}
2553 }
2554 
2555 static int ext4_groupinfo_create_slab(size_t size)
2556 {
2557 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2558 	int slab_size;
2559 	int blocksize_bits = order_base_2(size);
2560 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2561 	struct kmem_cache *cachep;
2562 
2563 	if (cache_index >= NR_GRPINFO_CACHES)
2564 		return -EINVAL;
2565 
2566 	if (unlikely(cache_index < 0))
2567 		cache_index = 0;
2568 
2569 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2570 	if (ext4_groupinfo_caches[cache_index]) {
2571 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2572 		return 0;	/* Already created */
2573 	}
2574 
2575 	slab_size = offsetof(struct ext4_group_info,
2576 				bb_counters[blocksize_bits + 2]);
2577 
2578 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2579 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2580 					NULL);
2581 
2582 	ext4_groupinfo_caches[cache_index] = cachep;
2583 
2584 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2585 	if (!cachep) {
2586 		printk(KERN_EMERG
2587 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2588 		return -ENOMEM;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
2594 int ext4_mb_init(struct super_block *sb)
2595 {
2596 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2597 	unsigned i, j;
2598 	unsigned offset, offset_incr;
2599 	unsigned max;
2600 	int ret;
2601 
2602 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2603 
2604 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2605 	if (sbi->s_mb_offsets == NULL) {
2606 		ret = -ENOMEM;
2607 		goto out;
2608 	}
2609 
2610 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2611 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2612 	if (sbi->s_mb_maxs == NULL) {
2613 		ret = -ENOMEM;
2614 		goto out;
2615 	}
2616 
2617 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2618 	if (ret < 0)
2619 		goto out;
2620 
2621 	/* order 0 is regular bitmap */
2622 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2623 	sbi->s_mb_offsets[0] = 0;
2624 
2625 	i = 1;
2626 	offset = 0;
2627 	offset_incr = 1 << (sb->s_blocksize_bits - 1);
2628 	max = sb->s_blocksize << 2;
2629 	do {
2630 		sbi->s_mb_offsets[i] = offset;
2631 		sbi->s_mb_maxs[i] = max;
2632 		offset += offset_incr;
2633 		offset_incr = offset_incr >> 1;
2634 		max = max >> 1;
2635 		i++;
2636 	} while (i <= sb->s_blocksize_bits + 1);
2637 
2638 	spin_lock_init(&sbi->s_md_lock);
2639 	spin_lock_init(&sbi->s_bal_lock);
2640 	sbi->s_mb_free_pending = 0;
2641 	INIT_LIST_HEAD(&sbi->s_freed_data_list);
2642 
2643 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2644 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2645 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2646 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2647 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2648 	/*
2649 	 * The default group preallocation is 512, which for 4k block
2650 	 * sizes translates to 2 megabytes.  However for bigalloc file
2651 	 * systems, this is probably too big (i.e, if the cluster size
2652 	 * is 1 megabyte, then group preallocation size becomes half a
2653 	 * gigabyte!).  As a default, we will keep a two megabyte
2654 	 * group pralloc size for cluster sizes up to 64k, and after
2655 	 * that, we will force a minimum group preallocation size of
2656 	 * 32 clusters.  This translates to 8 megs when the cluster
2657 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2658 	 * which seems reasonable as a default.
2659 	 */
2660 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2661 				       sbi->s_cluster_bits, 32);
2662 	/*
2663 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2664 	 * to the lowest multiple of s_stripe which is bigger than
2665 	 * the s_mb_group_prealloc as determined above. We want
2666 	 * the preallocation size to be an exact multiple of the
2667 	 * RAID stripe size so that preallocations don't fragment
2668 	 * the stripes.
2669 	 */
2670 	if (sbi->s_stripe > 1) {
2671 		sbi->s_mb_group_prealloc = roundup(
2672 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2673 	}
2674 
2675 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2676 	if (sbi->s_locality_groups == NULL) {
2677 		ret = -ENOMEM;
2678 		goto out;
2679 	}
2680 	for_each_possible_cpu(i) {
2681 		struct ext4_locality_group *lg;
2682 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2683 		mutex_init(&lg->lg_mutex);
2684 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2685 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2686 		spin_lock_init(&lg->lg_prealloc_lock);
2687 	}
2688 
2689 	/* init file for buddy data */
2690 	ret = ext4_mb_init_backend(sb);
2691 	if (ret != 0)
2692 		goto out_free_locality_groups;
2693 
2694 	return 0;
2695 
2696 out_free_locality_groups:
2697 	free_percpu(sbi->s_locality_groups);
2698 	sbi->s_locality_groups = NULL;
2699 out:
2700 	kfree(sbi->s_mb_offsets);
2701 	sbi->s_mb_offsets = NULL;
2702 	kfree(sbi->s_mb_maxs);
2703 	sbi->s_mb_maxs = NULL;
2704 	return ret;
2705 }
2706 
2707 /* need to called with the ext4 group lock held */
2708 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2709 {
2710 	struct ext4_prealloc_space *pa;
2711 	struct list_head *cur, *tmp;
2712 	int count = 0;
2713 
2714 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2715 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2716 		list_del(&pa->pa_group_list);
2717 		count++;
2718 		kmem_cache_free(ext4_pspace_cachep, pa);
2719 	}
2720 	if (count)
2721 		mb_debug(1, "mballoc: %u PAs left\n", count);
2722 
2723 }
2724 
2725 int ext4_mb_release(struct super_block *sb)
2726 {
2727 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2728 	ext4_group_t i;
2729 	int num_meta_group_infos;
2730 	struct ext4_group_info *grinfo;
2731 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2732 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2733 
2734 	if (sbi->s_group_info) {
2735 		for (i = 0; i < ngroups; i++) {
2736 			grinfo = ext4_get_group_info(sb, i);
2737 #ifdef DOUBLE_CHECK
2738 			kfree(grinfo->bb_bitmap);
2739 #endif
2740 			ext4_lock_group(sb, i);
2741 			ext4_mb_cleanup_pa(grinfo);
2742 			ext4_unlock_group(sb, i);
2743 			kmem_cache_free(cachep, grinfo);
2744 		}
2745 		num_meta_group_infos = (ngroups +
2746 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2747 			EXT4_DESC_PER_BLOCK_BITS(sb);
2748 		for (i = 0; i < num_meta_group_infos; i++)
2749 			kfree(sbi->s_group_info[i]);
2750 		kvfree(sbi->s_group_info);
2751 	}
2752 	kfree(sbi->s_mb_offsets);
2753 	kfree(sbi->s_mb_maxs);
2754 	iput(sbi->s_buddy_cache);
2755 	if (sbi->s_mb_stats) {
2756 		ext4_msg(sb, KERN_INFO,
2757 		       "mballoc: %u blocks %u reqs (%u success)",
2758 				atomic_read(&sbi->s_bal_allocated),
2759 				atomic_read(&sbi->s_bal_reqs),
2760 				atomic_read(&sbi->s_bal_success));
2761 		ext4_msg(sb, KERN_INFO,
2762 		      "mballoc: %u extents scanned, %u goal hits, "
2763 				"%u 2^N hits, %u breaks, %u lost",
2764 				atomic_read(&sbi->s_bal_ex_scanned),
2765 				atomic_read(&sbi->s_bal_goals),
2766 				atomic_read(&sbi->s_bal_2orders),
2767 				atomic_read(&sbi->s_bal_breaks),
2768 				atomic_read(&sbi->s_mb_lost_chunks));
2769 		ext4_msg(sb, KERN_INFO,
2770 		       "mballoc: %lu generated and it took %Lu",
2771 				sbi->s_mb_buddies_generated,
2772 				sbi->s_mb_generation_time);
2773 		ext4_msg(sb, KERN_INFO,
2774 		       "mballoc: %u preallocated, %u discarded",
2775 				atomic_read(&sbi->s_mb_preallocated),
2776 				atomic_read(&sbi->s_mb_discarded));
2777 	}
2778 
2779 	free_percpu(sbi->s_locality_groups);
2780 
2781 	return 0;
2782 }
2783 
2784 static inline int ext4_issue_discard(struct super_block *sb,
2785 		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
2786 		struct bio **biop)
2787 {
2788 	ext4_fsblk_t discard_block;
2789 
2790 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2791 			 ext4_group_first_block_no(sb, block_group));
2792 	count = EXT4_C2B(EXT4_SB(sb), count);
2793 	trace_ext4_discard_blocks(sb,
2794 			(unsigned long long) discard_block, count);
2795 	if (biop) {
2796 		return __blkdev_issue_discard(sb->s_bdev,
2797 			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
2798 			(sector_t)count << (sb->s_blocksize_bits - 9),
2799 			GFP_NOFS, 0, biop);
2800 	} else
2801 		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2802 }
2803 
2804 static void ext4_free_data_in_buddy(struct super_block *sb,
2805 				    struct ext4_free_data *entry)
2806 {
2807 	struct ext4_buddy e4b;
2808 	struct ext4_group_info *db;
2809 	int err, count = 0, count2 = 0;
2810 
2811 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2812 		 entry->efd_count, entry->efd_group, entry);
2813 
2814 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2815 	/* we expect to find existing buddy because it's pinned */
2816 	BUG_ON(err != 0);
2817 
2818 	spin_lock(&EXT4_SB(sb)->s_md_lock);
2819 	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
2820 	spin_unlock(&EXT4_SB(sb)->s_md_lock);
2821 
2822 	db = e4b.bd_info;
2823 	/* there are blocks to put in buddy to make them really free */
2824 	count += entry->efd_count;
2825 	count2++;
2826 	ext4_lock_group(sb, entry->efd_group);
2827 	/* Take it out of per group rb tree */
2828 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2829 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2830 
2831 	/*
2832 	 * Clear the trimmed flag for the group so that the next
2833 	 * ext4_trim_fs can trim it.
2834 	 * If the volume is mounted with -o discard, online discard
2835 	 * is supported and the free blocks will be trimmed online.
2836 	 */
2837 	if (!test_opt(sb, DISCARD))
2838 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2839 
2840 	if (!db->bb_free_root.rb_node) {
2841 		/* No more items in the per group rb tree
2842 		 * balance refcounts from ext4_mb_free_metadata()
2843 		 */
2844 		put_page(e4b.bd_buddy_page);
2845 		put_page(e4b.bd_bitmap_page);
2846 	}
2847 	ext4_unlock_group(sb, entry->efd_group);
2848 	kmem_cache_free(ext4_free_data_cachep, entry);
2849 	ext4_mb_unload_buddy(&e4b);
2850 
2851 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2852 }
2853 
2854 /*
2855  * This function is called by the jbd2 layer once the commit has finished,
2856  * so we know we can free the blocks that were released with that commit.
2857  */
2858 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
2859 {
2860 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2861 	struct ext4_free_data *entry, *tmp;
2862 	struct bio *discard_bio = NULL;
2863 	struct list_head freed_data_list;
2864 	struct list_head *cut_pos = NULL;
2865 	int err;
2866 
2867 	INIT_LIST_HEAD(&freed_data_list);
2868 
2869 	spin_lock(&sbi->s_md_lock);
2870 	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
2871 		if (entry->efd_tid != commit_tid)
2872 			break;
2873 		cut_pos = &entry->efd_list;
2874 	}
2875 	if (cut_pos)
2876 		list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
2877 				  cut_pos);
2878 	spin_unlock(&sbi->s_md_lock);
2879 
2880 	if (test_opt(sb, DISCARD)) {
2881 		list_for_each_entry(entry, &freed_data_list, efd_list) {
2882 			err = ext4_issue_discard(sb, entry->efd_group,
2883 						 entry->efd_start_cluster,
2884 						 entry->efd_count,
2885 						 &discard_bio);
2886 			if (err && err != -EOPNOTSUPP) {
2887 				ext4_msg(sb, KERN_WARNING, "discard request in"
2888 					 " group:%d block:%d count:%d failed"
2889 					 " with %d", entry->efd_group,
2890 					 entry->efd_start_cluster,
2891 					 entry->efd_count, err);
2892 			} else if (err == -EOPNOTSUPP)
2893 				break;
2894 		}
2895 
2896 		if (discard_bio) {
2897 			submit_bio_wait(discard_bio);
2898 			bio_put(discard_bio);
2899 		}
2900 	}
2901 
2902 	list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
2903 		ext4_free_data_in_buddy(sb, entry);
2904 }
2905 
2906 int __init ext4_init_mballoc(void)
2907 {
2908 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2909 					SLAB_RECLAIM_ACCOUNT);
2910 	if (ext4_pspace_cachep == NULL)
2911 		return -ENOMEM;
2912 
2913 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2914 				    SLAB_RECLAIM_ACCOUNT);
2915 	if (ext4_ac_cachep == NULL) {
2916 		kmem_cache_destroy(ext4_pspace_cachep);
2917 		return -ENOMEM;
2918 	}
2919 
2920 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2921 					   SLAB_RECLAIM_ACCOUNT);
2922 	if (ext4_free_data_cachep == NULL) {
2923 		kmem_cache_destroy(ext4_pspace_cachep);
2924 		kmem_cache_destroy(ext4_ac_cachep);
2925 		return -ENOMEM;
2926 	}
2927 	return 0;
2928 }
2929 
2930 void ext4_exit_mballoc(void)
2931 {
2932 	/*
2933 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2934 	 * before destroying the slab cache.
2935 	 */
2936 	rcu_barrier();
2937 	kmem_cache_destroy(ext4_pspace_cachep);
2938 	kmem_cache_destroy(ext4_ac_cachep);
2939 	kmem_cache_destroy(ext4_free_data_cachep);
2940 	ext4_groupinfo_destroy_slabs();
2941 }
2942 
2943 
2944 /*
2945  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2946  * Returns 0 if success or error code
2947  */
2948 static noinline_for_stack int
2949 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2950 				handle_t *handle, unsigned int reserv_clstrs)
2951 {
2952 	struct buffer_head *bitmap_bh = NULL;
2953 	struct ext4_group_desc *gdp;
2954 	struct buffer_head *gdp_bh;
2955 	struct ext4_sb_info *sbi;
2956 	struct super_block *sb;
2957 	ext4_fsblk_t block;
2958 	int err, len;
2959 
2960 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2961 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2962 
2963 	sb = ac->ac_sb;
2964 	sbi = EXT4_SB(sb);
2965 
2966 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2967 	if (IS_ERR(bitmap_bh)) {
2968 		err = PTR_ERR(bitmap_bh);
2969 		bitmap_bh = NULL;
2970 		goto out_err;
2971 	}
2972 
2973 	BUFFER_TRACE(bitmap_bh, "getting write access");
2974 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2975 	if (err)
2976 		goto out_err;
2977 
2978 	err = -EIO;
2979 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2980 	if (!gdp)
2981 		goto out_err;
2982 
2983 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2984 			ext4_free_group_clusters(sb, gdp));
2985 
2986 	BUFFER_TRACE(gdp_bh, "get_write_access");
2987 	err = ext4_journal_get_write_access(handle, gdp_bh);
2988 	if (err)
2989 		goto out_err;
2990 
2991 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2992 
2993 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2994 	if (!ext4_data_block_valid(sbi, block, len)) {
2995 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2996 			   "fs metadata", block, block+len);
2997 		/* File system mounted not to panic on error
2998 		 * Fix the bitmap and return EFSCORRUPTED
2999 		 * We leak some of the blocks here.
3000 		 */
3001 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3002 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3003 			      ac->ac_b_ex.fe_len);
3004 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3005 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3006 		if (!err)
3007 			err = -EFSCORRUPTED;
3008 		goto out_err;
3009 	}
3010 
3011 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3012 #ifdef AGGRESSIVE_CHECK
3013 	{
3014 		int i;
3015 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3016 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3017 						bitmap_bh->b_data));
3018 		}
3019 	}
3020 #endif
3021 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3022 		      ac->ac_b_ex.fe_len);
3023 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
3024 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3025 		ext4_free_group_clusters_set(sb, gdp,
3026 					     ext4_free_clusters_after_init(sb,
3027 						ac->ac_b_ex.fe_group, gdp));
3028 	}
3029 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3030 	ext4_free_group_clusters_set(sb, gdp, len);
3031 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3032 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3033 
3034 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3035 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3036 	/*
3037 	 * Now reduce the dirty block count also. Should not go negative
3038 	 */
3039 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3040 		/* release all the reserved blocks if non delalloc */
3041 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3042 				   reserv_clstrs);
3043 
3044 	if (sbi->s_log_groups_per_flex) {
3045 		ext4_group_t flex_group = ext4_flex_group(sbi,
3046 							  ac->ac_b_ex.fe_group);
3047 		atomic64_sub(ac->ac_b_ex.fe_len,
3048 			     &sbi->s_flex_groups[flex_group].free_clusters);
3049 	}
3050 
3051 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3052 	if (err)
3053 		goto out_err;
3054 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3055 
3056 out_err:
3057 	brelse(bitmap_bh);
3058 	return err;
3059 }
3060 
3061 /*
3062  * here we normalize request for locality group
3063  * Group request are normalized to s_mb_group_prealloc, which goes to
3064  * s_strip if we set the same via mount option.
3065  * s_mb_group_prealloc can be configured via
3066  * /sys/fs/ext4/<partition>/mb_group_prealloc
3067  *
3068  * XXX: should we try to preallocate more than the group has now?
3069  */
3070 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3071 {
3072 	struct super_block *sb = ac->ac_sb;
3073 	struct ext4_locality_group *lg = ac->ac_lg;
3074 
3075 	BUG_ON(lg == NULL);
3076 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3077 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
3078 		current->pid, ac->ac_g_ex.fe_len);
3079 }
3080 
3081 /*
3082  * Normalization means making request better in terms of
3083  * size and alignment
3084  */
3085 static noinline_for_stack void
3086 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3087 				struct ext4_allocation_request *ar)
3088 {
3089 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3090 	int bsbits, max;
3091 	ext4_lblk_t end;
3092 	loff_t size, start_off;
3093 	loff_t orig_size __maybe_unused;
3094 	ext4_lblk_t start;
3095 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3096 	struct ext4_prealloc_space *pa;
3097 
3098 	/* do normalize only data requests, metadata requests
3099 	   do not need preallocation */
3100 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3101 		return;
3102 
3103 	/* sometime caller may want exact blocks */
3104 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3105 		return;
3106 
3107 	/* caller may indicate that preallocation isn't
3108 	 * required (it's a tail, for example) */
3109 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3110 		return;
3111 
3112 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3113 		ext4_mb_normalize_group_request(ac);
3114 		return ;
3115 	}
3116 
3117 	bsbits = ac->ac_sb->s_blocksize_bits;
3118 
3119 	/* first, let's learn actual file size
3120 	 * given current request is allocated */
3121 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3122 	size = size << bsbits;
3123 	if (size < i_size_read(ac->ac_inode))
3124 		size = i_size_read(ac->ac_inode);
3125 	orig_size = size;
3126 
3127 	/* max size of free chunks */
3128 	max = 2 << bsbits;
3129 
3130 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
3131 		(req <= (size) || max <= (chunk_size))
3132 
3133 	/* first, try to predict filesize */
3134 	/* XXX: should this table be tunable? */
3135 	start_off = 0;
3136 	if (size <= 16 * 1024) {
3137 		size = 16 * 1024;
3138 	} else if (size <= 32 * 1024) {
3139 		size = 32 * 1024;
3140 	} else if (size <= 64 * 1024) {
3141 		size = 64 * 1024;
3142 	} else if (size <= 128 * 1024) {
3143 		size = 128 * 1024;
3144 	} else if (size <= 256 * 1024) {
3145 		size = 256 * 1024;
3146 	} else if (size <= 512 * 1024) {
3147 		size = 512 * 1024;
3148 	} else if (size <= 1024 * 1024) {
3149 		size = 1024 * 1024;
3150 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3151 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3152 						(21 - bsbits)) << 21;
3153 		size = 2 * 1024 * 1024;
3154 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3155 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3156 							(22 - bsbits)) << 22;
3157 		size = 4 * 1024 * 1024;
3158 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3159 					(8<<20)>>bsbits, max, 8 * 1024)) {
3160 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3161 							(23 - bsbits)) << 23;
3162 		size = 8 * 1024 * 1024;
3163 	} else {
3164 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3165 		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3166 					      ac->ac_o_ex.fe_len) << bsbits;
3167 	}
3168 	size = size >> bsbits;
3169 	start = start_off >> bsbits;
3170 
3171 	/* don't cover already allocated blocks in selected range */
3172 	if (ar->pleft && start <= ar->lleft) {
3173 		size -= ar->lleft + 1 - start;
3174 		start = ar->lleft + 1;
3175 	}
3176 	if (ar->pright && start + size - 1 >= ar->lright)
3177 		size -= start + size - ar->lright;
3178 
3179 	/*
3180 	 * Trim allocation request for filesystems with artificially small
3181 	 * groups.
3182 	 */
3183 	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3184 		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3185 
3186 	end = start + size;
3187 
3188 	/* check we don't cross already preallocated blocks */
3189 	rcu_read_lock();
3190 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3191 		ext4_lblk_t pa_end;
3192 
3193 		if (pa->pa_deleted)
3194 			continue;
3195 		spin_lock(&pa->pa_lock);
3196 		if (pa->pa_deleted) {
3197 			spin_unlock(&pa->pa_lock);
3198 			continue;
3199 		}
3200 
3201 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3202 						  pa->pa_len);
3203 
3204 		/* PA must not overlap original request */
3205 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3206 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3207 
3208 		/* skip PAs this normalized request doesn't overlap with */
3209 		if (pa->pa_lstart >= end || pa_end <= start) {
3210 			spin_unlock(&pa->pa_lock);
3211 			continue;
3212 		}
3213 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3214 
3215 		/* adjust start or end to be adjacent to this pa */
3216 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3217 			BUG_ON(pa_end < start);
3218 			start = pa_end;
3219 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3220 			BUG_ON(pa->pa_lstart > end);
3221 			end = pa->pa_lstart;
3222 		}
3223 		spin_unlock(&pa->pa_lock);
3224 	}
3225 	rcu_read_unlock();
3226 	size = end - start;
3227 
3228 	/* XXX: extra loop to check we really don't overlap preallocations */
3229 	rcu_read_lock();
3230 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3231 		ext4_lblk_t pa_end;
3232 
3233 		spin_lock(&pa->pa_lock);
3234 		if (pa->pa_deleted == 0) {
3235 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3236 							  pa->pa_len);
3237 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3238 		}
3239 		spin_unlock(&pa->pa_lock);
3240 	}
3241 	rcu_read_unlock();
3242 
3243 	if (start + size <= ac->ac_o_ex.fe_logical &&
3244 			start > ac->ac_o_ex.fe_logical) {
3245 		ext4_msg(ac->ac_sb, KERN_ERR,
3246 			 "start %lu, size %lu, fe_logical %lu",
3247 			 (unsigned long) start, (unsigned long) size,
3248 			 (unsigned long) ac->ac_o_ex.fe_logical);
3249 		BUG();
3250 	}
3251 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3252 
3253 	/* now prepare goal request */
3254 
3255 	/* XXX: is it better to align blocks WRT to logical
3256 	 * placement or satisfy big request as is */
3257 	ac->ac_g_ex.fe_logical = start;
3258 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3259 
3260 	/* define goal start in order to merge */
3261 	if (ar->pright && (ar->lright == (start + size))) {
3262 		/* merge to the right */
3263 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3264 						&ac->ac_f_ex.fe_group,
3265 						&ac->ac_f_ex.fe_start);
3266 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3267 	}
3268 	if (ar->pleft && (ar->lleft + 1 == start)) {
3269 		/* merge to the left */
3270 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3271 						&ac->ac_f_ex.fe_group,
3272 						&ac->ac_f_ex.fe_start);
3273 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3274 	}
3275 
3276 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3277 		(unsigned) orig_size, (unsigned) start);
3278 }
3279 
3280 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3281 {
3282 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3283 
3284 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3285 		atomic_inc(&sbi->s_bal_reqs);
3286 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3287 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3288 			atomic_inc(&sbi->s_bal_success);
3289 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3290 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3291 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3292 			atomic_inc(&sbi->s_bal_goals);
3293 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3294 			atomic_inc(&sbi->s_bal_breaks);
3295 	}
3296 
3297 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3298 		trace_ext4_mballoc_alloc(ac);
3299 	else
3300 		trace_ext4_mballoc_prealloc(ac);
3301 }
3302 
3303 /*
3304  * Called on failure; free up any blocks from the inode PA for this
3305  * context.  We don't need this for MB_GROUP_PA because we only change
3306  * pa_free in ext4_mb_release_context(), but on failure, we've already
3307  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3308  */
3309 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3310 {
3311 	struct ext4_prealloc_space *pa = ac->ac_pa;
3312 	struct ext4_buddy e4b;
3313 	int err;
3314 
3315 	if (pa == NULL) {
3316 		if (ac->ac_f_ex.fe_len == 0)
3317 			return;
3318 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3319 		if (err) {
3320 			/*
3321 			 * This should never happen since we pin the
3322 			 * pages in the ext4_allocation_context so
3323 			 * ext4_mb_load_buddy() should never fail.
3324 			 */
3325 			WARN(1, "mb_load_buddy failed (%d)", err);
3326 			return;
3327 		}
3328 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3329 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3330 			       ac->ac_f_ex.fe_len);
3331 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3332 		ext4_mb_unload_buddy(&e4b);
3333 		return;
3334 	}
3335 	if (pa->pa_type == MB_INODE_PA)
3336 		pa->pa_free += ac->ac_b_ex.fe_len;
3337 }
3338 
3339 /*
3340  * use blocks preallocated to inode
3341  */
3342 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3343 				struct ext4_prealloc_space *pa)
3344 {
3345 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3346 	ext4_fsblk_t start;
3347 	ext4_fsblk_t end;
3348 	int len;
3349 
3350 	/* found preallocated blocks, use them */
3351 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3352 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3353 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3354 	len = EXT4_NUM_B2C(sbi, end - start);
3355 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3356 					&ac->ac_b_ex.fe_start);
3357 	ac->ac_b_ex.fe_len = len;
3358 	ac->ac_status = AC_STATUS_FOUND;
3359 	ac->ac_pa = pa;
3360 
3361 	BUG_ON(start < pa->pa_pstart);
3362 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3363 	BUG_ON(pa->pa_free < len);
3364 	pa->pa_free -= len;
3365 
3366 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3367 }
3368 
3369 /*
3370  * use blocks preallocated to locality group
3371  */
3372 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3373 				struct ext4_prealloc_space *pa)
3374 {
3375 	unsigned int len = ac->ac_o_ex.fe_len;
3376 
3377 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3378 					&ac->ac_b_ex.fe_group,
3379 					&ac->ac_b_ex.fe_start);
3380 	ac->ac_b_ex.fe_len = len;
3381 	ac->ac_status = AC_STATUS_FOUND;
3382 	ac->ac_pa = pa;
3383 
3384 	/* we don't correct pa_pstart or pa_plen here to avoid
3385 	 * possible race when the group is being loaded concurrently
3386 	 * instead we correct pa later, after blocks are marked
3387 	 * in on-disk bitmap -- see ext4_mb_release_context()
3388 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3389 	 */
3390 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3391 }
3392 
3393 /*
3394  * Return the prealloc space that have minimal distance
3395  * from the goal block. @cpa is the prealloc
3396  * space that is having currently known minimal distance
3397  * from the goal block.
3398  */
3399 static struct ext4_prealloc_space *
3400 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3401 			struct ext4_prealloc_space *pa,
3402 			struct ext4_prealloc_space *cpa)
3403 {
3404 	ext4_fsblk_t cur_distance, new_distance;
3405 
3406 	if (cpa == NULL) {
3407 		atomic_inc(&pa->pa_count);
3408 		return pa;
3409 	}
3410 	cur_distance = abs(goal_block - cpa->pa_pstart);
3411 	new_distance = abs(goal_block - pa->pa_pstart);
3412 
3413 	if (cur_distance <= new_distance)
3414 		return cpa;
3415 
3416 	/* drop the previous reference */
3417 	atomic_dec(&cpa->pa_count);
3418 	atomic_inc(&pa->pa_count);
3419 	return pa;
3420 }
3421 
3422 /*
3423  * search goal blocks in preallocated space
3424  */
3425 static noinline_for_stack int
3426 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3427 {
3428 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3429 	int order, i;
3430 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3431 	struct ext4_locality_group *lg;
3432 	struct ext4_prealloc_space *pa, *cpa = NULL;
3433 	ext4_fsblk_t goal_block;
3434 
3435 	/* only data can be preallocated */
3436 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3437 		return 0;
3438 
3439 	/* first, try per-file preallocation */
3440 	rcu_read_lock();
3441 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3442 
3443 		/* all fields in this condition don't change,
3444 		 * so we can skip locking for them */
3445 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3446 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3447 					       EXT4_C2B(sbi, pa->pa_len)))
3448 			continue;
3449 
3450 		/* non-extent files can't have physical blocks past 2^32 */
3451 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3452 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3453 		     EXT4_MAX_BLOCK_FILE_PHYS))
3454 			continue;
3455 
3456 		/* found preallocated blocks, use them */
3457 		spin_lock(&pa->pa_lock);
3458 		if (pa->pa_deleted == 0 && pa->pa_free) {
3459 			atomic_inc(&pa->pa_count);
3460 			ext4_mb_use_inode_pa(ac, pa);
3461 			spin_unlock(&pa->pa_lock);
3462 			ac->ac_criteria = 10;
3463 			rcu_read_unlock();
3464 			return 1;
3465 		}
3466 		spin_unlock(&pa->pa_lock);
3467 	}
3468 	rcu_read_unlock();
3469 
3470 	/* can we use group allocation? */
3471 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3472 		return 0;
3473 
3474 	/* inode may have no locality group for some reason */
3475 	lg = ac->ac_lg;
3476 	if (lg == NULL)
3477 		return 0;
3478 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3479 	if (order > PREALLOC_TB_SIZE - 1)
3480 		/* The max size of hash table is PREALLOC_TB_SIZE */
3481 		order = PREALLOC_TB_SIZE - 1;
3482 
3483 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3484 	/*
3485 	 * search for the prealloc space that is having
3486 	 * minimal distance from the goal block.
3487 	 */
3488 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3489 		rcu_read_lock();
3490 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3491 					pa_inode_list) {
3492 			spin_lock(&pa->pa_lock);
3493 			if (pa->pa_deleted == 0 &&
3494 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3495 
3496 				cpa = ext4_mb_check_group_pa(goal_block,
3497 								pa, cpa);
3498 			}
3499 			spin_unlock(&pa->pa_lock);
3500 		}
3501 		rcu_read_unlock();
3502 	}
3503 	if (cpa) {
3504 		ext4_mb_use_group_pa(ac, cpa);
3505 		ac->ac_criteria = 20;
3506 		return 1;
3507 	}
3508 	return 0;
3509 }
3510 
3511 /*
3512  * the function goes through all block freed in the group
3513  * but not yet committed and marks them used in in-core bitmap.
3514  * buddy must be generated from this bitmap
3515  * Need to be called with the ext4 group lock held
3516  */
3517 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3518 						ext4_group_t group)
3519 {
3520 	struct rb_node *n;
3521 	struct ext4_group_info *grp;
3522 	struct ext4_free_data *entry;
3523 
3524 	grp = ext4_get_group_info(sb, group);
3525 	n = rb_first(&(grp->bb_free_root));
3526 
3527 	while (n) {
3528 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3529 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3530 		n = rb_next(n);
3531 	}
3532 	return;
3533 }
3534 
3535 /*
3536  * the function goes through all preallocation in this group and marks them
3537  * used in in-core bitmap. buddy must be generated from this bitmap
3538  * Need to be called with ext4 group lock held
3539  */
3540 static noinline_for_stack
3541 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3542 					ext4_group_t group)
3543 {
3544 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3545 	struct ext4_prealloc_space *pa;
3546 	struct list_head *cur;
3547 	ext4_group_t groupnr;
3548 	ext4_grpblk_t start;
3549 	int preallocated = 0;
3550 	int len;
3551 
3552 	/* all form of preallocation discards first load group,
3553 	 * so the only competing code is preallocation use.
3554 	 * we don't need any locking here
3555 	 * notice we do NOT ignore preallocations with pa_deleted
3556 	 * otherwise we could leave used blocks available for
3557 	 * allocation in buddy when concurrent ext4_mb_put_pa()
3558 	 * is dropping preallocation
3559 	 */
3560 	list_for_each(cur, &grp->bb_prealloc_list) {
3561 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3562 		spin_lock(&pa->pa_lock);
3563 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3564 					     &groupnr, &start);
3565 		len = pa->pa_len;
3566 		spin_unlock(&pa->pa_lock);
3567 		if (unlikely(len == 0))
3568 			continue;
3569 		BUG_ON(groupnr != group);
3570 		ext4_set_bits(bitmap, start, len);
3571 		preallocated += len;
3572 	}
3573 	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3574 }
3575 
3576 static void ext4_mb_pa_callback(struct rcu_head *head)
3577 {
3578 	struct ext4_prealloc_space *pa;
3579 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3580 
3581 	BUG_ON(atomic_read(&pa->pa_count));
3582 	BUG_ON(pa->pa_deleted == 0);
3583 	kmem_cache_free(ext4_pspace_cachep, pa);
3584 }
3585 
3586 /*
3587  * drops a reference to preallocated space descriptor
3588  * if this was the last reference and the space is consumed
3589  */
3590 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3591 			struct super_block *sb, struct ext4_prealloc_space *pa)
3592 {
3593 	ext4_group_t grp;
3594 	ext4_fsblk_t grp_blk;
3595 
3596 	/* in this short window concurrent discard can set pa_deleted */
3597 	spin_lock(&pa->pa_lock);
3598 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3599 		spin_unlock(&pa->pa_lock);
3600 		return;
3601 	}
3602 
3603 	if (pa->pa_deleted == 1) {
3604 		spin_unlock(&pa->pa_lock);
3605 		return;
3606 	}
3607 
3608 	pa->pa_deleted = 1;
3609 	spin_unlock(&pa->pa_lock);
3610 
3611 	grp_blk = pa->pa_pstart;
3612 	/*
3613 	 * If doing group-based preallocation, pa_pstart may be in the
3614 	 * next group when pa is used up
3615 	 */
3616 	if (pa->pa_type == MB_GROUP_PA)
3617 		grp_blk--;
3618 
3619 	grp = ext4_get_group_number(sb, grp_blk);
3620 
3621 	/*
3622 	 * possible race:
3623 	 *
3624 	 *  P1 (buddy init)			P2 (regular allocation)
3625 	 *					find block B in PA
3626 	 *  copy on-disk bitmap to buddy
3627 	 *  					mark B in on-disk bitmap
3628 	 *					drop PA from group
3629 	 *  mark all PAs in buddy
3630 	 *
3631 	 * thus, P1 initializes buddy with B available. to prevent this
3632 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3633 	 * against that pair
3634 	 */
3635 	ext4_lock_group(sb, grp);
3636 	list_del(&pa->pa_group_list);
3637 	ext4_unlock_group(sb, grp);
3638 
3639 	spin_lock(pa->pa_obj_lock);
3640 	list_del_rcu(&pa->pa_inode_list);
3641 	spin_unlock(pa->pa_obj_lock);
3642 
3643 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3644 }
3645 
3646 /*
3647  * creates new preallocated space for given inode
3648  */
3649 static noinline_for_stack int
3650 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3651 {
3652 	struct super_block *sb = ac->ac_sb;
3653 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3654 	struct ext4_prealloc_space *pa;
3655 	struct ext4_group_info *grp;
3656 	struct ext4_inode_info *ei;
3657 
3658 	/* preallocate only when found space is larger then requested */
3659 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3660 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3661 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3662 
3663 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3664 	if (pa == NULL)
3665 		return -ENOMEM;
3666 
3667 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3668 		int winl;
3669 		int wins;
3670 		int win;
3671 		int offs;
3672 
3673 		/* we can't allocate as much as normalizer wants.
3674 		 * so, found space must get proper lstart
3675 		 * to cover original request */
3676 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3677 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3678 
3679 		/* we're limited by original request in that
3680 		 * logical block must be covered any way
3681 		 * winl is window we can move our chunk within */
3682 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3683 
3684 		/* also, we should cover whole original request */
3685 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3686 
3687 		/* the smallest one defines real window */
3688 		win = min(winl, wins);
3689 
3690 		offs = ac->ac_o_ex.fe_logical %
3691 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3692 		if (offs && offs < win)
3693 			win = offs;
3694 
3695 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3696 			EXT4_NUM_B2C(sbi, win);
3697 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3698 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3699 	}
3700 
3701 	/* preallocation can change ac_b_ex, thus we store actually
3702 	 * allocated blocks for history */
3703 	ac->ac_f_ex = ac->ac_b_ex;
3704 
3705 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3706 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3707 	pa->pa_len = ac->ac_b_ex.fe_len;
3708 	pa->pa_free = pa->pa_len;
3709 	atomic_set(&pa->pa_count, 1);
3710 	spin_lock_init(&pa->pa_lock);
3711 	INIT_LIST_HEAD(&pa->pa_inode_list);
3712 	INIT_LIST_HEAD(&pa->pa_group_list);
3713 	pa->pa_deleted = 0;
3714 	pa->pa_type = MB_INODE_PA;
3715 
3716 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3717 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3718 	trace_ext4_mb_new_inode_pa(ac, pa);
3719 
3720 	ext4_mb_use_inode_pa(ac, pa);
3721 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3722 
3723 	ei = EXT4_I(ac->ac_inode);
3724 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3725 
3726 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3727 	pa->pa_inode = ac->ac_inode;
3728 
3729 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3730 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3731 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3732 
3733 	spin_lock(pa->pa_obj_lock);
3734 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3735 	spin_unlock(pa->pa_obj_lock);
3736 
3737 	return 0;
3738 }
3739 
3740 /*
3741  * creates new preallocated space for locality group inodes belongs to
3742  */
3743 static noinline_for_stack int
3744 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3745 {
3746 	struct super_block *sb = ac->ac_sb;
3747 	struct ext4_locality_group *lg;
3748 	struct ext4_prealloc_space *pa;
3749 	struct ext4_group_info *grp;
3750 
3751 	/* preallocate only when found space is larger then requested */
3752 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3753 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3754 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3755 
3756 	BUG_ON(ext4_pspace_cachep == NULL);
3757 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3758 	if (pa == NULL)
3759 		return -ENOMEM;
3760 
3761 	/* preallocation can change ac_b_ex, thus we store actually
3762 	 * allocated blocks for history */
3763 	ac->ac_f_ex = ac->ac_b_ex;
3764 
3765 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3766 	pa->pa_lstart = pa->pa_pstart;
3767 	pa->pa_len = ac->ac_b_ex.fe_len;
3768 	pa->pa_free = pa->pa_len;
3769 	atomic_set(&pa->pa_count, 1);
3770 	spin_lock_init(&pa->pa_lock);
3771 	INIT_LIST_HEAD(&pa->pa_inode_list);
3772 	INIT_LIST_HEAD(&pa->pa_group_list);
3773 	pa->pa_deleted = 0;
3774 	pa->pa_type = MB_GROUP_PA;
3775 
3776 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3777 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3778 	trace_ext4_mb_new_group_pa(ac, pa);
3779 
3780 	ext4_mb_use_group_pa(ac, pa);
3781 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3782 
3783 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3784 	lg = ac->ac_lg;
3785 	BUG_ON(lg == NULL);
3786 
3787 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3788 	pa->pa_inode = NULL;
3789 
3790 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3791 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3792 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3793 
3794 	/*
3795 	 * We will later add the new pa to the right bucket
3796 	 * after updating the pa_free in ext4_mb_release_context
3797 	 */
3798 	return 0;
3799 }
3800 
3801 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3802 {
3803 	int err;
3804 
3805 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3806 		err = ext4_mb_new_group_pa(ac);
3807 	else
3808 		err = ext4_mb_new_inode_pa(ac);
3809 	return err;
3810 }
3811 
3812 /*
3813  * finds all unused blocks in on-disk bitmap, frees them in
3814  * in-core bitmap and buddy.
3815  * @pa must be unlinked from inode and group lists, so that
3816  * nobody else can find/use it.
3817  * the caller MUST hold group/inode locks.
3818  * TODO: optimize the case when there are no in-core structures yet
3819  */
3820 static noinline_for_stack int
3821 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3822 			struct ext4_prealloc_space *pa)
3823 {
3824 	struct super_block *sb = e4b->bd_sb;
3825 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3826 	unsigned int end;
3827 	unsigned int next;
3828 	ext4_group_t group;
3829 	ext4_grpblk_t bit;
3830 	unsigned long long grp_blk_start;
3831 	int err = 0;
3832 	int free = 0;
3833 
3834 	BUG_ON(pa->pa_deleted == 0);
3835 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3836 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3837 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3838 	end = bit + pa->pa_len;
3839 
3840 	while (bit < end) {
3841 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3842 		if (bit >= end)
3843 			break;
3844 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3845 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3846 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3847 			 (unsigned) next - bit, (unsigned) group);
3848 		free += next - bit;
3849 
3850 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3851 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3852 						    EXT4_C2B(sbi, bit)),
3853 					       next - bit);
3854 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3855 		bit = next + 1;
3856 	}
3857 	if (free != pa->pa_free) {
3858 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3859 			 "pa %p: logic %lu, phys. %lu, len %lu",
3860 			 pa, (unsigned long) pa->pa_lstart,
3861 			 (unsigned long) pa->pa_pstart,
3862 			 (unsigned long) pa->pa_len);
3863 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3864 					free, pa->pa_free);
3865 		/*
3866 		 * pa is already deleted so we use the value obtained
3867 		 * from the bitmap and continue.
3868 		 */
3869 	}
3870 	atomic_add(free, &sbi->s_mb_discarded);
3871 
3872 	return err;
3873 }
3874 
3875 static noinline_for_stack int
3876 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3877 				struct ext4_prealloc_space *pa)
3878 {
3879 	struct super_block *sb = e4b->bd_sb;
3880 	ext4_group_t group;
3881 	ext4_grpblk_t bit;
3882 
3883 	trace_ext4_mb_release_group_pa(sb, pa);
3884 	BUG_ON(pa->pa_deleted == 0);
3885 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3886 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3887 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3888 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3889 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3890 
3891 	return 0;
3892 }
3893 
3894 /*
3895  * releases all preallocations in given group
3896  *
3897  * first, we need to decide discard policy:
3898  * - when do we discard
3899  *   1) ENOSPC
3900  * - how many do we discard
3901  *   1) how many requested
3902  */
3903 static noinline_for_stack int
3904 ext4_mb_discard_group_preallocations(struct super_block *sb,
3905 					ext4_group_t group, int needed)
3906 {
3907 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3908 	struct buffer_head *bitmap_bh = NULL;
3909 	struct ext4_prealloc_space *pa, *tmp;
3910 	struct list_head list;
3911 	struct ext4_buddy e4b;
3912 	int err;
3913 	int busy = 0;
3914 	int free = 0;
3915 
3916 	mb_debug(1, "discard preallocation for group %u\n", group);
3917 
3918 	if (list_empty(&grp->bb_prealloc_list))
3919 		return 0;
3920 
3921 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3922 	if (IS_ERR(bitmap_bh)) {
3923 		err = PTR_ERR(bitmap_bh);
3924 		ext4_error(sb, "Error %d reading block bitmap for %u",
3925 			   err, group);
3926 		return 0;
3927 	}
3928 
3929 	err = ext4_mb_load_buddy(sb, group, &e4b);
3930 	if (err) {
3931 		ext4_warning(sb, "Error %d loading buddy information for %u",
3932 			     err, group);
3933 		put_bh(bitmap_bh);
3934 		return 0;
3935 	}
3936 
3937 	if (needed == 0)
3938 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
3939 
3940 	INIT_LIST_HEAD(&list);
3941 repeat:
3942 	ext4_lock_group(sb, group);
3943 	list_for_each_entry_safe(pa, tmp,
3944 				&grp->bb_prealloc_list, pa_group_list) {
3945 		spin_lock(&pa->pa_lock);
3946 		if (atomic_read(&pa->pa_count)) {
3947 			spin_unlock(&pa->pa_lock);
3948 			busy = 1;
3949 			continue;
3950 		}
3951 		if (pa->pa_deleted) {
3952 			spin_unlock(&pa->pa_lock);
3953 			continue;
3954 		}
3955 
3956 		/* seems this one can be freed ... */
3957 		pa->pa_deleted = 1;
3958 
3959 		/* we can trust pa_free ... */
3960 		free += pa->pa_free;
3961 
3962 		spin_unlock(&pa->pa_lock);
3963 
3964 		list_del(&pa->pa_group_list);
3965 		list_add(&pa->u.pa_tmp_list, &list);
3966 	}
3967 
3968 	/* if we still need more blocks and some PAs were used, try again */
3969 	if (free < needed && busy) {
3970 		busy = 0;
3971 		ext4_unlock_group(sb, group);
3972 		cond_resched();
3973 		goto repeat;
3974 	}
3975 
3976 	/* found anything to free? */
3977 	if (list_empty(&list)) {
3978 		BUG_ON(free != 0);
3979 		goto out;
3980 	}
3981 
3982 	/* now free all selected PAs */
3983 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3984 
3985 		/* remove from object (inode or locality group) */
3986 		spin_lock(pa->pa_obj_lock);
3987 		list_del_rcu(&pa->pa_inode_list);
3988 		spin_unlock(pa->pa_obj_lock);
3989 
3990 		if (pa->pa_type == MB_GROUP_PA)
3991 			ext4_mb_release_group_pa(&e4b, pa);
3992 		else
3993 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3994 
3995 		list_del(&pa->u.pa_tmp_list);
3996 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3997 	}
3998 
3999 out:
4000 	ext4_unlock_group(sb, group);
4001 	ext4_mb_unload_buddy(&e4b);
4002 	put_bh(bitmap_bh);
4003 	return free;
4004 }
4005 
4006 /*
4007  * releases all non-used preallocated blocks for given inode
4008  *
4009  * It's important to discard preallocations under i_data_sem
4010  * We don't want another block to be served from the prealloc
4011  * space when we are discarding the inode prealloc space.
4012  *
4013  * FIXME!! Make sure it is valid at all the call sites
4014  */
4015 void ext4_discard_preallocations(struct inode *inode)
4016 {
4017 	struct ext4_inode_info *ei = EXT4_I(inode);
4018 	struct super_block *sb = inode->i_sb;
4019 	struct buffer_head *bitmap_bh = NULL;
4020 	struct ext4_prealloc_space *pa, *tmp;
4021 	ext4_group_t group = 0;
4022 	struct list_head list;
4023 	struct ext4_buddy e4b;
4024 	int err;
4025 
4026 	if (!S_ISREG(inode->i_mode)) {
4027 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4028 		return;
4029 	}
4030 
4031 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
4032 	trace_ext4_discard_preallocations(inode);
4033 
4034 	INIT_LIST_HEAD(&list);
4035 
4036 repeat:
4037 	/* first, collect all pa's in the inode */
4038 	spin_lock(&ei->i_prealloc_lock);
4039 	while (!list_empty(&ei->i_prealloc_list)) {
4040 		pa = list_entry(ei->i_prealloc_list.next,
4041 				struct ext4_prealloc_space, pa_inode_list);
4042 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4043 		spin_lock(&pa->pa_lock);
4044 		if (atomic_read(&pa->pa_count)) {
4045 			/* this shouldn't happen often - nobody should
4046 			 * use preallocation while we're discarding it */
4047 			spin_unlock(&pa->pa_lock);
4048 			spin_unlock(&ei->i_prealloc_lock);
4049 			ext4_msg(sb, KERN_ERR,
4050 				 "uh-oh! used pa while discarding");
4051 			WARN_ON(1);
4052 			schedule_timeout_uninterruptible(HZ);
4053 			goto repeat;
4054 
4055 		}
4056 		if (pa->pa_deleted == 0) {
4057 			pa->pa_deleted = 1;
4058 			spin_unlock(&pa->pa_lock);
4059 			list_del_rcu(&pa->pa_inode_list);
4060 			list_add(&pa->u.pa_tmp_list, &list);
4061 			continue;
4062 		}
4063 
4064 		/* someone is deleting pa right now */
4065 		spin_unlock(&pa->pa_lock);
4066 		spin_unlock(&ei->i_prealloc_lock);
4067 
4068 		/* we have to wait here because pa_deleted
4069 		 * doesn't mean pa is already unlinked from
4070 		 * the list. as we might be called from
4071 		 * ->clear_inode() the inode will get freed
4072 		 * and concurrent thread which is unlinking
4073 		 * pa from inode's list may access already
4074 		 * freed memory, bad-bad-bad */
4075 
4076 		/* XXX: if this happens too often, we can
4077 		 * add a flag to force wait only in case
4078 		 * of ->clear_inode(), but not in case of
4079 		 * regular truncate */
4080 		schedule_timeout_uninterruptible(HZ);
4081 		goto repeat;
4082 	}
4083 	spin_unlock(&ei->i_prealloc_lock);
4084 
4085 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4086 		BUG_ON(pa->pa_type != MB_INODE_PA);
4087 		group = ext4_get_group_number(sb, pa->pa_pstart);
4088 
4089 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4090 					     GFP_NOFS|__GFP_NOFAIL);
4091 		if (err) {
4092 			ext4_error(sb, "Error %d loading buddy information for %u",
4093 				   err, group);
4094 			continue;
4095 		}
4096 
4097 		bitmap_bh = ext4_read_block_bitmap(sb, group);
4098 		if (IS_ERR(bitmap_bh)) {
4099 			err = PTR_ERR(bitmap_bh);
4100 			ext4_error(sb, "Error %d reading block bitmap for %u",
4101 					err, group);
4102 			ext4_mb_unload_buddy(&e4b);
4103 			continue;
4104 		}
4105 
4106 		ext4_lock_group(sb, group);
4107 		list_del(&pa->pa_group_list);
4108 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4109 		ext4_unlock_group(sb, group);
4110 
4111 		ext4_mb_unload_buddy(&e4b);
4112 		put_bh(bitmap_bh);
4113 
4114 		list_del(&pa->u.pa_tmp_list);
4115 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4116 	}
4117 }
4118 
4119 #ifdef CONFIG_EXT4_DEBUG
4120 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4121 {
4122 	struct super_block *sb = ac->ac_sb;
4123 	ext4_group_t ngroups, i;
4124 
4125 	if (!ext4_mballoc_debug ||
4126 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
4127 		return;
4128 
4129 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
4130 			" Allocation context details:");
4131 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
4132 			ac->ac_status, ac->ac_flags);
4133 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
4134 		 	"goal %lu/%lu/%lu@%lu, "
4135 			"best %lu/%lu/%lu@%lu cr %d",
4136 			(unsigned long)ac->ac_o_ex.fe_group,
4137 			(unsigned long)ac->ac_o_ex.fe_start,
4138 			(unsigned long)ac->ac_o_ex.fe_len,
4139 			(unsigned long)ac->ac_o_ex.fe_logical,
4140 			(unsigned long)ac->ac_g_ex.fe_group,
4141 			(unsigned long)ac->ac_g_ex.fe_start,
4142 			(unsigned long)ac->ac_g_ex.fe_len,
4143 			(unsigned long)ac->ac_g_ex.fe_logical,
4144 			(unsigned long)ac->ac_b_ex.fe_group,
4145 			(unsigned long)ac->ac_b_ex.fe_start,
4146 			(unsigned long)ac->ac_b_ex.fe_len,
4147 			(unsigned long)ac->ac_b_ex.fe_logical,
4148 			(int)ac->ac_criteria);
4149 	ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
4150 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
4151 	ngroups = ext4_get_groups_count(sb);
4152 	for (i = 0; i < ngroups; i++) {
4153 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4154 		struct ext4_prealloc_space *pa;
4155 		ext4_grpblk_t start;
4156 		struct list_head *cur;
4157 		ext4_lock_group(sb, i);
4158 		list_for_each(cur, &grp->bb_prealloc_list) {
4159 			pa = list_entry(cur, struct ext4_prealloc_space,
4160 					pa_group_list);
4161 			spin_lock(&pa->pa_lock);
4162 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4163 						     NULL, &start);
4164 			spin_unlock(&pa->pa_lock);
4165 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
4166 			       start, pa->pa_len);
4167 		}
4168 		ext4_unlock_group(sb, i);
4169 
4170 		if (grp->bb_free == 0)
4171 			continue;
4172 		printk(KERN_ERR "%u: %d/%d \n",
4173 		       i, grp->bb_free, grp->bb_fragments);
4174 	}
4175 	printk(KERN_ERR "\n");
4176 }
4177 #else
4178 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4179 {
4180 	return;
4181 }
4182 #endif
4183 
4184 /*
4185  * We use locality group preallocation for small size file. The size of the
4186  * file is determined by the current size or the resulting size after
4187  * allocation which ever is larger
4188  *
4189  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4190  */
4191 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4192 {
4193 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4194 	int bsbits = ac->ac_sb->s_blocksize_bits;
4195 	loff_t size, isize;
4196 
4197 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4198 		return;
4199 
4200 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4201 		return;
4202 
4203 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4204 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4205 		>> bsbits;
4206 
4207 	if ((size == isize) &&
4208 	    !ext4_fs_is_busy(sbi) &&
4209 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4210 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4211 		return;
4212 	}
4213 
4214 	if (sbi->s_mb_group_prealloc <= 0) {
4215 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4216 		return;
4217 	}
4218 
4219 	/* don't use group allocation for large files */
4220 	size = max(size, isize);
4221 	if (size > sbi->s_mb_stream_request) {
4222 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4223 		return;
4224 	}
4225 
4226 	BUG_ON(ac->ac_lg != NULL);
4227 	/*
4228 	 * locality group prealloc space are per cpu. The reason for having
4229 	 * per cpu locality group is to reduce the contention between block
4230 	 * request from multiple CPUs.
4231 	 */
4232 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4233 
4234 	/* we're going to use group allocation */
4235 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4236 
4237 	/* serialize all allocations in the group */
4238 	mutex_lock(&ac->ac_lg->lg_mutex);
4239 }
4240 
4241 static noinline_for_stack int
4242 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4243 				struct ext4_allocation_request *ar)
4244 {
4245 	struct super_block *sb = ar->inode->i_sb;
4246 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4247 	struct ext4_super_block *es = sbi->s_es;
4248 	ext4_group_t group;
4249 	unsigned int len;
4250 	ext4_fsblk_t goal;
4251 	ext4_grpblk_t block;
4252 
4253 	/* we can't allocate > group size */
4254 	len = ar->len;
4255 
4256 	/* just a dirty hack to filter too big requests  */
4257 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4258 		len = EXT4_CLUSTERS_PER_GROUP(sb);
4259 
4260 	/* start searching from the goal */
4261 	goal = ar->goal;
4262 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4263 			goal >= ext4_blocks_count(es))
4264 		goal = le32_to_cpu(es->s_first_data_block);
4265 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4266 
4267 	/* set up allocation goals */
4268 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4269 	ac->ac_status = AC_STATUS_CONTINUE;
4270 	ac->ac_sb = sb;
4271 	ac->ac_inode = ar->inode;
4272 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4273 	ac->ac_o_ex.fe_group = group;
4274 	ac->ac_o_ex.fe_start = block;
4275 	ac->ac_o_ex.fe_len = len;
4276 	ac->ac_g_ex = ac->ac_o_ex;
4277 	ac->ac_flags = ar->flags;
4278 
4279 	/* we have to define context: we'll we work with a file or
4280 	 * locality group. this is a policy, actually */
4281 	ext4_mb_group_or_file(ac);
4282 
4283 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4284 			"left: %u/%u, right %u/%u to %swritable\n",
4285 			(unsigned) ar->len, (unsigned) ar->logical,
4286 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4287 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4288 			(unsigned) ar->lright, (unsigned) ar->pright,
4289 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4290 	return 0;
4291 
4292 }
4293 
4294 static noinline_for_stack void
4295 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4296 					struct ext4_locality_group *lg,
4297 					int order, int total_entries)
4298 {
4299 	ext4_group_t group = 0;
4300 	struct ext4_buddy e4b;
4301 	struct list_head discard_list;
4302 	struct ext4_prealloc_space *pa, *tmp;
4303 
4304 	mb_debug(1, "discard locality group preallocation\n");
4305 
4306 	INIT_LIST_HEAD(&discard_list);
4307 
4308 	spin_lock(&lg->lg_prealloc_lock);
4309 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4310 						pa_inode_list) {
4311 		spin_lock(&pa->pa_lock);
4312 		if (atomic_read(&pa->pa_count)) {
4313 			/*
4314 			 * This is the pa that we just used
4315 			 * for block allocation. So don't
4316 			 * free that
4317 			 */
4318 			spin_unlock(&pa->pa_lock);
4319 			continue;
4320 		}
4321 		if (pa->pa_deleted) {
4322 			spin_unlock(&pa->pa_lock);
4323 			continue;
4324 		}
4325 		/* only lg prealloc space */
4326 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4327 
4328 		/* seems this one can be freed ... */
4329 		pa->pa_deleted = 1;
4330 		spin_unlock(&pa->pa_lock);
4331 
4332 		list_del_rcu(&pa->pa_inode_list);
4333 		list_add(&pa->u.pa_tmp_list, &discard_list);
4334 
4335 		total_entries--;
4336 		if (total_entries <= 5) {
4337 			/*
4338 			 * we want to keep only 5 entries
4339 			 * allowing it to grow to 8. This
4340 			 * mak sure we don't call discard
4341 			 * soon for this list.
4342 			 */
4343 			break;
4344 		}
4345 	}
4346 	spin_unlock(&lg->lg_prealloc_lock);
4347 
4348 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4349 		int err;
4350 
4351 		group = ext4_get_group_number(sb, pa->pa_pstart);
4352 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4353 					     GFP_NOFS|__GFP_NOFAIL);
4354 		if (err) {
4355 			ext4_error(sb, "Error %d loading buddy information for %u",
4356 				   err, group);
4357 			continue;
4358 		}
4359 		ext4_lock_group(sb, group);
4360 		list_del(&pa->pa_group_list);
4361 		ext4_mb_release_group_pa(&e4b, pa);
4362 		ext4_unlock_group(sb, group);
4363 
4364 		ext4_mb_unload_buddy(&e4b);
4365 		list_del(&pa->u.pa_tmp_list);
4366 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4367 	}
4368 }
4369 
4370 /*
4371  * We have incremented pa_count. So it cannot be freed at this
4372  * point. Also we hold lg_mutex. So no parallel allocation is
4373  * possible from this lg. That means pa_free cannot be updated.
4374  *
4375  * A parallel ext4_mb_discard_group_preallocations is possible.
4376  * which can cause the lg_prealloc_list to be updated.
4377  */
4378 
4379 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4380 {
4381 	int order, added = 0, lg_prealloc_count = 1;
4382 	struct super_block *sb = ac->ac_sb;
4383 	struct ext4_locality_group *lg = ac->ac_lg;
4384 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4385 
4386 	order = fls(pa->pa_free) - 1;
4387 	if (order > PREALLOC_TB_SIZE - 1)
4388 		/* The max size of hash table is PREALLOC_TB_SIZE */
4389 		order = PREALLOC_TB_SIZE - 1;
4390 	/* Add the prealloc space to lg */
4391 	spin_lock(&lg->lg_prealloc_lock);
4392 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4393 						pa_inode_list) {
4394 		spin_lock(&tmp_pa->pa_lock);
4395 		if (tmp_pa->pa_deleted) {
4396 			spin_unlock(&tmp_pa->pa_lock);
4397 			continue;
4398 		}
4399 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4400 			/* Add to the tail of the previous entry */
4401 			list_add_tail_rcu(&pa->pa_inode_list,
4402 						&tmp_pa->pa_inode_list);
4403 			added = 1;
4404 			/*
4405 			 * we want to count the total
4406 			 * number of entries in the list
4407 			 */
4408 		}
4409 		spin_unlock(&tmp_pa->pa_lock);
4410 		lg_prealloc_count++;
4411 	}
4412 	if (!added)
4413 		list_add_tail_rcu(&pa->pa_inode_list,
4414 					&lg->lg_prealloc_list[order]);
4415 	spin_unlock(&lg->lg_prealloc_lock);
4416 
4417 	/* Now trim the list to be not more than 8 elements */
4418 	if (lg_prealloc_count > 8) {
4419 		ext4_mb_discard_lg_preallocations(sb, lg,
4420 						  order, lg_prealloc_count);
4421 		return;
4422 	}
4423 	return ;
4424 }
4425 
4426 /*
4427  * release all resource we used in allocation
4428  */
4429 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4430 {
4431 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4432 	struct ext4_prealloc_space *pa = ac->ac_pa;
4433 	if (pa) {
4434 		if (pa->pa_type == MB_GROUP_PA) {
4435 			/* see comment in ext4_mb_use_group_pa() */
4436 			spin_lock(&pa->pa_lock);
4437 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4438 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4439 			pa->pa_free -= ac->ac_b_ex.fe_len;
4440 			pa->pa_len -= ac->ac_b_ex.fe_len;
4441 			spin_unlock(&pa->pa_lock);
4442 		}
4443 	}
4444 	if (pa) {
4445 		/*
4446 		 * We want to add the pa to the right bucket.
4447 		 * Remove it from the list and while adding
4448 		 * make sure the list to which we are adding
4449 		 * doesn't grow big.
4450 		 */
4451 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4452 			spin_lock(pa->pa_obj_lock);
4453 			list_del_rcu(&pa->pa_inode_list);
4454 			spin_unlock(pa->pa_obj_lock);
4455 			ext4_mb_add_n_trim(ac);
4456 		}
4457 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4458 	}
4459 	if (ac->ac_bitmap_page)
4460 		put_page(ac->ac_bitmap_page);
4461 	if (ac->ac_buddy_page)
4462 		put_page(ac->ac_buddy_page);
4463 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4464 		mutex_unlock(&ac->ac_lg->lg_mutex);
4465 	ext4_mb_collect_stats(ac);
4466 	return 0;
4467 }
4468 
4469 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4470 {
4471 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4472 	int ret;
4473 	int freed = 0;
4474 
4475 	trace_ext4_mb_discard_preallocations(sb, needed);
4476 	for (i = 0; i < ngroups && needed > 0; i++) {
4477 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4478 		freed += ret;
4479 		needed -= ret;
4480 	}
4481 
4482 	return freed;
4483 }
4484 
4485 /*
4486  * Main entry point into mballoc to allocate blocks
4487  * it tries to use preallocation first, then falls back
4488  * to usual allocation
4489  */
4490 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4491 				struct ext4_allocation_request *ar, int *errp)
4492 {
4493 	int freed;
4494 	struct ext4_allocation_context *ac = NULL;
4495 	struct ext4_sb_info *sbi;
4496 	struct super_block *sb;
4497 	ext4_fsblk_t block = 0;
4498 	unsigned int inquota = 0;
4499 	unsigned int reserv_clstrs = 0;
4500 
4501 	might_sleep();
4502 	sb = ar->inode->i_sb;
4503 	sbi = EXT4_SB(sb);
4504 
4505 	trace_ext4_request_blocks(ar);
4506 
4507 	/* Allow to use superuser reservation for quota file */
4508 	if (ext4_is_quota_file(ar->inode))
4509 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4510 
4511 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
4512 		/* Without delayed allocation we need to verify
4513 		 * there is enough free blocks to do block allocation
4514 		 * and verify allocation doesn't exceed the quota limits.
4515 		 */
4516 		while (ar->len &&
4517 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4518 
4519 			/* let others to free the space */
4520 			cond_resched();
4521 			ar->len = ar->len >> 1;
4522 		}
4523 		if (!ar->len) {
4524 			*errp = -ENOSPC;
4525 			return 0;
4526 		}
4527 		reserv_clstrs = ar->len;
4528 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4529 			dquot_alloc_block_nofail(ar->inode,
4530 						 EXT4_C2B(sbi, ar->len));
4531 		} else {
4532 			while (ar->len &&
4533 				dquot_alloc_block(ar->inode,
4534 						  EXT4_C2B(sbi, ar->len))) {
4535 
4536 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4537 				ar->len--;
4538 			}
4539 		}
4540 		inquota = ar->len;
4541 		if (ar->len == 0) {
4542 			*errp = -EDQUOT;
4543 			goto out;
4544 		}
4545 	}
4546 
4547 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4548 	if (!ac) {
4549 		ar->len = 0;
4550 		*errp = -ENOMEM;
4551 		goto out;
4552 	}
4553 
4554 	*errp = ext4_mb_initialize_context(ac, ar);
4555 	if (*errp) {
4556 		ar->len = 0;
4557 		goto out;
4558 	}
4559 
4560 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4561 	if (!ext4_mb_use_preallocated(ac)) {
4562 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4563 		ext4_mb_normalize_request(ac, ar);
4564 repeat:
4565 		/* allocate space in core */
4566 		*errp = ext4_mb_regular_allocator(ac);
4567 		if (*errp)
4568 			goto discard_and_exit;
4569 
4570 		/* as we've just preallocated more space than
4571 		 * user requested originally, we store allocated
4572 		 * space in a special descriptor */
4573 		if (ac->ac_status == AC_STATUS_FOUND &&
4574 		    ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4575 			*errp = ext4_mb_new_preallocation(ac);
4576 		if (*errp) {
4577 		discard_and_exit:
4578 			ext4_discard_allocated_blocks(ac);
4579 			goto errout;
4580 		}
4581 	}
4582 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4583 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4584 		if (*errp) {
4585 			ext4_discard_allocated_blocks(ac);
4586 			goto errout;
4587 		} else {
4588 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4589 			ar->len = ac->ac_b_ex.fe_len;
4590 		}
4591 	} else {
4592 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4593 		if (freed)
4594 			goto repeat;
4595 		*errp = -ENOSPC;
4596 	}
4597 
4598 errout:
4599 	if (*errp) {
4600 		ac->ac_b_ex.fe_len = 0;
4601 		ar->len = 0;
4602 		ext4_mb_show_ac(ac);
4603 	}
4604 	ext4_mb_release_context(ac);
4605 out:
4606 	if (ac)
4607 		kmem_cache_free(ext4_ac_cachep, ac);
4608 	if (inquota && ar->len < inquota)
4609 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4610 	if (!ar->len) {
4611 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
4612 			/* release all the reserved blocks if non delalloc */
4613 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4614 						reserv_clstrs);
4615 	}
4616 
4617 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4618 
4619 	return block;
4620 }
4621 
4622 /*
4623  * We can merge two free data extents only if the physical blocks
4624  * are contiguous, AND the extents were freed by the same transaction,
4625  * AND the blocks are associated with the same group.
4626  */
4627 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
4628 					struct ext4_free_data *entry,
4629 					struct ext4_free_data *new_entry,
4630 					struct rb_root *entry_rb_root)
4631 {
4632 	if ((entry->efd_tid != new_entry->efd_tid) ||
4633 	    (entry->efd_group != new_entry->efd_group))
4634 		return;
4635 	if (entry->efd_start_cluster + entry->efd_count ==
4636 	    new_entry->efd_start_cluster) {
4637 		new_entry->efd_start_cluster = entry->efd_start_cluster;
4638 		new_entry->efd_count += entry->efd_count;
4639 	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
4640 		   entry->efd_start_cluster) {
4641 		new_entry->efd_count += entry->efd_count;
4642 	} else
4643 		return;
4644 	spin_lock(&sbi->s_md_lock);
4645 	list_del(&entry->efd_list);
4646 	spin_unlock(&sbi->s_md_lock);
4647 	rb_erase(&entry->efd_node, entry_rb_root);
4648 	kmem_cache_free(ext4_free_data_cachep, entry);
4649 }
4650 
4651 static noinline_for_stack int
4652 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4653 		      struct ext4_free_data *new_entry)
4654 {
4655 	ext4_group_t group = e4b->bd_group;
4656 	ext4_grpblk_t cluster;
4657 	ext4_grpblk_t clusters = new_entry->efd_count;
4658 	struct ext4_free_data *entry;
4659 	struct ext4_group_info *db = e4b->bd_info;
4660 	struct super_block *sb = e4b->bd_sb;
4661 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4662 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4663 	struct rb_node *parent = NULL, *new_node;
4664 
4665 	BUG_ON(!ext4_handle_valid(handle));
4666 	BUG_ON(e4b->bd_bitmap_page == NULL);
4667 	BUG_ON(e4b->bd_buddy_page == NULL);
4668 
4669 	new_node = &new_entry->efd_node;
4670 	cluster = new_entry->efd_start_cluster;
4671 
4672 	if (!*n) {
4673 		/* first free block exent. We need to
4674 		   protect buddy cache from being freed,
4675 		 * otherwise we'll refresh it from
4676 		 * on-disk bitmap and lose not-yet-available
4677 		 * blocks */
4678 		get_page(e4b->bd_buddy_page);
4679 		get_page(e4b->bd_bitmap_page);
4680 	}
4681 	while (*n) {
4682 		parent = *n;
4683 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4684 		if (cluster < entry->efd_start_cluster)
4685 			n = &(*n)->rb_left;
4686 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4687 			n = &(*n)->rb_right;
4688 		else {
4689 			ext4_grp_locked_error(sb, group, 0,
4690 				ext4_group_first_block_no(sb, group) +
4691 				EXT4_C2B(sbi, cluster),
4692 				"Block already on to-be-freed list");
4693 			return 0;
4694 		}
4695 	}
4696 
4697 	rb_link_node(new_node, parent, n);
4698 	rb_insert_color(new_node, &db->bb_free_root);
4699 
4700 	/* Now try to see the extent can be merged to left and right */
4701 	node = rb_prev(new_node);
4702 	if (node) {
4703 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4704 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
4705 					    &(db->bb_free_root));
4706 	}
4707 
4708 	node = rb_next(new_node);
4709 	if (node) {
4710 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4711 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
4712 					    &(db->bb_free_root));
4713 	}
4714 
4715 	spin_lock(&sbi->s_md_lock);
4716 	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
4717 	sbi->s_mb_free_pending += clusters;
4718 	spin_unlock(&sbi->s_md_lock);
4719 	return 0;
4720 }
4721 
4722 /**
4723  * ext4_free_blocks() -- Free given blocks and update quota
4724  * @handle:		handle for this transaction
4725  * @inode:		inode
4726  * @block:		start physical block to free
4727  * @count:		number of blocks to count
4728  * @flags:		flags used by ext4_free_blocks
4729  */
4730 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4731 		      struct buffer_head *bh, ext4_fsblk_t block,
4732 		      unsigned long count, int flags)
4733 {
4734 	struct buffer_head *bitmap_bh = NULL;
4735 	struct super_block *sb = inode->i_sb;
4736 	struct ext4_group_desc *gdp;
4737 	unsigned int overflow;
4738 	ext4_grpblk_t bit;
4739 	struct buffer_head *gd_bh;
4740 	ext4_group_t block_group;
4741 	struct ext4_sb_info *sbi;
4742 	struct ext4_buddy e4b;
4743 	unsigned int count_clusters;
4744 	int err = 0;
4745 	int ret;
4746 
4747 	might_sleep();
4748 	if (bh) {
4749 		if (block)
4750 			BUG_ON(block != bh->b_blocknr);
4751 		else
4752 			block = bh->b_blocknr;
4753 	}
4754 
4755 	sbi = EXT4_SB(sb);
4756 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4757 	    !ext4_data_block_valid(sbi, block, count)) {
4758 		ext4_error(sb, "Freeing blocks not in datazone - "
4759 			   "block = %llu, count = %lu", block, count);
4760 		goto error_return;
4761 	}
4762 
4763 	ext4_debug("freeing block %llu\n", block);
4764 	trace_ext4_free_blocks(inode, block, count, flags);
4765 
4766 	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4767 		BUG_ON(count > 1);
4768 
4769 		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4770 			    inode, bh, block);
4771 	}
4772 
4773 	/*
4774 	 * If the extent to be freed does not begin on a cluster
4775 	 * boundary, we need to deal with partial clusters at the
4776 	 * beginning and end of the extent.  Normally we will free
4777 	 * blocks at the beginning or the end unless we are explicitly
4778 	 * requested to avoid doing so.
4779 	 */
4780 	overflow = EXT4_PBLK_COFF(sbi, block);
4781 	if (overflow) {
4782 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4783 			overflow = sbi->s_cluster_ratio - overflow;
4784 			block += overflow;
4785 			if (count > overflow)
4786 				count -= overflow;
4787 			else
4788 				return;
4789 		} else {
4790 			block -= overflow;
4791 			count += overflow;
4792 		}
4793 	}
4794 	overflow = EXT4_LBLK_COFF(sbi, count);
4795 	if (overflow) {
4796 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4797 			if (count > overflow)
4798 				count -= overflow;
4799 			else
4800 				return;
4801 		} else
4802 			count += sbi->s_cluster_ratio - overflow;
4803 	}
4804 
4805 	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4806 		int i;
4807 		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
4808 
4809 		for (i = 0; i < count; i++) {
4810 			cond_resched();
4811 			if (is_metadata)
4812 				bh = sb_find_get_block(inode->i_sb, block + i);
4813 			ext4_forget(handle, is_metadata, inode, bh, block + i);
4814 		}
4815 	}
4816 
4817 do_more:
4818 	overflow = 0;
4819 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4820 
4821 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
4822 			ext4_get_group_info(sb, block_group))))
4823 		return;
4824 
4825 	/*
4826 	 * Check to see if we are freeing blocks across a group
4827 	 * boundary.
4828 	 */
4829 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4830 		overflow = EXT4_C2B(sbi, bit) + count -
4831 			EXT4_BLOCKS_PER_GROUP(sb);
4832 		count -= overflow;
4833 	}
4834 	count_clusters = EXT4_NUM_B2C(sbi, count);
4835 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4836 	if (IS_ERR(bitmap_bh)) {
4837 		err = PTR_ERR(bitmap_bh);
4838 		bitmap_bh = NULL;
4839 		goto error_return;
4840 	}
4841 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4842 	if (!gdp) {
4843 		err = -EIO;
4844 		goto error_return;
4845 	}
4846 
4847 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4848 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4849 	    in_range(block, ext4_inode_table(sb, gdp),
4850 		     EXT4_SB(sb)->s_itb_per_group) ||
4851 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4852 		     EXT4_SB(sb)->s_itb_per_group)) {
4853 
4854 		ext4_error(sb, "Freeing blocks in system zone - "
4855 			   "Block = %llu, count = %lu", block, count);
4856 		/* err = 0. ext4_std_error should be a no op */
4857 		goto error_return;
4858 	}
4859 
4860 	BUFFER_TRACE(bitmap_bh, "getting write access");
4861 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4862 	if (err)
4863 		goto error_return;
4864 
4865 	/*
4866 	 * We are about to modify some metadata.  Call the journal APIs
4867 	 * to unshare ->b_data if a currently-committing transaction is
4868 	 * using it
4869 	 */
4870 	BUFFER_TRACE(gd_bh, "get_write_access");
4871 	err = ext4_journal_get_write_access(handle, gd_bh);
4872 	if (err)
4873 		goto error_return;
4874 #ifdef AGGRESSIVE_CHECK
4875 	{
4876 		int i;
4877 		for (i = 0; i < count_clusters; i++)
4878 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4879 	}
4880 #endif
4881 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4882 
4883 	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
4884 	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
4885 				     GFP_NOFS|__GFP_NOFAIL);
4886 	if (err)
4887 		goto error_return;
4888 
4889 	/*
4890 	 * We need to make sure we don't reuse the freed block until after the
4891 	 * transaction is committed. We make an exception if the inode is to be
4892 	 * written in writeback mode since writeback mode has weak data
4893 	 * consistency guarantees.
4894 	 */
4895 	if (ext4_handle_valid(handle) &&
4896 	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
4897 	     !ext4_should_writeback_data(inode))) {
4898 		struct ext4_free_data *new_entry;
4899 		/*
4900 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
4901 		 * to fail.
4902 		 */
4903 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
4904 				GFP_NOFS|__GFP_NOFAIL);
4905 		new_entry->efd_start_cluster = bit;
4906 		new_entry->efd_group = block_group;
4907 		new_entry->efd_count = count_clusters;
4908 		new_entry->efd_tid = handle->h_transaction->t_tid;
4909 
4910 		ext4_lock_group(sb, block_group);
4911 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4912 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4913 	} else {
4914 		/* need to update group_info->bb_free and bitmap
4915 		 * with group lock held. generate_buddy look at
4916 		 * them with group lock_held
4917 		 */
4918 		if (test_opt(sb, DISCARD)) {
4919 			err = ext4_issue_discard(sb, block_group, bit, count,
4920 						 NULL);
4921 			if (err && err != -EOPNOTSUPP)
4922 				ext4_msg(sb, KERN_WARNING, "discard request in"
4923 					 " group:%d block:%d count:%lu failed"
4924 					 " with %d", block_group, bit, count,
4925 					 err);
4926 		} else
4927 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
4928 
4929 		ext4_lock_group(sb, block_group);
4930 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4931 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4932 	}
4933 
4934 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4935 	ext4_free_group_clusters_set(sb, gdp, ret);
4936 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4937 	ext4_group_desc_csum_set(sb, block_group, gdp);
4938 	ext4_unlock_group(sb, block_group);
4939 
4940 	if (sbi->s_log_groups_per_flex) {
4941 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4942 		atomic64_add(count_clusters,
4943 			     &sbi->s_flex_groups[flex_group].free_clusters);
4944 	}
4945 
4946 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4947 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4948 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4949 
4950 	ext4_mb_unload_buddy(&e4b);
4951 
4952 	/* We dirtied the bitmap block */
4953 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4954 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4955 
4956 	/* And the group descriptor block */
4957 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4958 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4959 	if (!err)
4960 		err = ret;
4961 
4962 	if (overflow && !err) {
4963 		block += count;
4964 		count = overflow;
4965 		put_bh(bitmap_bh);
4966 		goto do_more;
4967 	}
4968 error_return:
4969 	brelse(bitmap_bh);
4970 	ext4_std_error(sb, err);
4971 	return;
4972 }
4973 
4974 /**
4975  * ext4_group_add_blocks() -- Add given blocks to an existing group
4976  * @handle:			handle to this transaction
4977  * @sb:				super block
4978  * @block:			start physical block to add to the block group
4979  * @count:			number of blocks to free
4980  *
4981  * This marks the blocks as free in the bitmap and buddy.
4982  */
4983 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4984 			 ext4_fsblk_t block, unsigned long count)
4985 {
4986 	struct buffer_head *bitmap_bh = NULL;
4987 	struct buffer_head *gd_bh;
4988 	ext4_group_t block_group;
4989 	ext4_grpblk_t bit;
4990 	unsigned int i;
4991 	struct ext4_group_desc *desc;
4992 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4993 	struct ext4_buddy e4b;
4994 	int err = 0, ret, blk_free_count;
4995 	ext4_grpblk_t blocks_freed;
4996 
4997 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4998 
4999 	if (count == 0)
5000 		return 0;
5001 
5002 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5003 	/*
5004 	 * Check to see if we are freeing blocks across a group
5005 	 * boundary.
5006 	 */
5007 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5008 		ext4_warning(sb, "too much blocks added to group %u",
5009 			     block_group);
5010 		err = -EINVAL;
5011 		goto error_return;
5012 	}
5013 
5014 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5015 	if (IS_ERR(bitmap_bh)) {
5016 		err = PTR_ERR(bitmap_bh);
5017 		bitmap_bh = NULL;
5018 		goto error_return;
5019 	}
5020 
5021 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
5022 	if (!desc) {
5023 		err = -EIO;
5024 		goto error_return;
5025 	}
5026 
5027 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
5028 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
5029 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
5030 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
5031 		     sbi->s_itb_per_group)) {
5032 		ext4_error(sb, "Adding blocks in system zones - "
5033 			   "Block = %llu, count = %lu",
5034 			   block, count);
5035 		err = -EINVAL;
5036 		goto error_return;
5037 	}
5038 
5039 	BUFFER_TRACE(bitmap_bh, "getting write access");
5040 	err = ext4_journal_get_write_access(handle, bitmap_bh);
5041 	if (err)
5042 		goto error_return;
5043 
5044 	/*
5045 	 * We are about to modify some metadata.  Call the journal APIs
5046 	 * to unshare ->b_data if a currently-committing transaction is
5047 	 * using it
5048 	 */
5049 	BUFFER_TRACE(gd_bh, "get_write_access");
5050 	err = ext4_journal_get_write_access(handle, gd_bh);
5051 	if (err)
5052 		goto error_return;
5053 
5054 	for (i = 0, blocks_freed = 0; i < count; i++) {
5055 		BUFFER_TRACE(bitmap_bh, "clear bit");
5056 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
5057 			ext4_error(sb, "bit already cleared for block %llu",
5058 				   (ext4_fsblk_t)(block + i));
5059 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
5060 		} else {
5061 			blocks_freed++;
5062 		}
5063 	}
5064 
5065 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
5066 	if (err)
5067 		goto error_return;
5068 
5069 	/*
5070 	 * need to update group_info->bb_free and bitmap
5071 	 * with group lock held. generate_buddy look at
5072 	 * them with group lock_held
5073 	 */
5074 	ext4_lock_group(sb, block_group);
5075 	mb_clear_bits(bitmap_bh->b_data, bit, count);
5076 	mb_free_blocks(NULL, &e4b, bit, count);
5077 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
5078 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
5079 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5080 	ext4_group_desc_csum_set(sb, block_group, desc);
5081 	ext4_unlock_group(sb, block_group);
5082 	percpu_counter_add(&sbi->s_freeclusters_counter,
5083 			   EXT4_NUM_B2C(sbi, blocks_freed));
5084 
5085 	if (sbi->s_log_groups_per_flex) {
5086 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5087 		atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
5088 			     &sbi->s_flex_groups[flex_group].free_clusters);
5089 	}
5090 
5091 	ext4_mb_unload_buddy(&e4b);
5092 
5093 	/* We dirtied the bitmap block */
5094 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5095 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5096 
5097 	/* And the group descriptor block */
5098 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5099 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5100 	if (!err)
5101 		err = ret;
5102 
5103 error_return:
5104 	brelse(bitmap_bh);
5105 	ext4_std_error(sb, err);
5106 	return err;
5107 }
5108 
5109 /**
5110  * ext4_trim_extent -- function to TRIM one single free extent in the group
5111  * @sb:		super block for the file system
5112  * @start:	starting block of the free extent in the alloc. group
5113  * @count:	number of blocks to TRIM
5114  * @group:	alloc. group we are working with
5115  * @e4b:	ext4 buddy for the group
5116  *
5117  * Trim "count" blocks starting at "start" in the "group". To assure that no
5118  * one will allocate those blocks, mark it as used in buddy bitmap. This must
5119  * be called with under the group lock.
5120  */
5121 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5122 			     ext4_group_t group, struct ext4_buddy *e4b)
5123 __releases(bitlock)
5124 __acquires(bitlock)
5125 {
5126 	struct ext4_free_extent ex;
5127 	int ret = 0;
5128 
5129 	trace_ext4_trim_extent(sb, group, start, count);
5130 
5131 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
5132 
5133 	ex.fe_start = start;
5134 	ex.fe_group = group;
5135 	ex.fe_len = count;
5136 
5137 	/*
5138 	 * Mark blocks used, so no one can reuse them while
5139 	 * being trimmed.
5140 	 */
5141 	mb_mark_used(e4b, &ex);
5142 	ext4_unlock_group(sb, group);
5143 	ret = ext4_issue_discard(sb, group, start, count, NULL);
5144 	ext4_lock_group(sb, group);
5145 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
5146 	return ret;
5147 }
5148 
5149 /**
5150  * ext4_trim_all_free -- function to trim all free space in alloc. group
5151  * @sb:			super block for file system
5152  * @group:		group to be trimmed
5153  * @start:		first group block to examine
5154  * @max:		last group block to examine
5155  * @minblocks:		minimum extent block count
5156  *
5157  * ext4_trim_all_free walks through group's buddy bitmap searching for free
5158  * extents. When the free block is found, ext4_trim_extent is called to TRIM
5159  * the extent.
5160  *
5161  *
5162  * ext4_trim_all_free walks through group's block bitmap searching for free
5163  * extents. When the free extent is found, mark it as used in group buddy
5164  * bitmap. Then issue a TRIM command on this extent and free the extent in
5165  * the group buddy bitmap. This is done until whole group is scanned.
5166  */
5167 static ext4_grpblk_t
5168 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5169 		   ext4_grpblk_t start, ext4_grpblk_t max,
5170 		   ext4_grpblk_t minblocks)
5171 {
5172 	void *bitmap;
5173 	ext4_grpblk_t next, count = 0, free_count = 0;
5174 	struct ext4_buddy e4b;
5175 	int ret = 0;
5176 
5177 	trace_ext4_trim_all_free(sb, group, start, max);
5178 
5179 	ret = ext4_mb_load_buddy(sb, group, &e4b);
5180 	if (ret) {
5181 		ext4_warning(sb, "Error %d loading buddy information for %u",
5182 			     ret, group);
5183 		return ret;
5184 	}
5185 	bitmap = e4b.bd_bitmap;
5186 
5187 	ext4_lock_group(sb, group);
5188 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5189 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5190 		goto out;
5191 
5192 	start = (e4b.bd_info->bb_first_free > start) ?
5193 		e4b.bd_info->bb_first_free : start;
5194 
5195 	while (start <= max) {
5196 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
5197 		if (start > max)
5198 			break;
5199 		next = mb_find_next_bit(bitmap, max + 1, start);
5200 
5201 		if ((next - start) >= minblocks) {
5202 			ret = ext4_trim_extent(sb, start,
5203 					       next - start, group, &e4b);
5204 			if (ret && ret != -EOPNOTSUPP)
5205 				break;
5206 			ret = 0;
5207 			count += next - start;
5208 		}
5209 		free_count += next - start;
5210 		start = next + 1;
5211 
5212 		if (fatal_signal_pending(current)) {
5213 			count = -ERESTARTSYS;
5214 			break;
5215 		}
5216 
5217 		if (need_resched()) {
5218 			ext4_unlock_group(sb, group);
5219 			cond_resched();
5220 			ext4_lock_group(sb, group);
5221 		}
5222 
5223 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
5224 			break;
5225 	}
5226 
5227 	if (!ret) {
5228 		ret = count;
5229 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5230 	}
5231 out:
5232 	ext4_unlock_group(sb, group);
5233 	ext4_mb_unload_buddy(&e4b);
5234 
5235 	ext4_debug("trimmed %d blocks in the group %d\n",
5236 		count, group);
5237 
5238 	return ret;
5239 }
5240 
5241 /**
5242  * ext4_trim_fs() -- trim ioctl handle function
5243  * @sb:			superblock for filesystem
5244  * @range:		fstrim_range structure
5245  *
5246  * start:	First Byte to trim
5247  * len:		number of Bytes to trim from start
5248  * minlen:	minimum extent length in Bytes
5249  * ext4_trim_fs goes through all allocation groups containing Bytes from
5250  * start to start+len. For each such a group ext4_trim_all_free function
5251  * is invoked to trim all free space.
5252  */
5253 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5254 {
5255 	struct ext4_group_info *grp;
5256 	ext4_group_t group, first_group, last_group;
5257 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5258 	uint64_t start, end, minlen, trimmed = 0;
5259 	ext4_fsblk_t first_data_blk =
5260 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5261 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5262 	int ret = 0;
5263 
5264 	start = range->start >> sb->s_blocksize_bits;
5265 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
5266 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5267 			      range->minlen >> sb->s_blocksize_bits);
5268 
5269 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5270 	    start >= max_blks ||
5271 	    range->len < sb->s_blocksize)
5272 		return -EINVAL;
5273 	if (end >= max_blks)
5274 		end = max_blks - 1;
5275 	if (end <= first_data_blk)
5276 		goto out;
5277 	if (start < first_data_blk)
5278 		start = first_data_blk;
5279 
5280 	/* Determine first and last group to examine based on start and end */
5281 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5282 				     &first_group, &first_cluster);
5283 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5284 				     &last_group, &last_cluster);
5285 
5286 	/* end now represents the last cluster to discard in this group */
5287 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5288 
5289 	for (group = first_group; group <= last_group; group++) {
5290 		grp = ext4_get_group_info(sb, group);
5291 		/* We only do this if the grp has never been initialized */
5292 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5293 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
5294 			if (ret)
5295 				break;
5296 		}
5297 
5298 		/*
5299 		 * For all the groups except the last one, last cluster will
5300 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5301 		 * change it for the last group, note that last_cluster is
5302 		 * already computed earlier by ext4_get_group_no_and_offset()
5303 		 */
5304 		if (group == last_group)
5305 			end = last_cluster;
5306 
5307 		if (grp->bb_free >= minlen) {
5308 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5309 						end, minlen);
5310 			if (cnt < 0) {
5311 				ret = cnt;
5312 				break;
5313 			}
5314 			trimmed += cnt;
5315 		}
5316 
5317 		/*
5318 		 * For every group except the first one, we are sure
5319 		 * that the first cluster to discard will be cluster #0.
5320 		 */
5321 		first_cluster = 0;
5322 	}
5323 
5324 	if (!ret)
5325 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5326 
5327 out:
5328 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5329 	return ret;
5330 }
5331 
5332 /* Iterate all the free extents in the group. */
5333 int
5334 ext4_mballoc_query_range(
5335 	struct super_block		*sb,
5336 	ext4_group_t			group,
5337 	ext4_grpblk_t			start,
5338 	ext4_grpblk_t			end,
5339 	ext4_mballoc_query_range_fn	formatter,
5340 	void				*priv)
5341 {
5342 	void				*bitmap;
5343 	ext4_grpblk_t			next;
5344 	struct ext4_buddy		e4b;
5345 	int				error;
5346 
5347 	error = ext4_mb_load_buddy(sb, group, &e4b);
5348 	if (error)
5349 		return error;
5350 	bitmap = e4b.bd_bitmap;
5351 
5352 	ext4_lock_group(sb, group);
5353 
5354 	start = (e4b.bd_info->bb_first_free > start) ?
5355 		e4b.bd_info->bb_first_free : start;
5356 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
5357 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5358 
5359 	while (start <= end) {
5360 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
5361 		if (start > end)
5362 			break;
5363 		next = mb_find_next_bit(bitmap, end + 1, start);
5364 
5365 		ext4_unlock_group(sb, group);
5366 		error = formatter(sb, group, start, next - start, priv);
5367 		if (error)
5368 			goto out_unload;
5369 		ext4_lock_group(sb, group);
5370 
5371 		start = next + 1;
5372 	}
5373 
5374 	ext4_unlock_group(sb, group);
5375 out_unload:
5376 	ext4_mb_unload_buddy(&e4b);
5377 
5378 	return error;
5379 }
5380