xref: /openbmc/linux/fs/ext4/mballoc.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "ext4_jbd2.h"
25 #include "mballoc.h"
26 #include <linux/log2.h>
27 #include <linux/module.h>
28 #include <linux/slab.h>
29 #include <trace/events/ext4.h>
30 
31 #ifdef CONFIG_EXT4_DEBUG
32 ushort ext4_mballoc_debug __read_mostly;
33 
34 module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
35 MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
36 #endif
37 
38 /*
39  * MUSTDO:
40  *   - test ext4_ext_search_left() and ext4_ext_search_right()
41  *   - search for metadata in few groups
42  *
43  * TODO v4:
44  *   - normalization should take into account whether file is still open
45  *   - discard preallocations if no free space left (policy?)
46  *   - don't normalize tails
47  *   - quota
48  *   - reservation for superuser
49  *
50  * TODO v3:
51  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
52  *   - track min/max extents in each group for better group selection
53  *   - mb_mark_used() may allocate chunk right after splitting buddy
54  *   - tree of groups sorted by number of free blocks
55  *   - error handling
56  */
57 
58 /*
59  * An allocation request involves a request for multiple blocks
60  * near the specified goal (block) value.
61  *
62  * During the initialization phase of the allocator we decide whether to use
63  * group preallocation or inode preallocation depending on the size of
64  * the file. The size of the file could be the resulting file size we
65  * would have after allocation, or the current file size, whichever
66  * is larger. If the size is less than sbi->s_mb_stream_request we
67  * select group preallocation. The default value of
68  * s_mb_stream_request is 16 blocks. This can also be tuned via
69  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
70  * terms of number of blocks.
71  *
72  * The main motivation for having small files use group preallocation is to
73  * keep small files closer together on the disk (see the sketch below).
74  *
75  * First stage the allocator looks at the inode prealloc list,
76  * ext4_inode_info->i_prealloc_list, which contains list of prealloc
77  * spaces for this particular inode. The inode prealloc space is
78  * represented as:
79  *
80  * pa_lstart -> the logical start block for this prealloc space
81  * pa_pstart -> the physical start block for this prealloc space
82  * pa_len    -> length for this prealloc space (in clusters)
83  * pa_free   ->  free space available in this prealloc space (in clusters)
84  *
85  * The inode preallocation space is used based on the _logical_ start
86  * block. Only if the logical file block falls within the range of a prealloc
87  * space do we consume that particular prealloc space. This makes sure that
88  * we have contiguous physical blocks representing the file blocks.
89  *
90  * The important thing to note about inode prealloc space is that
91  * we don't modify any of its values except
92  * pa_free.
93  *
94  * If we are not able to find blocks in the inode prealloc space and if we
95  * have the group allocation flag set then we look at the locality group
96  * prealloc space. This is a per-CPU prealloc list, represented as
97  *
98  * ext4_sb_info.s_locality_groups[smp_processor_id()]
99  *
100  * The reason for having a per cpu locality group is to reduce the contention
101  * between CPUs. It is possible to get scheduled at this point.
102  *
103  * The locality group prealloc space is used looking at whether we have
104  * enough free space (pa_free) within the prealloc space.
105  *
106  * If we can't allocate blocks via inode prealloc and/or locality group
107  * prealloc then we look at the buddy cache. The buddy cache is represented
108  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
109  * mapped to the buddy and bitmap information regarding different
110  * groups. The buddy information is attached to the buddy cache inode so
111  * that we can access it through the page cache. The information regarding
112  * each group is loaded via ext4_mb_load_buddy.  This information consists of
113  * the block bitmap and the buddy information, and it is stored in the
114  * inode as:
115  *
116  *  {                        page                        }
117  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
118  *
119  *
120  * one block each for bitmap and buddy information.  So for each group we
121  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
122  * blocksize) blocks.  So it can have information regarding groups_per_page
123  * which is blocks_per_page/2
124  *
125  * The buddy cache inode is not stored on disk. The inode is thrown
126  * away when the filesystem is unmounted.
127  *
128  * We look for count number of blocks in the buddy cache. If we are able
129  * to locate that many free blocks we return with additional information
130  * regarding the rest of the contiguous physical blocks available.
131  *
132  * Before allocating blocks via the buddy cache we normalize the requested
133  * blocks. This ensures we ask for more blocks than we need. The extra
134  * blocks that we get after allocation is added to the respective prealloc
135  * list. In case of inode preallocation we follow a list of heuristics
136  * based on file size. This can be found in ext4_mb_normalize_request. If
137  * we are doing a group prealloc we try to normalize the request to
138  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
139  * dependent on the cluster size; for non-bigalloc file systems, it is
140  * 512 blocks. This can be tuned via
141  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
142  * terms of number of blocks. If we have mounted the file system with -O
143  * stripe=<value> option the group prealloc request is normalized to
144  * the smallest multiple of the stripe value (sbi->s_stripe) which is
145  * greater than the default mb_group_prealloc.
146  *
147  * The regular allocator (using the buddy cache) supports a few tunables.
148  *
149  * /sys/fs/ext4/<partition>/mb_min_to_scan
150  * /sys/fs/ext4/<partition>/mb_max_to_scan
151  * /sys/fs/ext4/<partition>/mb_order2_req
152  *
153  * The regular allocator uses the buddy scan only if the request len is a
154  * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
155  * The value of s_mb_order2_reqs can be tuned via
156  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to the
157  * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
158  * stripe-size chunks. This should result in better allocation on RAID setups.
159  * If not, we search in the specific group using the bitmap for best extents.
160  * The tunables min_to_scan and max_to_scan control the behaviour here.
161  * min_to_scan indicates how long mballoc __must__ look for a best
162  * extent and max_to_scan indicates how long mballoc __can__ look for a
163  * best extent among the found extents. Searching for the blocks starts with
164  * the group specified as the goal value in allocation context via
165  * ac_g_ex. Each group is first checked based on the criteria whether it
166  * can be used for allocation. ext4_mb_good_group explains how the groups are
167  * checked.
168  *
169  * Both prealloc spaces are populated as described above. So for the first
170  * request we will hit the buddy cache, which will result in this prealloc
171  * space getting filled. The prealloc space is then used for
172  * subsequent requests.
173  */
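/*
 * A minimal, illustrative sketch of the size-based choice described above
 * (user-space pseudocode, not part of mballoc.c; the real decision is made
 * by ext4_mb_group_or_file() further down in this file).  All names below
 * are local to the sketch.
 *
 *	#include <stdbool.h>
 *
 *	// Small files get locality-group preallocation, large files get
 *	// per-inode (stream) preallocation, as explained above.
 *	static bool use_group_preallocation(unsigned long long cur_size_blocks,
 *					    unsigned long long alloc_end_blocks,
 *					    unsigned long stream_request_blocks)
 *	{
 *		// "size" is the larger of the current file size and the size
 *		// the file would have after this allocation.
 *		unsigned long long size = cur_size_blocks > alloc_end_blocks ?
 *						cur_size_blocks : alloc_end_blocks;
 *
 *		return size < stream_request_blocks;	// small file => group PA
 *	}
 */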
174 
175 /*
176  * mballoc operates on the following data:
177  *  - on-disk bitmap
178  *  - in-core buddy (actually includes buddy and bitmap)
179  *  - preallocation descriptors (PAs)
180  *
181  * there are two types of preallocations:
182  *  - inode
183  *    assigned to a specific inode and can be used for this inode only.
184  *    it describes part of the inode's space preallocated to specific
185  *    physical blocks. any block from that preallocation can be used
186  *    independently. the descriptor just tracks the number of blocks left
187  *    unused. so, before taking some block from the descriptor, one must
188  *    make sure the corresponding logical block isn't allocated yet. this
189  *    also means that freeing any block within descriptor's range
190  *    must discard all preallocated blocks.
191  *  - locality group
192  *    assigned to a specific locality group which does not translate to a
193  *    permanent set of inodes: an inode can join and leave the group. space
194  *    from this type of preallocation can be used for any inode. thus
195  *    it's consumed from the beginning to the end.
196  *
197  * relation between them can be expressed as:
198  *    in-core buddy = on-disk bitmap + preallocation descriptors
199  *
200  * this means the blocks mballoc considers used are:
201  *  - allocated blocks (persistent)
202  *  - preallocated blocks (non-persistent)
203  *
204  * consistency in mballoc world means that at any time a block is either
205  * free or used in ALL structures. notice: "any time" should not be read
206  * literally -- time is discrete and delimited by locks.
207  *
208  *  to keep it simple, we don't use block numbers, instead we count number of
209  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
210  *
211  * all operations can be expressed as:
212  *  - init buddy:			buddy = on-disk + PAs
213  *  - new PA:				buddy += N; PA = N
214  *  - use inode PA:			on-disk += N; PA -= N
215  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
216  *  - use locality group PA		on-disk += N; PA -= N
217  *  - discard locality group PA		buddy -= PA; PA = 0
218  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
219  *        is used in real operation because we can't know actual used
220  *        bits from PA, only from on-disk bitmap
221  *
222  * if we follow this strict logic, then all operations above should be atomic.
223  * given some of them can block, we'd have to use something like semaphores
224  * killing performance on high-end SMP hardware. let's try to relax it using
225  * the following knowledge:
226  *  1) if buddy is referenced, it's already initialized
227  *  2) while block is used in buddy and the buddy is referenced,
228  *     nobody can re-allocate that block
229  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
230  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
231  *     on-disk bitmap if buddy has the same bit set and/or a PA covers that
232  *     block
233  *
234  * so, now we're building a concurrency table:
235  *  - init buddy vs.
236  *    - new PA
237  *      blocks for PA are allocated in the buddy, buddy must be referenced
238  *      until PA is linked to allocation group to avoid concurrent buddy init
239  *    - use inode PA
240  *      we need to make sure that either on-disk bitmap or PA has uptodate data
241  *      given (3) we care that PA-=N operation doesn't interfere with init
242  *    - discard inode PA
243  *      the simplest way would be to have buddy initialized by the discard
244  *    - use locality group PA
245  *      again PA-=N must be serialized with init
246  *    - discard locality group PA
247  *      the simplest way would be to have buddy initialized by the discard
248  *  - new PA vs.
249  *    - use inode PA
250  *      i_data_sem serializes them
251  *    - discard inode PA
252  *      discard process must wait until PA isn't used by another process
253  *    - use locality group PA
254  *      some mutex should serialize them
255  *    - discard locality group PA
256  *      discard process must wait until PA isn't used by another process
257  *  - use inode PA
258  *    - use inode PA
259  *      i_data_sem or another mutex should serialize them
260  *    - discard inode PA
261  *      discard process must wait until PA isn't used by another process
262  *    - use locality group PA
263  *      nothing wrong here -- they're different PAs covering different blocks
264  *    - discard locality group PA
265  *      discard process must wait until PA isn't used by another process
266  *
267  * now we're ready to draw a few conclusions:
268  *  - while a PA is referenced, no discard of it is possible
269  *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
270  *  - a PA changes only after the on-disk bitmap does
271  *  - discard must not compete with init. either init is done before
272  *    any discard or they're serialized somehow
273  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
274  *
275  * a special case is when we've used a PA down to emptiness. no need to modify
276  * the buddy in this case, but we should care about concurrent init
277  *
278  */
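/*
 * A self-contained sketch of the accounting relations listed above, using
 * plain counters rather than bitmaps (illustration only, not part of
 * mballoc.c).  The invariant "buddy = on-disk + PA" holds after every step.
 *
 *	#include <assert.h>
 *
 *	struct counts { unsigned int ondisk, buddy, pa; };	// blocks marked used
 *
 *	static void new_pa(struct counts *c, unsigned int n)
 *	{ c->buddy += n; c->pa += n; }		// reserve n blocks in the buddy
 *
 *	static void use_pa(struct counts *c, unsigned int n)
 *	{ c->ondisk += n; c->pa -= n; }		// commit n blocks to the on-disk bitmap
 *
 *	static void discard_pa(struct counts *c)
 *	{ c->buddy -= c->pa; c->pa = 0; }	// free whatever was never committed
 *
 *	static void check(const struct counts *c)
 *	{ assert(c->buddy == c->ondisk + c->pa); }
 *
 * For example, starting from ondisk = buddy = 100, pa = 0: new_pa(8) gives
 * buddy = 108; use_pa(3) gives ondisk = 103, pa = 5; discard_pa() gives
 * buddy = 103 == ondisk.  check() holds after every step.
 */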
279 
280 /*
281  * Logic in a few words:
282  *
283  *  - allocation:
284  *    load group
285  *    find blocks
286  *    mark bits in on-disk bitmap
287  *    release group
288  *
289  *  - use preallocation:
290  *    find proper PA (per-inode or group)
291  *    load group
292  *    mark bits in on-disk bitmap
293  *    release group
294  *    release PA
295  *
296  *  - free:
297  *    load group
298  *    mark bits in on-disk bitmap
299  *    release group
300  *
301  *  - discard preallocations in group:
302  *    mark PAs deleted
303  *    move them onto local list
304  *    load on-disk bitmap
305  *    load group
306  *    remove PA from object (inode or locality group)
307  *    mark free blocks in-core
308  *
309  *  - discard inode's preallocations:
310  */
311 
312 /*
313  * Locking rules
314  *
315  * Locks:
316  *  - bitlock on a group	(group)
317  *  - object (inode/locality)	(object)
318  *  - per-pa lock		(pa)
319  *
320  * Paths:
321  *  - new pa
322  *    object
323  *    group
324  *
325  *  - find and use pa:
326  *    pa
327  *
328  *  - release consumed pa:
329  *    pa
330  *    group
331  *    object
332  *
333  *  - generate in-core bitmap:
334  *    group
335  *        pa
336  *
337  *  - discard all for given object (inode, locality group):
338  *    object
339  *        pa
340  *    group
341  *
342  *  - discard all for given group:
343  *    group
344  *        pa
345  *    group
346  *        object
347  *
348  */
349 static struct kmem_cache *ext4_pspace_cachep;
350 static struct kmem_cache *ext4_ac_cachep;
351 static struct kmem_cache *ext4_free_data_cachep;
352 
353 /* We create slab caches for groupinfo data structures based on the
354  * superblock block size.  There will be one per mounted filesystem for
355  * each unique s_blocksize_bits */
356 #define NR_GRPINFO_CACHES 8
357 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
358 
359 static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
360 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
361 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
362 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
363 };
364 
365 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
366 					ext4_group_t group);
367 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
368 						ext4_group_t group);
369 static void ext4_free_data_callback(struct super_block *sb,
370 				struct ext4_journal_cb_entry *jce, int rc);
371 
372 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
373 {
374 #if BITS_PER_LONG == 64
375 	*bit += ((unsigned long) addr & 7UL) << 3;
376 	addr = (void *) ((unsigned long) addr & ~7UL);
377 #elif BITS_PER_LONG == 32
378 	*bit += ((unsigned long) addr & 3UL) << 3;
379 	addr = (void *) ((unsigned long) addr & ~3UL);
380 #else
381 #error "how many bits you are?!"
382 #endif
383 	return addr;
384 }
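/*
 * What the helper above does, as a stand-alone example (not part of
 * mballoc.c): on a 64-bit machine, testing bit 5 at the misaligned address
 * base + 3 becomes testing bit 5 + 3 * 8 = 29 at the 8-byte-aligned base.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long base = 0x1000;	// already 8-byte aligned
 *		unsigned long addr = base + 3;	// misaligned by 3 bytes
 *		int bit = 5;
 *
 *		bit  += (addr & 7UL) << 3;	// 5 + 24 = 29
 *		addr &= ~7UL;			// back to 0x1000
 *		printf("bit %d at %#lx\n", bit, addr);	// "bit 29 at 0x1000"
 *		return 0;
 *	}
 */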
385 
386 static inline int mb_test_bit(int bit, void *addr)
387 {
388 	/*
389 	 * ext4_test_bit on architecture like powerpc
390 	 * needs unsigned long aligned address
391 	 */
392 	addr = mb_correct_addr_and_bit(&bit, addr);
393 	return ext4_test_bit(bit, addr);
394 }
395 
396 static inline void mb_set_bit(int bit, void *addr)
397 {
398 	addr = mb_correct_addr_and_bit(&bit, addr);
399 	ext4_set_bit(bit, addr);
400 }
401 
402 static inline void mb_clear_bit(int bit, void *addr)
403 {
404 	addr = mb_correct_addr_and_bit(&bit, addr);
405 	ext4_clear_bit(bit, addr);
406 }
407 
408 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
409 {
410 	int fix = 0, ret, tmpmax;
411 	addr = mb_correct_addr_and_bit(&fix, addr);
412 	tmpmax = max + fix;
413 	start += fix;
414 
415 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
416 	if (ret > max)
417 		return max;
418 	return ret;
419 }
420 
421 static inline int mb_find_next_bit(void *addr, int max, int start)
422 {
423 	int fix = 0, ret, tmpmax;
424 	addr = mb_correct_addr_and_bit(&fix, addr);
425 	tmpmax = max + fix;
426 	start += fix;
427 
428 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
429 	if (ret > max)
430 		return max;
431 	return ret;
432 }
433 
434 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
435 {
436 	char *bb;
437 
438 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
439 	BUG_ON(max == NULL);
440 
441 	if (order > e4b->bd_blkbits + 1) {
442 		*max = 0;
443 		return NULL;
444 	}
445 
446 	/* at order 0 we see each particular block */
447 	if (order == 0) {
448 		*max = 1 << (e4b->bd_blkbits + 3);
449 		return e4b->bd_bitmap;
450 	}
451 
452 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
453 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
454 
455 	return bb;
456 }
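/*
 * A user-space sketch of the layout mb_find_buddy() walks (not part of
 * mballoc.c): order 0 lives in the bitmap block itself (8 * blocksize bits),
 * while orders 1..blkbits+1 are packed back to back inside the single buddy
 * block.  The offsets and sizes printed below illustrate how the
 * s_mb_offsets[] and s_mb_maxs[] arrays can be derived for 4 KiB blocks;
 * the authoritative setup is done at mount time in ext4_mb_init().
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int blkbits = 12;			// 4 KiB blocks
 *		unsigned int offset = 0;			// byte offset inside the buddy block
 *		unsigned int incr = 1U << (blkbits - 1);	// bytes used by order 1
 *		unsigned int max = 1U << (blkbits + 2);		// bits at order 1
 *
 *		printf("order  0: bitmap block, %u bits\n", 1U << (blkbits + 3));
 *		for (unsigned int order = 1; order <= blkbits + 1; order++) {
 *			printf("order %2u: offset %4u bytes, %5u bits\n",
 *			       order, offset, max);
 *			offset += incr;
 *			incr >>= 1;
 *			max >>= 1;
 *		}
 *		return 0;
 *	}
 */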
457 
458 #ifdef DOUBLE_CHECK
459 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
460 			   int first, int count)
461 {
462 	int i;
463 	struct super_block *sb = e4b->bd_sb;
464 
465 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
466 		return;
467 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
468 	for (i = 0; i < count; i++) {
469 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
470 			ext4_fsblk_t blocknr;
471 
472 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
473 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
474 			ext4_grp_locked_error(sb, e4b->bd_group,
475 					      inode ? inode->i_ino : 0,
476 					      blocknr,
477 					      "freeing block already freed "
478 					      "(bit %u)",
479 					      first + i);
480 		}
481 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
482 	}
483 }
484 
485 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
486 {
487 	int i;
488 
489 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
490 		return;
491 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
492 	for (i = 0; i < count; i++) {
493 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
494 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
495 	}
496 }
497 
498 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
499 {
500 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
501 		unsigned char *b1, *b2;
502 		int i;
503 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
504 		b2 = (unsigned char *) bitmap;
505 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
506 			if (b1[i] != b2[i]) {
507 				ext4_msg(e4b->bd_sb, KERN_ERR,
508 					 "corruption in group %u "
509 					 "at byte %u(%u): %x in copy != %x "
510 					 "on disk/prealloc",
511 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
512 				BUG();
513 			}
514 		}
515 	}
516 }
517 
518 #else
519 static inline void mb_free_blocks_double(struct inode *inode,
520 				struct ext4_buddy *e4b, int first, int count)
521 {
522 	return;
523 }
524 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
525 						int first, int count)
526 {
527 	return;
528 }
529 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
530 {
531 	return;
532 }
533 #endif
534 
535 #ifdef AGGRESSIVE_CHECK
536 
537 #define MB_CHECK_ASSERT(assert)						\
538 do {									\
539 	if (!(assert)) {						\
540 		printk(KERN_EMERG					\
541 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
542 			function, file, line, # assert);		\
543 		BUG();							\
544 	}								\
545 } while (0)
546 
547 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
548 				const char *function, int line)
549 {
550 	struct super_block *sb = e4b->bd_sb;
551 	int order = e4b->bd_blkbits + 1;
552 	int max;
553 	int max2;
554 	int i;
555 	int j;
556 	int k;
557 	int count;
558 	struct ext4_group_info *grp;
559 	int fragments = 0;
560 	int fstart;
561 	struct list_head *cur;
562 	void *buddy;
563 	void *buddy2;
564 
565 	{
566 		static int mb_check_counter;
567 		if (mb_check_counter++ % 100 != 0)
568 			return 0;
569 	}
570 
571 	while (order > 1) {
572 		buddy = mb_find_buddy(e4b, order, &max);
573 		MB_CHECK_ASSERT(buddy);
574 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
575 		MB_CHECK_ASSERT(buddy2);
576 		MB_CHECK_ASSERT(buddy != buddy2);
577 		MB_CHECK_ASSERT(max * 2 == max2);
578 
579 		count = 0;
580 		for (i = 0; i < max; i++) {
581 
582 			if (mb_test_bit(i, buddy)) {
583 				/* only single bit in buddy2 may be 1 */
584 				if (!mb_test_bit(i << 1, buddy2)) {
585 					MB_CHECK_ASSERT(
586 						mb_test_bit((i<<1)+1, buddy2));
587 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
588 					MB_CHECK_ASSERT(
589 						mb_test_bit(i << 1, buddy2));
590 				}
591 				continue;
592 			}
593 
594 			/* both bits in buddy2 must be 1 */
595 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
596 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
597 
598 			for (j = 0; j < (1 << order); j++) {
599 				k = (i * (1 << order)) + j;
600 				MB_CHECK_ASSERT(
601 					!mb_test_bit(k, e4b->bd_bitmap));
602 			}
603 			count++;
604 		}
605 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
606 		order--;
607 	}
608 
609 	fstart = -1;
610 	buddy = mb_find_buddy(e4b, 0, &max);
611 	for (i = 0; i < max; i++) {
612 		if (!mb_test_bit(i, buddy)) {
613 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
614 			if (fstart == -1) {
615 				fragments++;
616 				fstart = i;
617 			}
618 			continue;
619 		}
620 		fstart = -1;
621 		/* check used bits only */
622 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
623 			buddy2 = mb_find_buddy(e4b, j, &max2);
624 			k = i >> j;
625 			MB_CHECK_ASSERT(k < max2);
626 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
627 		}
628 	}
629 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
630 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
631 
632 	grp = ext4_get_group_info(sb, e4b->bd_group);
633 	list_for_each(cur, &grp->bb_prealloc_list) {
634 		ext4_group_t groupnr;
635 		struct ext4_prealloc_space *pa;
636 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
637 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
638 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
639 		for (i = 0; i < pa->pa_len; i++)
640 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
641 	}
642 	return 0;
643 }
644 #undef MB_CHECK_ASSERT
645 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
646 					__FILE__, __func__, __LINE__)
647 #else
648 #define mb_check_buddy(e4b)
649 #endif
650 
651 /*
652  * Divide the blocks starting at @first with length @len into
653  * smaller chunks whose sizes are powers of 2 blocks.
654  * Clear the bits in the buddy bitmap which the blocks of the chunk(s) cover,
655  * then increase bb_counters[] for the corresponding chunk size.
656  */
657 static void ext4_mb_mark_free_simple(struct super_block *sb,
658 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
659 					struct ext4_group_info *grp)
660 {
661 	struct ext4_sb_info *sbi = EXT4_SB(sb);
662 	ext4_grpblk_t min;
663 	ext4_grpblk_t max;
664 	ext4_grpblk_t chunk;
665 	unsigned short border;
666 
667 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
668 
669 	border = 2 << sb->s_blocksize_bits;
670 
671 	while (len > 0) {
672 		/* find how many blocks can be covered since this position */
673 		max = ffs(first | border) - 1;
674 
675 		/* find how many blocks of power 2 we need to mark */
676 		min = fls(len) - 1;
677 
678 		if (max < min)
679 			min = max;
680 		chunk = 1 << min;
681 
682 		/* mark multiblock chunks only */
683 		grp->bb_counters[min]++;
684 		if (min > 0)
685 			mb_clear_bit(first >> min,
686 				     buddy + sbi->s_mb_offsets[min]);
687 
688 		len -= chunk;
689 		first += chunk;
690 	}
691 }
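/*
 * A worked example of the chunking loop above (stand-alone user-space code,
 * not part of mballoc.c).  Each chunk is the largest power-of-two run that is
 * both aligned at the current position and no longer than the remaining
 * length; only chunks of order > 0 clear a bit in the corresponding buddy
 * bitmap, order-0 chunks just bump bb_counters[0].
 *
 *	#include <stdio.h>
 *
 *	static int lowest_set(unsigned int v)	// ffs(v) - 1, v must be non-zero
 *	{ int i = 0; while (!(v & 1)) { v >>= 1; i++; } return i; }
 *
 *	static int highest_set(unsigned int v)	// fls(v) - 1
 *	{ int i = -1; while (v) { v >>= 1; i++; } return i; }
 *
 *	int main(void)
 *	{
 *		unsigned int first = 5, len = 13, border = 8192; // border = 2 << blkbits
 *
 *		while (len > 0) {
 *			int max = lowest_set(first | border);	// limited by alignment
 *			int min = highest_set(len);		// largest 2^min <= len
 *
 *			if (max < min)
 *				min = max;
 *			printf("order-%d chunk (%u blocks) at %u\n",
 *			       min, 1U << min, first);
 *			first += 1U << min;
 *			len -= 1U << min;
 *		}
 *		// prints chunks of 1, 2, 8 and 2 blocks at 5, 6, 8 and 16
 *		return 0;
 *	}
 */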
692 
693 /*
694  * Cache the order of the largest free extent we have available in this block
695  * group.
696  */
697 static void
698 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
699 {
700 	int i;
701 	int bits;
702 
703 	grp->bb_largest_free_order = -1; /* uninit */
704 
705 	bits = sb->s_blocksize_bits + 1;
706 	for (i = bits; i >= 0; i--) {
707 		if (grp->bb_counters[i] > 0) {
708 			grp->bb_largest_free_order = i;
709 			break;
710 		}
711 	}
712 }
713 
714 static noinline_for_stack
715 void ext4_mb_generate_buddy(struct super_block *sb,
716 				void *buddy, void *bitmap, ext4_group_t group)
717 {
718 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
719 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
720 	ext4_grpblk_t i = 0;
721 	ext4_grpblk_t first;
722 	ext4_grpblk_t len;
723 	unsigned free = 0;
724 	unsigned fragments = 0;
725 	unsigned long long period = get_cycles();
726 
727 	/* initialize buddy from bitmap which is aggregation
728 	 * of on-disk bitmap and preallocations */
729 	i = mb_find_next_zero_bit(bitmap, max, 0);
730 	grp->bb_first_free = i;
731 	while (i < max) {
732 		fragments++;
733 		first = i;
734 		i = mb_find_next_bit(bitmap, max, i);
735 		len = i - first;
736 		free += len;
737 		if (len > 1)
738 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
739 		else
740 			grp->bb_counters[0]++;
741 		if (i < max)
742 			i = mb_find_next_zero_bit(bitmap, max, i);
743 	}
744 	grp->bb_fragments = fragments;
745 
746 	if (free != grp->bb_free) {
747 		ext4_grp_locked_error(sb, group, 0, 0,
748 				      "%u clusters in bitmap, %u in gd",
749 				      free, grp->bb_free);
750 		/*
751 		 * If we intend to continue, we consider the group descriptor
752 		 * corrupt and update bb_free using the bitmap value
753 		 */
754 		grp->bb_free = free;
755 	}
756 	mb_set_largest_free_order(sb, grp);
757 
758 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
759 
760 	period = get_cycles() - period;
761 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
762 	EXT4_SB(sb)->s_mb_buddies_generated++;
763 	EXT4_SB(sb)->s_mb_generation_time += period;
764 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
765 }
766 
767 /* The buddy information is attached to the buddy cache inode
768  * for convenience. The information regarding each group
769  * is loaded via ext4_mb_load_buddy. This information consists of the
770  * block bitmap and the buddy information, and it is
771  * stored in the inode as
772  *
773  * {                        page                        }
774  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
775  *
776  *
777  * one block each for bitmap and buddy information.
778  * So for each group we take up 2 blocks. A page can
779  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
780  * So it can have information regarding groups_per_page which
781  * is blocks_per_page/2
782  *
783  * Locking note:  This routine takes the block group lock of all groups
784  * for this page; do not hold this lock when calling this routine!
785  */
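/*
 * A small stand-alone example of the page <-> (group, bitmap/buddy) mapping
 * described above (not part of mballoc.c).  With 4 KiB pages and 1 KiB
 * blocks there are 4 blocks and therefore 2 groups per page; even block
 * indices hold bitmaps, odd ones hold buddies.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int page_size = 4096, blocksize = 1024;
 *		unsigned int blocks_per_page = page_size / blocksize;	// 4
 *		unsigned int groups_per_page = blocks_per_page / 2;	// 2
 *
 *		printf("%u block(s), %u group(s) per page\n",
 *		       blocks_per_page, groups_per_page);
 *		for (unsigned int group = 0; group < 4; group++) {
 *			unsigned int block = group * 2;	// bitmap block; buddy is block + 1
 *
 *			printf("group %u: bitmap page %u slot %u, buddy page %u slot %u\n",
 *			       group,
 *			       block / blocks_per_page, block % blocks_per_page,
 *			       (block + 1) / blocks_per_page,
 *			       (block + 1) % blocks_per_page);
 *		}
 *		return 0;
 *	}
 */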
786 
787 static int ext4_mb_init_cache(struct page *page, char *incore)
788 {
789 	ext4_group_t ngroups;
790 	int blocksize;
791 	int blocks_per_page;
792 	int groups_per_page;
793 	int err = 0;
794 	int i;
795 	ext4_group_t first_group, group;
796 	int first_block;
797 	struct super_block *sb;
798 	struct buffer_head *bhs;
799 	struct buffer_head **bh = NULL;
800 	struct inode *inode;
801 	char *data;
802 	char *bitmap;
803 	struct ext4_group_info *grinfo;
804 
805 	mb_debug(1, "init page %lu\n", page->index);
806 
807 	inode = page->mapping->host;
808 	sb = inode->i_sb;
809 	ngroups = ext4_get_groups_count(sb);
810 	blocksize = 1 << inode->i_blkbits;
811 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
812 
813 	groups_per_page = blocks_per_page >> 1;
814 	if (groups_per_page == 0)
815 		groups_per_page = 1;
816 
817 	/* allocate buffer_heads to read bitmaps */
818 	if (groups_per_page > 1) {
819 		i = sizeof(struct buffer_head *) * groups_per_page;
820 		bh = kzalloc(i, GFP_NOFS);
821 		if (bh == NULL) {
822 			err = -ENOMEM;
823 			goto out;
824 		}
825 	} else
826 		bh = &bhs;
827 
828 	first_group = page->index * blocks_per_page / 2;
829 
830 	/* read all groups the page covers into the cache */
831 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
832 		if (group >= ngroups)
833 			break;
834 
835 		grinfo = ext4_get_group_info(sb, group);
836 		/*
837 		 * If page is uptodate then we came here after online resize
838 		 * which added some new uninitialized group info structs, so
839 		 * we must skip all initialized uptodate buddies on the page,
840 		 * which may be currently in use by an allocating task.
841 		 */
842 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
843 			bh[i] = NULL;
844 			continue;
845 		}
846 		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
847 			err = -ENOMEM;
848 			goto out;
849 		}
850 		mb_debug(1, "read bitmap for group %u\n", group);
851 	}
852 
853 	/* wait for I/O completion */
854 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
855 		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
856 			err = -EIO;
857 			goto out;
858 		}
859 	}
860 
861 	first_block = page->index * blocks_per_page;
862 	for (i = 0; i < blocks_per_page; i++) {
863 		int group;
864 
865 		group = (first_block + i) >> 1;
866 		if (group >= ngroups)
867 			break;
868 
869 		if (!bh[group - first_group])
870 			/* skip initialized uptodate buddy */
871 			continue;
872 
873 		/*
874 		 * data carries information regarding this
875 		 * particular group in the format specified
876 		 * above
877 		 *
878 		 */
879 		data = page_address(page) + (i * blocksize);
880 		bitmap = bh[group - first_group]->b_data;
881 
882 		/*
883 		 * We place the buddy block and bitmap block
884 		 * close together
885 		 */
886 		if ((first_block + i) & 1) {
887 			/* this is block of buddy */
888 			BUG_ON(incore == NULL);
889 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
890 				group, page->index, i * blocksize);
891 			trace_ext4_mb_buddy_bitmap_load(sb, group);
892 			grinfo = ext4_get_group_info(sb, group);
893 			grinfo->bb_fragments = 0;
894 			memset(grinfo->bb_counters, 0,
895 			       sizeof(*grinfo->bb_counters) *
896 				(sb->s_blocksize_bits+2));
897 			/*
898 			 * incore got set to the group block bitmap below
899 			 */
900 			ext4_lock_group(sb, group);
901 			/* init the buddy */
902 			memset(data, 0xff, blocksize);
903 			ext4_mb_generate_buddy(sb, data, incore, group);
904 			ext4_unlock_group(sb, group);
905 			incore = NULL;
906 		} else {
907 			/* this is block of bitmap */
908 			BUG_ON(incore != NULL);
909 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
910 				group, page->index, i * blocksize);
911 			trace_ext4_mb_bitmap_load(sb, group);
912 
913 			/* see comments in ext4_mb_put_pa() */
914 			ext4_lock_group(sb, group);
915 			memcpy(data, bitmap, blocksize);
916 
917 			/* mark all preallocated blks used in in-core bitmap */
918 			ext4_mb_generate_from_pa(sb, data, group);
919 			ext4_mb_generate_from_freelist(sb, data, group);
920 			ext4_unlock_group(sb, group);
921 
922 			/* set incore so that the buddy information can be
923 			 * generated using this
924 			 */
925 			incore = data;
926 		}
927 	}
928 	SetPageUptodate(page);
929 
930 out:
931 	if (bh) {
932 		for (i = 0; i < groups_per_page; i++)
933 			brelse(bh[i]);
934 		if (bh != &bhs)
935 			kfree(bh);
936 	}
937 	return err;
938 }
939 
940 /*
941  * Lock the buddy and bitmap pages. This makes sure another parallel init_group
942  * on the same buddy page cannot happen while we hold the buddy page lock.
943  * Return the locked buddy and bitmap pages in the e4b struct. If buddy and
944  * bitmap are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
945  */
946 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
947 		ext4_group_t group, struct ext4_buddy *e4b)
948 {
949 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
950 	int block, pnum, poff;
951 	int blocks_per_page;
952 	struct page *page;
953 
954 	e4b->bd_buddy_page = NULL;
955 	e4b->bd_bitmap_page = NULL;
956 
957 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
958 	/*
959 	 * the buddy cache inode stores the block bitmap
960 	 * and buddy information in consecutive blocks.
961 	 * So for each group we need two blocks.
962 	 */
963 	block = group * 2;
964 	pnum = block / blocks_per_page;
965 	poff = block % blocks_per_page;
966 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
967 	if (!page)
968 		return -EIO;
969 	BUG_ON(page->mapping != inode->i_mapping);
970 	e4b->bd_bitmap_page = page;
971 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
972 
973 	if (blocks_per_page >= 2) {
974 		/* buddy and bitmap are on the same page */
975 		return 0;
976 	}
977 
978 	block++;
979 	pnum = block / blocks_per_page;
980 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
981 	if (!page)
982 		return -EIO;
983 	BUG_ON(page->mapping != inode->i_mapping);
984 	e4b->bd_buddy_page = page;
985 	return 0;
986 }
987 
988 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
989 {
990 	if (e4b->bd_bitmap_page) {
991 		unlock_page(e4b->bd_bitmap_page);
992 		page_cache_release(e4b->bd_bitmap_page);
993 	}
994 	if (e4b->bd_buddy_page) {
995 		unlock_page(e4b->bd_buddy_page);
996 		page_cache_release(e4b->bd_buddy_page);
997 	}
998 }
999 
1000 /*
1001  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1002  * block group lock of all groups for this page; do not hold the BG lock when
1003  * calling this routine!
1004  */
1005 static noinline_for_stack
1006 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1007 {
1008 
1009 	struct ext4_group_info *this_grp;
1010 	struct ext4_buddy e4b;
1011 	struct page *page;
1012 	int ret = 0;
1013 
1014 	mb_debug(1, "init group %u\n", group);
1015 	this_grp = ext4_get_group_info(sb, group);
1016 	/*
1017 	 * This ensures that we don't reinit the buddy cache
1018 	 * page which maps to the group from which we are already
1019 	 * allocating. If we are looking at the buddy cache we would
1020 	 * have taken a reference using ext4_mb_load_buddy and that
1021 	 * would have pinned the buddy page in the page cache.
1022 	 */
1023 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
1024 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1025 		/*
1026 		 * somebody initialized the group
1027 		 * return without doing anything
1028 		 */
1029 		goto err;
1030 	}
1031 
1032 	page = e4b.bd_bitmap_page;
1033 	ret = ext4_mb_init_cache(page, NULL);
1034 	if (ret)
1035 		goto err;
1036 	if (!PageUptodate(page)) {
1037 		ret = -EIO;
1038 		goto err;
1039 	}
1040 	mark_page_accessed(page);
1041 
1042 	if (e4b.bd_buddy_page == NULL) {
1043 		/*
1044 		 * If both the bitmap and buddy are in
1045 		 * the same page we don't need to force
1046 		 * init the buddy
1047 		 */
1048 		ret = 0;
1049 		goto err;
1050 	}
1051 	/* init buddy cache */
1052 	page = e4b.bd_buddy_page;
1053 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
1054 	if (ret)
1055 		goto err;
1056 	if (!PageUptodate(page)) {
1057 		ret = -EIO;
1058 		goto err;
1059 	}
1060 	mark_page_accessed(page);
1061 err:
1062 	ext4_mb_put_buddy_page_lock(&e4b);
1063 	return ret;
1064 }
1065 
1066 /*
1067  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1068  * block group lock of all groups for this page; do not hold the BG lock when
1069  * calling this routine!
1070  */
1071 static noinline_for_stack int
1072 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1073 					struct ext4_buddy *e4b)
1074 {
1075 	int blocks_per_page;
1076 	int block;
1077 	int pnum;
1078 	int poff;
1079 	struct page *page;
1080 	int ret;
1081 	struct ext4_group_info *grp;
1082 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1083 	struct inode *inode = sbi->s_buddy_cache;
1084 
1085 	mb_debug(1, "load group %u\n", group);
1086 
1087 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1088 	grp = ext4_get_group_info(sb, group);
1089 
1090 	e4b->bd_blkbits = sb->s_blocksize_bits;
1091 	e4b->bd_info = grp;
1092 	e4b->bd_sb = sb;
1093 	e4b->bd_group = group;
1094 	e4b->bd_buddy_page = NULL;
1095 	e4b->bd_bitmap_page = NULL;
1096 
1097 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1098 		/*
1099 		 * we need full data about the group
1100 		 * to make a good selection
1101 		 */
1102 		ret = ext4_mb_init_group(sb, group);
1103 		if (ret)
1104 			return ret;
1105 	}
1106 
1107 	/*
1108 	 * the buddy cache inode stores the block bitmap
1109 	 * and buddy information in consecutive blocks.
1110 	 * So for each group we need two blocks.
1111 	 */
1112 	block = group * 2;
1113 	pnum = block / blocks_per_page;
1114 	poff = block % blocks_per_page;
1115 
1116 	/* we could use find_or_create_page(), but it locks the page,
1117 	 * which we'd like to avoid in the fast path ... */
1118 	page = find_get_page(inode->i_mapping, pnum);
1119 	if (page == NULL || !PageUptodate(page)) {
1120 		if (page)
1121 			/*
1122 			 * drop the page reference and try
1123 			 * to get the page with lock. If we
1124 			 * are not uptodate that implies
1125 			 * somebody just created the page but
1126 			 * has not yet initialized it. So
1127 			 * wait for it to be initialized.
1128 			 */
1129 			page_cache_release(page);
1130 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1131 		if (page) {
1132 			BUG_ON(page->mapping != inode->i_mapping);
1133 			if (!PageUptodate(page)) {
1134 				ret = ext4_mb_init_cache(page, NULL);
1135 				if (ret) {
1136 					unlock_page(page);
1137 					goto err;
1138 				}
1139 				mb_cmp_bitmaps(e4b, page_address(page) +
1140 					       (poff * sb->s_blocksize));
1141 			}
1142 			unlock_page(page);
1143 		}
1144 	}
1145 	if (page == NULL || !PageUptodate(page)) {
1146 		ret = -EIO;
1147 		goto err;
1148 	}
1149 	e4b->bd_bitmap_page = page;
1150 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1151 	mark_page_accessed(page);
1152 
1153 	block++;
1154 	pnum = block / blocks_per_page;
1155 	poff = block % blocks_per_page;
1156 
1157 	page = find_get_page(inode->i_mapping, pnum);
1158 	if (page == NULL || !PageUptodate(page)) {
1159 		if (page)
1160 			page_cache_release(page);
1161 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1162 		if (page) {
1163 			BUG_ON(page->mapping != inode->i_mapping);
1164 			if (!PageUptodate(page)) {
1165 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
1166 				if (ret) {
1167 					unlock_page(page);
1168 					goto err;
1169 				}
1170 			}
1171 			unlock_page(page);
1172 		}
1173 	}
1174 	if (page == NULL || !PageUptodate(page)) {
1175 		ret = -EIO;
1176 		goto err;
1177 	}
1178 	e4b->bd_buddy_page = page;
1179 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1180 	mark_page_accessed(page);
1181 
1182 	BUG_ON(e4b->bd_bitmap_page == NULL);
1183 	BUG_ON(e4b->bd_buddy_page == NULL);
1184 
1185 	return 0;
1186 
1187 err:
1188 	if (page)
1189 		page_cache_release(page);
1190 	if (e4b->bd_bitmap_page)
1191 		page_cache_release(e4b->bd_bitmap_page);
1192 	if (e4b->bd_buddy_page)
1193 		page_cache_release(e4b->bd_buddy_page);
1194 	e4b->bd_buddy = NULL;
1195 	e4b->bd_bitmap = NULL;
1196 	return ret;
1197 }
1198 
1199 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1200 {
1201 	if (e4b->bd_bitmap_page)
1202 		page_cache_release(e4b->bd_bitmap_page);
1203 	if (e4b->bd_buddy_page)
1204 		page_cache_release(e4b->bd_buddy_page);
1205 }
1206 
1207 
1208 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1209 {
1210 	int order = 1;
1211 	void *bb;
1212 
1213 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1214 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1215 
1216 	bb = e4b->bd_buddy;
1217 	while (order <= e4b->bd_blkbits + 1) {
1218 		block = block >> 1;
1219 		if (!mb_test_bit(block, bb)) {
1220 			/* this block is part of buddy of order 'order' */
1221 			return order;
1222 		}
1223 		bb += 1 << (e4b->bd_blkbits - order);
1224 		order++;
1225 	}
1226 	return 0;
1227 }
1228 
1229 static void mb_clear_bits(void *bm, int cur, int len)
1230 {
1231 	__u32 *addr;
1232 
1233 	len = cur + len;
1234 	while (cur < len) {
1235 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1236 			/* fast path: clear whole word at once */
1237 			addr = bm + (cur >> 3);
1238 			*addr = 0;
1239 			cur += 32;
1240 			continue;
1241 		}
1242 		mb_clear_bit(cur, bm);
1243 		cur++;
1244 	}
1245 }
1246 
1247 void ext4_set_bits(void *bm, int cur, int len)
1248 {
1249 	__u32 *addr;
1250 
1251 	len = cur + len;
1252 	while (cur < len) {
1253 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1254 			/* fast path: set whole word at once */
1255 			addr = bm + (cur >> 3);
1256 			*addr = 0xffffffff;
1257 			cur += 32;
1258 			continue;
1259 		}
1260 		mb_set_bit(cur, bm);
1261 		cur++;
1262 	}
1263 }
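/*
 * A stand-alone sketch of the word-at-a-time fast path used by mb_clear_bits()
 * and ext4_set_bits() above (not part of mballoc.c): bits are handled one by
 * one until `cur` reaches a 32-bit boundary, then whole words are written as
 * long as at least 32 bits remain.  The toy below uses host-endian words; the
 * real helpers above use mb_set_bit()/mb_clear_bit() for the unaligned head
 * and tail.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		uint32_t bm[4] = { 0 };		// 128-bit toy bitmap
 *		int cur = 13, len = 70;		// set bits [13, 83)
 *
 *		for (len += cur; cur < len; ) {
 *			if ((cur & 31) == 0 && len - cur >= 32) {
 *				bm[cur >> 5] = 0xffffffff;	// whole word at once
 *				cur += 32;
 *			} else {
 *				bm[cur >> 5] |= 1U << (cur & 31);	// single bit
 *				cur++;
 *			}
 *		}
 *		printf("%08x %08x %08x %08x\n", bm[0], bm[1], bm[2], bm[3]);
 *		return 0;
 *	}
 */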
1264 
1265 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1266 			  int first, int count)
1267 {
1268 	int block = 0;
1269 	int max = 0;
1270 	int order;
1271 	void *buddy;
1272 	void *buddy2;
1273 	struct super_block *sb = e4b->bd_sb;
1274 
1275 	BUG_ON(first + count > (sb->s_blocksize << 3));
1276 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1277 	mb_check_buddy(e4b);
1278 	mb_free_blocks_double(inode, e4b, first, count);
1279 
1280 	e4b->bd_info->bb_free += count;
1281 	if (first < e4b->bd_info->bb_first_free)
1282 		e4b->bd_info->bb_first_free = first;
1283 
1284 	/* let's maintain fragments counter */
1285 	if (first != 0)
1286 		block = !mb_test_bit(first - 1, e4b->bd_bitmap);
1287 	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1288 		max = !mb_test_bit(first + count, e4b->bd_bitmap);
1289 	if (block && max)
1290 		e4b->bd_info->bb_fragments--;
1291 	else if (!block && !max)
1292 		e4b->bd_info->bb_fragments++;
1293 
1294 	/* let's maintain buddy itself */
1295 	while (count-- > 0) {
1296 		block = first++;
1297 		order = 0;
1298 
1299 		if (!mb_test_bit(block, e4b->bd_bitmap)) {
1300 			ext4_fsblk_t blocknr;
1301 
1302 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1303 			blocknr += EXT4_C2B(EXT4_SB(sb), block);
1304 			ext4_grp_locked_error(sb, e4b->bd_group,
1305 					      inode ? inode->i_ino : 0,
1306 					      blocknr,
1307 					      "freeing already freed block "
1308 					      "(bit %u)", block);
1309 		}
1310 		mb_clear_bit(block, e4b->bd_bitmap);
1311 		e4b->bd_info->bb_counters[order]++;
1312 
1313 		/* start of the buddy */
1314 		buddy = mb_find_buddy(e4b, order, &max);
1315 
1316 		do {
1317 			block &= ~1UL;
1318 			if (mb_test_bit(block, buddy) ||
1319 					mb_test_bit(block + 1, buddy))
1320 				break;
1321 
1322 			/* both the buddies are free, try to coalesce them */
1323 			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1324 
1325 			if (!buddy2)
1326 				break;
1327 
1328 			if (order > 0) {
1329 				/* for special purposes, we don't set
1330 				 * free bits in bitmap */
1331 				mb_set_bit(block, buddy);
1332 				mb_set_bit(block + 1, buddy);
1333 			}
1334 			e4b->bd_info->bb_counters[order]--;
1335 			e4b->bd_info->bb_counters[order]--;
1336 
1337 			block = block >> 1;
1338 			order++;
1339 			e4b->bd_info->bb_counters[order]++;
1340 
1341 			mb_clear_bit(block, buddy2);
1342 			buddy = buddy2;
1343 		} while (1);
1344 	}
1345 	mb_set_largest_free_order(sb, e4b->bd_info);
1346 	mb_check_buddy(e4b);
1347 }
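/*
 * A toy, self-contained buddy-coalescing sketch (much simpler than mballoc's
 * in-core layout, not part of mballoc.c): freeing one block keeps merging
 * with its buddy chunk while that buddy is free, which is the same walk the
 * do/while loop in mb_free_blocks() performs on the buddy bitmaps above.
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	#define MAX_ORDER 4			// toy group of 1 << MAX_ORDER blocks
 *	static bool free_chunk[MAX_ORDER + 1][1 << MAX_ORDER];	// [order][index]
 *
 *	static void free_block(unsigned int block)
 *	{
 *		unsigned int order = 0, idx = block;
 *
 *		while (order < MAX_ORDER && free_chunk[order][idx ^ 1]) {
 *			free_chunk[order][idx ^ 1] = false;	// absorb the free buddy
 *			idx >>= 1;				// move up one order
 *			order++;
 *		}
 *		free_chunk[order][idx] = true;
 *		printf("block %u freed as order-%u chunk %u\n", block, order, idx);
 *	}
 *
 *	int main(void)
 *	{
 *		free_block(5);	// stays an order-0 chunk
 *		free_block(4);	// merges with 5 into an order-1 chunk
 *		free_block(6);	// stays an order-0 chunk
 *		free_block(7);	// merges with 6, then with 4-5 into an order-2 chunk
 *		return 0;
 *	}
 */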
1348 
1349 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1350 				int needed, struct ext4_free_extent *ex)
1351 {
1352 	int next = block;
1353 	int max, order;
1354 	void *buddy;
1355 
1356 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1357 	BUG_ON(ex == NULL);
1358 
1359 	buddy = mb_find_buddy(e4b, 0, &max);
1360 	BUG_ON(buddy == NULL);
1361 	BUG_ON(block >= max);
1362 	if (mb_test_bit(block, buddy)) {
1363 		ex->fe_len = 0;
1364 		ex->fe_start = 0;
1365 		ex->fe_group = 0;
1366 		return 0;
1367 	}
1368 
1369 	/* find actual order */
1370 	order = mb_find_order_for_block(e4b, block);
1371 	block = block >> order;
1372 
1373 	ex->fe_len = 1 << order;
1374 	ex->fe_start = block << order;
1375 	ex->fe_group = e4b->bd_group;
1376 
1377 	/* calc difference from given start */
1378 	next = next - ex->fe_start;
1379 	ex->fe_len -= next;
1380 	ex->fe_start += next;
1381 
1382 	while (needed > ex->fe_len &&
1383 	       mb_find_buddy(e4b, order, &max)) {
1384 
1385 		if (block + 1 >= max)
1386 			break;
1387 
1388 		next = (block + 1) * (1 << order);
1389 		if (mb_test_bit(next, e4b->bd_bitmap))
1390 			break;
1391 
1392 		order = mb_find_order_for_block(e4b, next);
1393 
1394 		block = next >> order;
1395 		ex->fe_len += 1 << order;
1396 	}
1397 
1398 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1399 	return ex->fe_len;
1400 }
1401 
1402 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1403 {
1404 	int ord;
1405 	int mlen = 0;
1406 	int max = 0;
1407 	int cur;
1408 	int start = ex->fe_start;
1409 	int len = ex->fe_len;
1410 	unsigned ret = 0;
1411 	int len0 = len;
1412 	void *buddy;
1413 
1414 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1415 	BUG_ON(e4b->bd_group != ex->fe_group);
1416 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1417 	mb_check_buddy(e4b);
1418 	mb_mark_used_double(e4b, start, len);
1419 
1420 	e4b->bd_info->bb_free -= len;
1421 	if (e4b->bd_info->bb_first_free == start)
1422 		e4b->bd_info->bb_first_free += len;
1423 
1424 	/* let's maintain fragments counter */
1425 	if (start != 0)
1426 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1427 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1428 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1429 	if (mlen && max)
1430 		e4b->bd_info->bb_fragments++;
1431 	else if (!mlen && !max)
1432 		e4b->bd_info->bb_fragments--;
1433 
1434 	/* let's maintain buddy itself */
1435 	while (len) {
1436 		ord = mb_find_order_for_block(e4b, start);
1437 
1438 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1439 			/* the whole chunk may be allocated at once! */
1440 			mlen = 1 << ord;
1441 			buddy = mb_find_buddy(e4b, ord, &max);
1442 			BUG_ON((start >> ord) >= max);
1443 			mb_set_bit(start >> ord, buddy);
1444 			e4b->bd_info->bb_counters[ord]--;
1445 			start += mlen;
1446 			len -= mlen;
1447 			BUG_ON(len < 0);
1448 			continue;
1449 		}
1450 
1451 		/* store for history */
1452 		if (ret == 0)
1453 			ret = len | (ord << 16);
1454 
1455 		/* we have to split large buddy */
1456 		BUG_ON(ord <= 0);
1457 		buddy = mb_find_buddy(e4b, ord, &max);
1458 		mb_set_bit(start >> ord, buddy);
1459 		e4b->bd_info->bb_counters[ord]--;
1460 
1461 		ord--;
1462 		cur = (start >> ord) & ~1U;
1463 		buddy = mb_find_buddy(e4b, ord, &max);
1464 		mb_clear_bit(cur, buddy);
1465 		mb_clear_bit(cur + 1, buddy);
1466 		e4b->bd_info->bb_counters[ord]++;
1467 		e4b->bd_info->bb_counters[ord]++;
1468 	}
1469 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1470 
1471 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1472 	mb_check_buddy(e4b);
1473 
1474 	return ret;
1475 }
1476 
1477 /*
1478  * Must be called under group lock!
1479  */
1480 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1481 					struct ext4_buddy *e4b)
1482 {
1483 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1484 	int ret;
1485 
1486 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1487 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1488 
1489 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1490 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1491 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1492 
1493 	/* preallocation can change ac_b_ex, thus we store actually
1494 	 * allocated blocks for history */
1495 	ac->ac_f_ex = ac->ac_b_ex;
1496 
1497 	ac->ac_status = AC_STATUS_FOUND;
1498 	ac->ac_tail = ret & 0xffff;
1499 	ac->ac_buddy = ret >> 16;
1500 
1501 	/*
1502 	 * take the page reference. We want the page to be pinned
1503 	 * so that we don't get an ext4_mb_init_cache() call for this
1504 	 * group until we update the bitmap. Otherwise we could
1505 	 * double allocate blocks. The reference is dropped
1506 	 * in ext4_mb_release_context
1507 	 */
1508 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1509 	get_page(ac->ac_bitmap_page);
1510 	ac->ac_buddy_page = e4b->bd_buddy_page;
1511 	get_page(ac->ac_buddy_page);
1512 	/* store last allocated for subsequent stream allocation */
1513 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1514 		spin_lock(&sbi->s_md_lock);
1515 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1516 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1517 		spin_unlock(&sbi->s_md_lock);
1518 	}
1519 }
1520 
1521 /*
1522  * regular allocator, for general purposes allocation
1523  */
1524 
1525 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1526 					struct ext4_buddy *e4b,
1527 					int finish_group)
1528 {
1529 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1530 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1531 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1532 	struct ext4_free_extent ex;
1533 	int max;
1534 
1535 	if (ac->ac_status == AC_STATUS_FOUND)
1536 		return;
1537 	/*
1538 	 * We don't want to scan for a whole year
1539 	 */
1540 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1541 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1542 		ac->ac_status = AC_STATUS_BREAK;
1543 		return;
1544 	}
1545 
1546 	/*
1547 	 * Haven't found good chunk so far, let's continue
1548 	 */
1549 	if (bex->fe_len < gex->fe_len)
1550 		return;
1551 
1552 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1553 			&& bex->fe_group == e4b->bd_group) {
1554 		/* recheck chunk's availability - we don't know
1555 		 * when it was found (within this lock-unlock
1556 		 * period or not) */
1557 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1558 		if (max >= gex->fe_len) {
1559 			ext4_mb_use_best_found(ac, e4b);
1560 			return;
1561 		}
1562 	}
1563 }
1564 
1565 /*
1566  * The routine checks whether the found extent is good enough. If it is,
1567  * then the extent gets marked used and a flag is set in the context
1568  * to stop scanning. Otherwise, the extent is compared with the
1569  * previously found extent and if the new one is better, then it's stored
1570  * in the context. Later, the best found extent will be used, if
1571  * mballoc can't find a good enough extent.
1572  *
1573  * FIXME: real allocation policy is to be designed yet!
1574  */
1575 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1576 					struct ext4_free_extent *ex,
1577 					struct ext4_buddy *e4b)
1578 {
1579 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1580 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1581 
1582 	BUG_ON(ex->fe_len <= 0);
1583 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1584 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1585 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1586 
1587 	ac->ac_found++;
1588 
1589 	/*
1590 	 * The special case - take what you catch first
1591 	 */
1592 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1593 		*bex = *ex;
1594 		ext4_mb_use_best_found(ac, e4b);
1595 		return;
1596 	}
1597 
1598 	/*
1599 	 * Let's check whether the chunk is good enough
1600 	 */
1601 	if (ex->fe_len == gex->fe_len) {
1602 		*bex = *ex;
1603 		ext4_mb_use_best_found(ac, e4b);
1604 		return;
1605 	}
1606 
1607 	/*
1608 	 * If this is first found extent, just store it in the context
1609 	 */
1610 	if (bex->fe_len == 0) {
1611 		*bex = *ex;
1612 		return;
1613 	}
1614 
1615 	/*
1616 	 * If new found extent is better, store it in the context
1617 	 */
1618 	if (bex->fe_len < gex->fe_len) {
1619 		/* if the request isn't satisfied, any found extent
1620 		 * larger than the previous best one is better */
1621 		if (ex->fe_len > bex->fe_len)
1622 			*bex = *ex;
1623 	} else if (ex->fe_len > gex->fe_len) {
1624 		/* if the request is satisfied, then we try to find
1625 		 * an extent that still satisfies the request, but is
1626 		 * smaller than the previous one */
1627 		if (ex->fe_len < bex->fe_len)
1628 			*bex = *ex;
1629 	}
1630 
1631 	ext4_mb_check_limits(ac, e4b, 0);
1632 }
1633 
1634 static noinline_for_stack
1635 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1636 					struct ext4_buddy *e4b)
1637 {
1638 	struct ext4_free_extent ex = ac->ac_b_ex;
1639 	ext4_group_t group = ex.fe_group;
1640 	int max;
1641 	int err;
1642 
1643 	BUG_ON(ex.fe_len <= 0);
1644 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1645 	if (err)
1646 		return err;
1647 
1648 	ext4_lock_group(ac->ac_sb, group);
1649 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1650 
1651 	if (max > 0) {
1652 		ac->ac_b_ex = ex;
1653 		ext4_mb_use_best_found(ac, e4b);
1654 	}
1655 
1656 	ext4_unlock_group(ac->ac_sb, group);
1657 	ext4_mb_unload_buddy(e4b);
1658 
1659 	return 0;
1660 }
1661 
1662 static noinline_for_stack
1663 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1664 				struct ext4_buddy *e4b)
1665 {
1666 	ext4_group_t group = ac->ac_g_ex.fe_group;
1667 	int max;
1668 	int err;
1669 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1670 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1671 	struct ext4_free_extent ex;
1672 
1673 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1674 		return 0;
1675 	if (grp->bb_free == 0)
1676 		return 0;
1677 
1678 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1679 	if (err)
1680 		return err;
1681 
1682 	ext4_lock_group(ac->ac_sb, group);
1683 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1684 			     ac->ac_g_ex.fe_len, &ex);
1685 
1686 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1687 		ext4_fsblk_t start;
1688 
1689 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1690 			ex.fe_start;
1691 		/* use do_div to get remainder (would be 64-bit modulo) */
1692 		if (do_div(start, sbi->s_stripe) == 0) {
1693 			ac->ac_found++;
1694 			ac->ac_b_ex = ex;
1695 			ext4_mb_use_best_found(ac, e4b);
1696 		}
1697 	} else if (max >= ac->ac_g_ex.fe_len) {
1698 		BUG_ON(ex.fe_len <= 0);
1699 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1700 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1701 		ac->ac_found++;
1702 		ac->ac_b_ex = ex;
1703 		ext4_mb_use_best_found(ac, e4b);
1704 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1705 		/* Sometimes, the caller may want to merge even a small
1706 		 * number of blocks into an existing extent */
1707 		BUG_ON(ex.fe_len <= 0);
1708 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1709 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1710 		ac->ac_found++;
1711 		ac->ac_b_ex = ex;
1712 		ext4_mb_use_best_found(ac, e4b);
1713 	}
1714 	ext4_unlock_group(ac->ac_sb, group);
1715 	ext4_mb_unload_buddy(e4b);
1716 
1717 	return 0;
1718 }
1719 
1720 /*
1721  * The routine scans buddy structures (not the bitmap!) from the given order
1722  * to the max order and tries to find a chunk big enough to satisfy the request
1723  */
1724 static noinline_for_stack
1725 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1726 					struct ext4_buddy *e4b)
1727 {
1728 	struct super_block *sb = ac->ac_sb;
1729 	struct ext4_group_info *grp = e4b->bd_info;
1730 	void *buddy;
1731 	int i;
1732 	int k;
1733 	int max;
1734 
1735 	BUG_ON(ac->ac_2order <= 0);
1736 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1737 		if (grp->bb_counters[i] == 0)
1738 			continue;
1739 
1740 		buddy = mb_find_buddy(e4b, i, &max);
1741 		BUG_ON(buddy == NULL);
1742 
1743 		k = mb_find_next_zero_bit(buddy, max, 0);
1744 		BUG_ON(k >= max);
1745 
1746 		ac->ac_found++;
1747 
1748 		ac->ac_b_ex.fe_len = 1 << i;
1749 		ac->ac_b_ex.fe_start = k << i;
1750 		ac->ac_b_ex.fe_group = e4b->bd_group;
1751 
1752 		ext4_mb_use_best_found(ac, e4b);
1753 
1754 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1755 
1756 		if (EXT4_SB(sb)->s_mb_stats)
1757 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1758 
1759 		break;
1760 	}
1761 }
1762 
1763 /*
1764  * The routine scans the group and measures all found extents.
1765  * In order to optimize scanning, the caller must pass the number of
1766  * free blocks in the group, so the routine knows the upper limit.
1767  */
1768 static noinline_for_stack
1769 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1770 					struct ext4_buddy *e4b)
1771 {
1772 	struct super_block *sb = ac->ac_sb;
1773 	void *bitmap = e4b->bd_bitmap;
1774 	struct ext4_free_extent ex;
1775 	int i;
1776 	int free;
1777 
1778 	free = e4b->bd_info->bb_free;
1779 	BUG_ON(free <= 0);
1780 
1781 	i = e4b->bd_info->bb_first_free;
1782 
1783 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1784 		i = mb_find_next_zero_bit(bitmap,
1785 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1786 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1787 			/*
1788 			 * If we have a corrupt bitmap, we won't find any
1789 			 * free blocks even though group info says we
1790 			 * have free blocks
1791 			 */
1792 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1793 					"%d free clusters as per "
1794 					"group info. But bitmap says 0",
1795 					free);
1796 			break;
1797 		}
1798 
1799 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
1800 		BUG_ON(ex.fe_len <= 0);
1801 		if (free < ex.fe_len) {
1802 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1803 					"%d free clusters as per "
1804 					"group info. But got %d blocks",
1805 					free, ex.fe_len);
1806 			/*
1807 			 * The number of free blocks differs. This mostly
1808 			 * indicates that the bitmap is corrupt. So exit
1809 			 * without claiming the space.
1810 			 */
1811 			break;
1812 		}
1813 
1814 		ext4_mb_measure_extent(ac, &ex, e4b);
1815 
1816 		i += ex.fe_len;
1817 		free -= ex.fe_len;
1818 	}
1819 
1820 	ext4_mb_check_limits(ac, e4b, 1);
1821 }
1822 
1823 /*
1824  * This is a special case for storage like raid5:
1825  * we try to find stripe-aligned chunks for stripe-size-multiple requests
1826  */
1827 static noinline_for_stack
1828 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1829 				 struct ext4_buddy *e4b)
1830 {
1831 	struct super_block *sb = ac->ac_sb;
1832 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1833 	void *bitmap = e4b->bd_bitmap;
1834 	struct ext4_free_extent ex;
1835 	ext4_fsblk_t first_group_block;
1836 	ext4_fsblk_t a;
1837 	ext4_grpblk_t i;
1838 	int max;
1839 
1840 	BUG_ON(sbi->s_stripe == 0);
1841 
1842 	/* find first stripe-aligned block in group */
1843 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1844 
1845 	a = first_group_block + sbi->s_stripe - 1;
1846 	do_div(a, sbi->s_stripe);
1847 	i = (a * sbi->s_stripe) - first_group_block;
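	/*
	 * Worked example with hypothetical numbers: if first_group_block is
	 * 32770 and s_stripe is 16, then a = (32770 + 15) / 16 = 2049 and
	 * i = 2049 * 16 - 32770 = 14, i.e. the offset of the first
	 * stripe-aligned block within this group.
	 */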
1848 
1849 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
1850 		if (!mb_test_bit(i, bitmap)) {
1851 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
1852 			if (max >= sbi->s_stripe) {
1853 				ac->ac_found++;
1854 				ac->ac_b_ex = ex;
1855 				ext4_mb_use_best_found(ac, e4b);
1856 				break;
1857 			}
1858 		}
1859 		i += sbi->s_stripe;
1860 	}
1861 }
1862 
1863 /* This is now called BEFORE we load the buddy bitmap. */
1864 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1865 				ext4_group_t group, int cr)
1866 {
1867 	unsigned free, fragments;
1868 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1869 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1870 
1871 	BUG_ON(cr < 0 || cr >= 4);
1872 
1873 	free = grp->bb_free;
1874 	if (free == 0)
1875 		return 0;
1876 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
1877 		return 0;
1878 
1879 	/* We only do this if the grp has never been initialized */
1880 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1881 		int ret = ext4_mb_init_group(ac->ac_sb, group);
1882 		if (ret)
1883 			return 0;
1884 	}
1885 
1886 	fragments = grp->bb_fragments;
1887 	if (fragments == 0)
1888 		return 0;
1889 
1890 	switch (cr) {
1891 	case 0:
1892 		BUG_ON(ac->ac_2order == 0);
1893 
1894 		/* Avoid using the first bg of a flexgroup for data files */
1895 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1896 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1897 		    ((group % flex_size) == 0))
1898 			return 0;
1899 
1900 		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
1901 		    (free / fragments) >= ac->ac_g_ex.fe_len)
1902 			return 1;
1903 
1904 		if (grp->bb_largest_free_order < ac->ac_2order)
1905 			return 0;
1906 
1907 		return 1;
1908 	case 1:
1909 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1910 			return 1;
1911 		break;
1912 	case 2:
1913 		if (free >= ac->ac_g_ex.fe_len)
1914 			return 1;
1915 		break;
1916 	case 3:
1917 		return 1;
1918 	default:
1919 		BUG();
1920 	}
1921 
1922 	return 0;
1923 }
1924 
1925 static noinline_for_stack int
1926 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1927 {
1928 	ext4_group_t ngroups, group, i;
1929 	int cr;
1930 	int err = 0;
1931 	struct ext4_sb_info *sbi;
1932 	struct super_block *sb;
1933 	struct ext4_buddy e4b;
1934 
1935 	sb = ac->ac_sb;
1936 	sbi = EXT4_SB(sb);
1937 	ngroups = ext4_get_groups_count(sb);
1938 	/* non-extent files are limited to low blocks/groups */
1939 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1940 		ngroups = sbi->s_blockfile_groups;
1941 
1942 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1943 
1944 	/* first, try the goal */
1945 	err = ext4_mb_find_by_goal(ac, &e4b);
1946 	if (err || ac->ac_status == AC_STATUS_FOUND)
1947 		goto out;
1948 
1949 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1950 		goto out;
1951 
1952 	/*
1953 	 * ac->ac_2order is set only if the fe_len is a power of 2.
1954 	 * If ac_2order is set we also set the criteria to 0 so that we
1955 	 * try exact allocation using the buddy.
1956 	 */
1957 	i = fls(ac->ac_g_ex.fe_len);
1958 	ac->ac_2order = 0;
1959 	/*
1960 	 * We search using buddy data only if the order of the request
1961 	 * is greater than or equal to sbi->s_mb_order2_reqs.
1962 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1963 	 */
1964 	if (i >= sbi->s_mb_order2_reqs) {
1965 		/*
1966 		 * This should tell if fe_len is exactly a power of 2
1967 		 */
1968 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1969 			ac->ac_2order = i - 1;
1970 	}
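	/*
	 * Illustrative example (hypothetical lengths, default mb_order2_req):
	 * for a goal of 8 clusters, fls(8) == 4 and 8 & ~(1 << 3) == 0, so
	 * ac_2order becomes 3 and the scan below starts at cr == 0.  For a
	 * goal of 12 clusters, 12 & ~(1 << 3) != 0, so ac_2order stays 0 and
	 * we start at cr == 1 instead.
	 */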
1971 
1972 	/* if stream allocation is enabled, use global goal */
1973 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1974 		/* TBD: may be hot point */
1975 		spin_lock(&sbi->s_md_lock);
1976 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1977 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1978 		spin_unlock(&sbi->s_md_lock);
1979 	}
1980 
1981 	/* Let's just scan groups to find more or less suitable blocks */
1982 	cr = ac->ac_2order ? 0 : 1;
1983 	/*
1984 	 * cr == 0 tries to get an exact allocation,
1985 	 * cr == 3 tries to get anything
1986 	 */
1987 repeat:
1988 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1989 		ac->ac_criteria = cr;
1990 		/*
1991 		 * searching for the right group start
1992 		 * from the goal value specified
1993 		 */
1994 		group = ac->ac_g_ex.fe_group;
1995 
1996 		for (i = 0; i < ngroups; group++, i++) {
1997 			if (group == ngroups)
1998 				group = 0;
1999 
2000 			/* This now checks without needing the buddy page */
2001 			if (!ext4_mb_good_group(ac, group, cr))
2002 				continue;
2003 
2004 			err = ext4_mb_load_buddy(sb, group, &e4b);
2005 			if (err)
2006 				goto out;
2007 
2008 			ext4_lock_group(sb, group);
2009 
2010 			/*
2011 			 * We need to check again after locking the
2012 			 * block group
2013 			 */
2014 			if (!ext4_mb_good_group(ac, group, cr)) {
2015 				ext4_unlock_group(sb, group);
2016 				ext4_mb_unload_buddy(&e4b);
2017 				continue;
2018 			}
2019 
2020 			ac->ac_groups_scanned++;
2021 			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
2022 				ext4_mb_simple_scan_group(ac, &e4b);
2023 			else if (cr == 1 && sbi->s_stripe &&
2024 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2025 				ext4_mb_scan_aligned(ac, &e4b);
2026 			else
2027 				ext4_mb_complex_scan_group(ac, &e4b);
2028 
2029 			ext4_unlock_group(sb, group);
2030 			ext4_mb_unload_buddy(&e4b);
2031 
2032 			if (ac->ac_status != AC_STATUS_CONTINUE)
2033 				break;
2034 		}
2035 	}
2036 
2037 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2038 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2039 		/*
2040 		 * We've been searching too long. Let's try to allocate
2041 		 * the best chunk we've found so far
2042 		 */
2043 
2044 		ext4_mb_try_best_found(ac, &e4b);
2045 		if (ac->ac_status != AC_STATUS_FOUND) {
2046 			/*
2047 			 * Someone luckier has already allocated it.
2048 			 * The only thing we can do is just take the first
2049 			 * found block(s)
2050 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2051 			 */
2052 			ac->ac_b_ex.fe_group = 0;
2053 			ac->ac_b_ex.fe_start = 0;
2054 			ac->ac_b_ex.fe_len = 0;
2055 			ac->ac_status = AC_STATUS_CONTINUE;
2056 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2057 			cr = 3;
2058 			atomic_inc(&sbi->s_mb_lost_chunks);
2059 			goto repeat;
2060 		}
2061 	}
2062 out:
2063 	return err;
2064 }
2065 
2066 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2067 {
2068 	struct super_block *sb = seq->private;
2069 	ext4_group_t group;
2070 
2071 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2072 		return NULL;
2073 	group = *pos + 1;
2074 	return (void *) ((unsigned long) group);
2075 }
2076 
2077 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2078 {
2079 	struct super_block *sb = seq->private;
2080 	ext4_group_t group;
2081 
2082 	++*pos;
2083 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2084 		return NULL;
2085 	group = *pos + 1;
2086 	return (void *) ((unsigned long) group);
2087 }
2088 
2089 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2090 {
2091 	struct super_block *sb = seq->private;
2092 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2093 	int i;
2094 	int err, buddy_loaded = 0;
2095 	struct ext4_buddy e4b;
2096 	struct ext4_group_info *grinfo;
2097 	struct sg {
2098 		struct ext4_group_info info;
2099 		ext4_grpblk_t counters[16];
2100 	} sg;
2101 
2102 	group--;
2103 	if (group == 0)
2104 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2105 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2106 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2107 			   "group", "free", "frags", "first",
2108 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2109 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2110 
2111 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2112 		sizeof(struct ext4_group_info);
2113 	grinfo = ext4_get_group_info(sb, group);
2114 	/* Load the group info in memory only if not already loaded. */
2115 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2116 		err = ext4_mb_load_buddy(sb, group, &e4b);
2117 		if (err) {
2118 			seq_printf(seq, "#%-5u: I/O error\n", group);
2119 			return 0;
2120 		}
2121 		buddy_loaded = 1;
2122 	}
2123 
2124 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2125 
2126 	if (buddy_loaded)
2127 		ext4_mb_unload_buddy(&e4b);
2128 
2129 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2130 			sg.info.bb_fragments, sg.info.bb_first_free);
2131 	for (i = 0; i <= 13; i++)
2132 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2133 				sg.info.bb_counters[i] : 0);
2134 	seq_printf(seq, " ]\n");
2135 
2136 	return 0;
2137 }
2138 
2139 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2140 {
2141 }
2142 
2143 static const struct seq_operations ext4_mb_seq_groups_ops = {
2144 	.start  = ext4_mb_seq_groups_start,
2145 	.next   = ext4_mb_seq_groups_next,
2146 	.stop   = ext4_mb_seq_groups_stop,
2147 	.show   = ext4_mb_seq_groups_show,
2148 };
2149 
2150 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2151 {
2152 	struct super_block *sb = PDE(inode)->data;
2153 	int rc;
2154 
2155 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2156 	if (rc == 0) {
2157 		struct seq_file *m = file->private_data;
2158 		m->private = sb;
2159 	}
2160 	return rc;
2161 
2162 }
2163 
2164 static const struct file_operations ext4_mb_seq_groups_fops = {
2165 	.owner		= THIS_MODULE,
2166 	.open		= ext4_mb_seq_groups_open,
2167 	.read		= seq_read,
2168 	.llseek		= seq_lseek,
2169 	.release	= seq_release,
2170 };
2171 
2172 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2173 {
2174 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2175 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2176 
2177 	BUG_ON(!cachep);
2178 	return cachep;
2179 }
2180 
2181 /*
2182  * Allocate the top-level s_group_info array for the specified number
2183  * of groups
2184  */
2185 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2186 {
2187 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2188 	unsigned size;
2189 	struct ext4_group_info ***new_groupinfo;
2190 
2191 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2192 		EXT4_DESC_PER_BLOCK_BITS(sb);
2193 	if (size <= sbi->s_group_info_size)
2194 		return 0;
2195 
2196 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2197 	new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
2198 	if (!new_groupinfo) {
2199 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2200 		return -ENOMEM;
2201 	}
2202 	if (sbi->s_group_info) {
2203 		memcpy(new_groupinfo, sbi->s_group_info,
2204 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2205 		ext4_kvfree(sbi->s_group_info);
2206 	}
2207 	sbi->s_group_info = new_groupinfo;
2208 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2209 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2210 		   sbi->s_group_info_size);
2211 	return 0;
2212 }
2213 
2214 /* Create and initialize ext4_group_info data for the given group. */
2215 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2216 			  struct ext4_group_desc *desc)
2217 {
2218 	int i;
2219 	int metalen = 0;
2220 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2221 	struct ext4_group_info **meta_group_info;
2222 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2223 
2224 	/*
2225 	 * First check if this group is the first of a reserved block.
2226 	 * If so, we have to allocate a new table of pointers
2227 	 * to ext4_group_info structures
2228 	 */
2229 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2230 		metalen = sizeof(*meta_group_info) <<
2231 			EXT4_DESC_PER_BLOCK_BITS(sb);
2232 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2233 		if (meta_group_info == NULL) {
2234 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2235 				 "for a buddy group");
2236 			goto exit_meta_group_info;
2237 		}
2238 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2239 			meta_group_info;
2240 	}
2241 
2242 	meta_group_info =
2243 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2244 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2245 
2246 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
2247 	if (meta_group_info[i] == NULL) {
2248 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2249 		goto exit_group_info;
2250 	}
2251 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2252 		&(meta_group_info[i]->bb_state));
2253 
2254 	/*
2255 	 * initialize bb_free to be able to skip
2256 	 * empty groups without initialization
2257 	 */
2258 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2259 		meta_group_info[i]->bb_free =
2260 			ext4_free_clusters_after_init(sb, group, desc);
2261 	} else {
2262 		meta_group_info[i]->bb_free =
2263 			ext4_free_group_clusters(sb, desc);
2264 	}
2265 
2266 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2267 	init_rwsem(&meta_group_info[i]->alloc_sem);
2268 	meta_group_info[i]->bb_free_root = RB_ROOT;
2269 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2270 
2271 #ifdef DOUBLE_CHECK
2272 	{
2273 		struct buffer_head *bh;
2274 		meta_group_info[i]->bb_bitmap =
2275 			kmalloc(sb->s_blocksize, GFP_KERNEL);
2276 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2277 		bh = ext4_read_block_bitmap(sb, group);
2278 		BUG_ON(bh == NULL);
2279 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2280 			sb->s_blocksize);
2281 		put_bh(bh);
2282 	}
2283 #endif
2284 
2285 	return 0;
2286 
2287 exit_group_info:
2288 	/* If a meta_group_info table has been allocated, release it now */
2289 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2290 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2291 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2292 	}
2293 exit_meta_group_info:
2294 	return -ENOMEM;
2295 } /* ext4_mb_add_groupinfo */
2296 
2297 static int ext4_mb_init_backend(struct super_block *sb)
2298 {
2299 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2300 	ext4_group_t i;
2301 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2302 	int err;
2303 	struct ext4_group_desc *desc;
2304 	struct kmem_cache *cachep;
2305 
2306 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2307 	if (err)
2308 		return err;
2309 
2310 	sbi->s_buddy_cache = new_inode(sb);
2311 	if (sbi->s_buddy_cache == NULL) {
2312 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2313 		goto err_freesgi;
2314 	}
2315 	/* To avoid potentially colliding with a valid on-disk inode number,
2316 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2317 	 * not in the inode hash, so it should never be found by iget(), but
2318 	 * this will avoid confusion if it ever shows up during debugging. */
2319 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2320 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2321 	for (i = 0; i < ngroups; i++) {
2322 		desc = ext4_get_group_desc(sb, i, NULL);
2323 		if (desc == NULL) {
2324 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2325 			goto err_freebuddy;
2326 		}
2327 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2328 			goto err_freebuddy;
2329 	}
2330 
2331 	return 0;
2332 
2333 err_freebuddy:
2334 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2335 	while (i-- > 0)
2336 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2337 	i = sbi->s_group_info_size;
2338 	while (i-- > 0)
2339 		kfree(sbi->s_group_info[i]);
2340 	iput(sbi->s_buddy_cache);
2341 err_freesgi:
2342 	ext4_kvfree(sbi->s_group_info);
2343 	return -ENOMEM;
2344 }
2345 
2346 static void ext4_groupinfo_destroy_slabs(void)
2347 {
2348 	int i;
2349 
2350 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2351 		if (ext4_groupinfo_caches[i])
2352 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2353 		ext4_groupinfo_caches[i] = NULL;
2354 	}
2355 }
2356 
2357 static int ext4_groupinfo_create_slab(size_t size)
2358 {
2359 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2360 	int slab_size;
2361 	int blocksize_bits = order_base_2(size);
2362 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2363 	struct kmem_cache *cachep;
2364 
2365 	if (cache_index >= NR_GRPINFO_CACHES)
2366 		return -EINVAL;
2367 
2368 	if (unlikely(cache_index < 0))
2369 		cache_index = 0;
2370 
2371 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2372 	if (ext4_groupinfo_caches[cache_index]) {
2373 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2374 		return 0;	/* Already created */
2375 	}
2376 
2377 	slab_size = offsetof(struct ext4_group_info,
2378 				bb_counters[blocksize_bits + 2]);
2379 
2380 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2381 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2382 					NULL);
2383 
2384 	ext4_groupinfo_caches[cache_index] = cachep;
2385 
2386 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2387 	if (!cachep) {
2388 		printk(KERN_EMERG
2389 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2390 		return -ENOMEM;
2391 	}
2392 
2393 	return 0;
2394 }
2395 
2396 int ext4_mb_init(struct super_block *sb)
2397 {
2398 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2399 	unsigned i, j;
2400 	unsigned offset;
2401 	unsigned max;
2402 	int ret;
2403 
2404 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2405 
2406 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2407 	if (sbi->s_mb_offsets == NULL) {
2408 		ret = -ENOMEM;
2409 		goto out;
2410 	}
2411 
2412 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2413 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2414 	if (sbi->s_mb_maxs == NULL) {
2415 		ret = -ENOMEM;
2416 		goto out;
2417 	}
2418 
2419 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2420 	if (ret < 0)
2421 		goto out;
2422 
2423 	/* order 0 is regular bitmap */
2424 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2425 	sbi->s_mb_offsets[0] = 0;
2426 
2427 	i = 1;
2428 	offset = 0;
2429 	max = sb->s_blocksize << 2;
2430 	do {
2431 		sbi->s_mb_offsets[i] = offset;
2432 		sbi->s_mb_maxs[i] = max;
2433 		offset += 1 << (sb->s_blocksize_bits - i);
2434 		max = max >> 1;
2435 		i++;
2436 	} while (i <= sb->s_blocksize_bits + 1);
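	/*
	 * For example, assuming a 4k block size (s_blocksize_bits == 12):
	 * s_mb_maxs[0] = 32768 bits for the regular bitmap, and the loop above
	 * yields offsets[1] = 0,    maxs[1] = 16384,
	 *        offsets[2] = 2048, maxs[2] = 8192,
	 *        offsets[3] = 3072, maxs[3] = 4096, ...
	 * up to order s_blocksize_bits + 1 == 13.
	 */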
2437 
2438 	spin_lock_init(&sbi->s_md_lock);
2439 	spin_lock_init(&sbi->s_bal_lock);
2440 
2441 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2442 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2443 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2444 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2445 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2446 	/*
2447 	 * The default group preallocation is 512, which for 4k block
2448 	 * sizes translates to 2 megabytes.  However for bigalloc file
2449 	 * systems, this is probably too big (i.e., if the cluster size
2450 	 * is 1 megabyte, then group preallocation size becomes half a
2451 	 * gigabyte!).  As a default, we will keep a two megabyte
2452 	 * group prealloc size for cluster sizes up to 64k, and after
2453 	 * that, we will force a minimum group preallocation size of
2454 	 * 32 clusters.  This translates to 8 megs when the cluster
2455 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2456 	 * which seems reasonable as a default.
2457 	 */
2458 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2459 				       sbi->s_cluster_bits, 32);
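	/*
	 * For example: with no bigalloc (s_cluster_bits == 0) this keeps the
	 * default of 512 clusters (2MB with 4k blocks).  With a 256k cluster
	 * (s_cluster_bits == 6), 512 >> 6 == 8 falls below the floor, so we
	 * get 32 clusters == 8MB; with a 1MB cluster (s_cluster_bits == 8) we
	 * likewise get 32 clusters == 32MB.
	 */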
2460 	/*
2461 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2462 	 * to the lowest multiple of s_stripe which is bigger than
2463 	 * the s_mb_group_prealloc as determined above. We want
2464 	 * the preallocation size to be an exact multiple of the
2465 	 * RAID stripe size so that preallocations don't fragment
2466 	 * the stripes.
2467 	 */
2468 	if (sbi->s_stripe > 1) {
2469 		sbi->s_mb_group_prealloc = roundup(
2470 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2471 	}
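	/*
	 * E.g. (hypothetical stripe width): with s_stripe == 24 and the
	 * 32-cluster value computed above, roundup(32, 24) == 48, so every
	 * locality-group preallocation covers a whole number of stripes.
	 */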
2472 
2473 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2474 	if (sbi->s_locality_groups == NULL) {
2475 		ret = -ENOMEM;
2476 		goto out_free_groupinfo_slab;
2477 	}
2478 	for_each_possible_cpu(i) {
2479 		struct ext4_locality_group *lg;
2480 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2481 		mutex_init(&lg->lg_mutex);
2482 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2483 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2484 		spin_lock_init(&lg->lg_prealloc_lock);
2485 	}
2486 
2487 	/* init file for buddy data */
2488 	ret = ext4_mb_init_backend(sb);
2489 	if (ret != 0)
2490 		goto out_free_locality_groups;
2491 
2492 	if (sbi->s_proc)
2493 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2494 				 &ext4_mb_seq_groups_fops, sb);
2495 
2496 	return 0;
2497 
2498 out_free_locality_groups:
2499 	free_percpu(sbi->s_locality_groups);
2500 	sbi->s_locality_groups = NULL;
2501 out_free_groupinfo_slab:
2502 	ext4_groupinfo_destroy_slabs();
2503 out:
2504 	kfree(sbi->s_mb_offsets);
2505 	sbi->s_mb_offsets = NULL;
2506 	kfree(sbi->s_mb_maxs);
2507 	sbi->s_mb_maxs = NULL;
2508 	return ret;
2509 }
2510 
2511 /* needs to be called with the ext4 group lock held */
2512 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2513 {
2514 	struct ext4_prealloc_space *pa;
2515 	struct list_head *cur, *tmp;
2516 	int count = 0;
2517 
2518 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2519 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2520 		list_del(&pa->pa_group_list);
2521 		count++;
2522 		kmem_cache_free(ext4_pspace_cachep, pa);
2523 	}
2524 	if (count)
2525 		mb_debug(1, "mballoc: %u PAs left\n", count);
2526 
2527 }
2528 
2529 int ext4_mb_release(struct super_block *sb)
2530 {
2531 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2532 	ext4_group_t i;
2533 	int num_meta_group_infos;
2534 	struct ext4_group_info *grinfo;
2535 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2536 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2537 
2538 	if (sbi->s_proc)
2539 		remove_proc_entry("mb_groups", sbi->s_proc);
2540 
2541 	if (sbi->s_group_info) {
2542 		for (i = 0; i < ngroups; i++) {
2543 			grinfo = ext4_get_group_info(sb, i);
2544 #ifdef DOUBLE_CHECK
2545 			kfree(grinfo->bb_bitmap);
2546 #endif
2547 			ext4_lock_group(sb, i);
2548 			ext4_mb_cleanup_pa(grinfo);
2549 			ext4_unlock_group(sb, i);
2550 			kmem_cache_free(cachep, grinfo);
2551 		}
2552 		num_meta_group_infos = (ngroups +
2553 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2554 			EXT4_DESC_PER_BLOCK_BITS(sb);
2555 		for (i = 0; i < num_meta_group_infos; i++)
2556 			kfree(sbi->s_group_info[i]);
2557 		ext4_kvfree(sbi->s_group_info);
2558 	}
2559 	kfree(sbi->s_mb_offsets);
2560 	kfree(sbi->s_mb_maxs);
2561 	if (sbi->s_buddy_cache)
2562 		iput(sbi->s_buddy_cache);
2563 	if (sbi->s_mb_stats) {
2564 		ext4_msg(sb, KERN_INFO,
2565 		       "mballoc: %u blocks %u reqs (%u success)",
2566 				atomic_read(&sbi->s_bal_allocated),
2567 				atomic_read(&sbi->s_bal_reqs),
2568 				atomic_read(&sbi->s_bal_success));
2569 		ext4_msg(sb, KERN_INFO,
2570 		      "mballoc: %u extents scanned, %u goal hits, "
2571 				"%u 2^N hits, %u breaks, %u lost",
2572 				atomic_read(&sbi->s_bal_ex_scanned),
2573 				atomic_read(&sbi->s_bal_goals),
2574 				atomic_read(&sbi->s_bal_2orders),
2575 				atomic_read(&sbi->s_bal_breaks),
2576 				atomic_read(&sbi->s_mb_lost_chunks));
2577 		ext4_msg(sb, KERN_INFO,
2578 		       "mballoc: %lu generated and it took %Lu",
2579 				sbi->s_mb_buddies_generated,
2580 				sbi->s_mb_generation_time);
2581 		ext4_msg(sb, KERN_INFO,
2582 		       "mballoc: %u preallocated, %u discarded",
2583 				atomic_read(&sbi->s_mb_preallocated),
2584 				atomic_read(&sbi->s_mb_discarded));
2585 	}
2586 
2587 	free_percpu(sbi->s_locality_groups);
2588 
2589 	return 0;
2590 }
2591 
2592 static inline int ext4_issue_discard(struct super_block *sb,
2593 		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
2594 {
2595 	ext4_fsblk_t discard_block;
2596 
2597 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2598 			 ext4_group_first_block_no(sb, block_group));
2599 	count = EXT4_C2B(EXT4_SB(sb), count);
2600 	trace_ext4_discard_blocks(sb,
2601 			(unsigned long long) discard_block, count);
2602 	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2603 }
2604 
2605 /*
2606  * This function is called by the jbd2 layer once the commit has finished,
2607  * so we know we can free the blocks that were released with that commit.
2608  */
2609 static void ext4_free_data_callback(struct super_block *sb,
2610 				    struct ext4_journal_cb_entry *jce,
2611 				    int rc)
2612 {
2613 	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
2614 	struct ext4_buddy e4b;
2615 	struct ext4_group_info *db;
2616 	int err, count = 0, count2 = 0;
2617 
2618 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2619 		 entry->efd_count, entry->efd_group, entry);
2620 
2621 	if (test_opt(sb, DISCARD)) {
2622 		err = ext4_issue_discard(sb, entry->efd_group,
2623 					 entry->efd_start_cluster,
2624 					 entry->efd_count);
2625 		if (err && err != -EOPNOTSUPP)
2626 			ext4_msg(sb, KERN_WARNING, "discard request in"
2627 				 " group:%d block:%d count:%d failed"
2628 				 " with %d", entry->efd_group,
2629 				 entry->efd_start_cluster,
2630 				 entry->efd_count, err);
2631 	}
2632 
2633 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2634 	/* we expect to find existing buddy because it's pinned */
2635 	BUG_ON(err != 0);
2636 
2637 
2638 	db = e4b.bd_info;
2639 	/* there are blocks to put in buddy to make them really free */
2640 	count += entry->efd_count;
2641 	count2++;
2642 	ext4_lock_group(sb, entry->efd_group);
2643 	/* Take it out of per group rb tree */
2644 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2645 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2646 
2647 	/*
2648 	 * Clear the trimmed flag for the group so that the next
2649 	 * ext4_trim_fs can trim it.
2650 	 * If the volume is mounted with -o discard, online discard
2651 	 * is supported and the free blocks will be trimmed online.
2652 	 */
2653 	if (!test_opt(sb, DISCARD))
2654 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2655 
2656 	if (!db->bb_free_root.rb_node) {
2657 		/* No more items in the per-group rb tree;
2658 		 * balance refcounts from ext4_mb_free_metadata()
2659 		 */
2660 		page_cache_release(e4b.bd_buddy_page);
2661 		page_cache_release(e4b.bd_bitmap_page);
2662 	}
2663 	ext4_unlock_group(sb, entry->efd_group);
2664 	kmem_cache_free(ext4_free_data_cachep, entry);
2665 	ext4_mb_unload_buddy(&e4b);
2666 
2667 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2668 }
2669 
2670 int __init ext4_init_mballoc(void)
2671 {
2672 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2673 					SLAB_RECLAIM_ACCOUNT);
2674 	if (ext4_pspace_cachep == NULL)
2675 		return -ENOMEM;
2676 
2677 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2678 				    SLAB_RECLAIM_ACCOUNT);
2679 	if (ext4_ac_cachep == NULL) {
2680 		kmem_cache_destroy(ext4_pspace_cachep);
2681 		return -ENOMEM;
2682 	}
2683 
2684 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2685 					   SLAB_RECLAIM_ACCOUNT);
2686 	if (ext4_free_data_cachep == NULL) {
2687 		kmem_cache_destroy(ext4_pspace_cachep);
2688 		kmem_cache_destroy(ext4_ac_cachep);
2689 		return -ENOMEM;
2690 	}
2691 	return 0;
2692 }
2693 
2694 void ext4_exit_mballoc(void)
2695 {
2696 	/*
2697 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2698 	 * before destroying the slab cache.
2699 	 */
2700 	rcu_barrier();
2701 	kmem_cache_destroy(ext4_pspace_cachep);
2702 	kmem_cache_destroy(ext4_ac_cachep);
2703 	kmem_cache_destroy(ext4_free_data_cachep);
2704 	ext4_groupinfo_destroy_slabs();
2705 }
2706 
2707 
2708 /*
2709  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
2710  * Returns 0 on success or an error code
2711  */
2712 static noinline_for_stack int
2713 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2714 				handle_t *handle, unsigned int reserv_clstrs)
2715 {
2716 	struct buffer_head *bitmap_bh = NULL;
2717 	struct ext4_group_desc *gdp;
2718 	struct buffer_head *gdp_bh;
2719 	struct ext4_sb_info *sbi;
2720 	struct super_block *sb;
2721 	ext4_fsblk_t block;
2722 	int err, len;
2723 
2724 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2725 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2726 
2727 	sb = ac->ac_sb;
2728 	sbi = EXT4_SB(sb);
2729 
2730 	err = -EIO;
2731 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2732 	if (!bitmap_bh)
2733 		goto out_err;
2734 
2735 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2736 	if (err)
2737 		goto out_err;
2738 
2739 	err = -EIO;
2740 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2741 	if (!gdp)
2742 		goto out_err;
2743 
2744 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2745 			ext4_free_group_clusters(sb, gdp));
2746 
2747 	err = ext4_journal_get_write_access(handle, gdp_bh);
2748 	if (err)
2749 		goto out_err;
2750 
2751 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2752 
2753 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2754 	if (!ext4_data_block_valid(sbi, block, len)) {
2755 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2756 			   "fs metadata", block, block+len);
2757 		/* The file system is mounted not to panic on error.
2758 		 * Fix the bitmap and repeat the block allocation.
2759 		 * We leak some of the blocks here.
2760 		 */
2761 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2762 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2763 			      ac->ac_b_ex.fe_len);
2764 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2765 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2766 		if (!err)
2767 			err = -EAGAIN;
2768 		goto out_err;
2769 	}
2770 
2771 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2772 #ifdef AGGRESSIVE_CHECK
2773 	{
2774 		int i;
2775 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2776 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2777 						bitmap_bh->b_data));
2778 		}
2779 	}
2780 #endif
2781 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2782 		      ac->ac_b_ex.fe_len);
2783 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2784 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2785 		ext4_free_group_clusters_set(sb, gdp,
2786 					     ext4_free_clusters_after_init(sb,
2787 						ac->ac_b_ex.fe_group, gdp));
2788 	}
2789 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2790 	ext4_free_group_clusters_set(sb, gdp, len);
2791 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
2792 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2793 
2794 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2795 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
2796 	/*
2797 	 * Now reduce the dirty block count also. Should not go negative
2798 	 */
2799 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2800 		/* release all the reserved blocks if non delalloc */
2801 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2802 				   reserv_clstrs);
2803 
2804 	if (sbi->s_log_groups_per_flex) {
2805 		ext4_group_t flex_group = ext4_flex_group(sbi,
2806 							  ac->ac_b_ex.fe_group);
2807 		atomic_sub(ac->ac_b_ex.fe_len,
2808 			   &sbi->s_flex_groups[flex_group].free_clusters);
2809 	}
2810 
2811 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2812 	if (err)
2813 		goto out_err;
2814 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2815 
2816 out_err:
2817 	brelse(bitmap_bh);
2818 	return err;
2819 }
2820 
2821 /*
2822  * here we normalize the request for a locality group
2823  * Group requests are normalized to s_mb_group_prealloc, which goes to
2824  * s_stripe if we set the same via mount option.
2825  * s_mb_group_prealloc can be configured via
2826  * /sys/fs/ext4/<partition>/mb_group_prealloc
2827  *
2828  * XXX: should we try to preallocate more than the group has now?
2829  */
2830 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2831 {
2832 	struct super_block *sb = ac->ac_sb;
2833 	struct ext4_locality_group *lg = ac->ac_lg;
2834 
2835 	BUG_ON(lg == NULL);
2836 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2837 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
2838 		current->pid, ac->ac_g_ex.fe_len);
2839 }
2840 
2841 /*
2842  * Normalization means making the request better in terms of
2843  * size and alignment
2844  */
2845 static noinline_for_stack void
2846 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2847 				struct ext4_allocation_request *ar)
2848 {
2849 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2850 	int bsbits, max;
2851 	ext4_lblk_t end;
2852 	loff_t size, start_off;
2853 	loff_t orig_size __maybe_unused;
2854 	ext4_lblk_t start;
2855 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2856 	struct ext4_prealloc_space *pa;
2857 
2858 	/* only normalize data requests; metadata requests
2859 	   do not need preallocation */
2860 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2861 		return;
2862 
2863 	/* sometimes the caller may want exact blocks */
2864 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2865 		return;
2866 
2867 	/* caller may indicate that preallocation isn't
2868 	 * required (it's a tail, for example) */
2869 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2870 		return;
2871 
2872 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2873 		ext4_mb_normalize_group_request(ac);
2874 		return ;
2875 	}
2876 
2877 	bsbits = ac->ac_sb->s_blocksize_bits;
2878 
2879 	/* first, let's learn the actual file size
2880 	 * assuming the current request is allocated */
2881 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
2882 	size = size << bsbits;
2883 	if (size < i_size_read(ac->ac_inode))
2884 		size = i_size_read(ac->ac_inode);
2885 	orig_size = size;
2886 
2887 	/* max size of free chunks */
2888 	max = 2 << bsbits;
2889 
2890 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
2891 		(req <= (size) || max <= (chunk_size))
2892 
2893 	/* first, try to predict filesize */
2894 	/* XXX: should this table be tunable? */
2895 	start_off = 0;
2896 	if (size <= 16 * 1024) {
2897 		size = 16 * 1024;
2898 	} else if (size <= 32 * 1024) {
2899 		size = 32 * 1024;
2900 	} else if (size <= 64 * 1024) {
2901 		size = 64 * 1024;
2902 	} else if (size <= 128 * 1024) {
2903 		size = 128 * 1024;
2904 	} else if (size <= 256 * 1024) {
2905 		size = 256 * 1024;
2906 	} else if (size <= 512 * 1024) {
2907 		size = 512 * 1024;
2908 	} else if (size <= 1024 * 1024) {
2909 		size = 1024 * 1024;
2910 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2911 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2912 						(21 - bsbits)) << 21;
2913 		size = 2 * 1024 * 1024;
2914 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2915 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2916 							(22 - bsbits)) << 22;
2917 		size = 4 * 1024 * 1024;
2918 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2919 					(8<<20)>>bsbits, max, 8 * 1024)) {
2920 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2921 							(23 - bsbits)) << 23;
2922 		size = 8 * 1024 * 1024;
2923 	} else {
2924 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2925 		size	  = ac->ac_o_ex.fe_len << bsbits;
2926 	}
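	/*
	 * For instance, a file whose predicted size is 100k gets a 128k goal,
	 * while a request falling into the 2MB band gets a 2MB goal whose
	 * start_off is the request's logical offset aligned down to a 2MB
	 * boundary.
	 */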
2927 	size = size >> bsbits;
2928 	start = start_off >> bsbits;
2929 
2930 	/* don't cover already allocated blocks in selected range */
2931 	if (ar->pleft && start <= ar->lleft) {
2932 		size -= ar->lleft + 1 - start;
2933 		start = ar->lleft + 1;
2934 	}
2935 	if (ar->pright && start + size - 1 >= ar->lright)
2936 		size -= start + size - ar->lright;
2937 
2938 	end = start + size;
2939 
2940 	/* check we don't cross already preallocated blocks */
2941 	rcu_read_lock();
2942 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2943 		ext4_lblk_t pa_end;
2944 
2945 		if (pa->pa_deleted)
2946 			continue;
2947 		spin_lock(&pa->pa_lock);
2948 		if (pa->pa_deleted) {
2949 			spin_unlock(&pa->pa_lock);
2950 			continue;
2951 		}
2952 
2953 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
2954 						  pa->pa_len);
2955 
2956 		/* PA must not overlap original request */
2957 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2958 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
2959 
2960 		/* skip PAs this normalized request doesn't overlap with */
2961 		if (pa->pa_lstart >= end || pa_end <= start) {
2962 			spin_unlock(&pa->pa_lock);
2963 			continue;
2964 		}
2965 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2966 
2967 		/* adjust start or end to be adjacent to this pa */
2968 		if (pa_end <= ac->ac_o_ex.fe_logical) {
2969 			BUG_ON(pa_end < start);
2970 			start = pa_end;
2971 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
2972 			BUG_ON(pa->pa_lstart > end);
2973 			end = pa->pa_lstart;
2974 		}
2975 		spin_unlock(&pa->pa_lock);
2976 	}
2977 	rcu_read_unlock();
2978 	size = end - start;
2979 
2980 	/* XXX: extra loop to check we really don't overlap preallocations */
2981 	rcu_read_lock();
2982 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2983 		ext4_lblk_t pa_end;
2984 
2985 		spin_lock(&pa->pa_lock);
2986 		if (pa->pa_deleted == 0) {
2987 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
2988 							  pa->pa_len);
2989 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
2990 		}
2991 		spin_unlock(&pa->pa_lock);
2992 	}
2993 	rcu_read_unlock();
2994 
2995 	if (start + size <= ac->ac_o_ex.fe_logical &&
2996 			start > ac->ac_o_ex.fe_logical) {
2997 		ext4_msg(ac->ac_sb, KERN_ERR,
2998 			 "start %lu, size %lu, fe_logical %lu",
2999 			 (unsigned long) start, (unsigned long) size,
3000 			 (unsigned long) ac->ac_o_ex.fe_logical);
3001 	}
3002 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3003 			start > ac->ac_o_ex.fe_logical);
3004 	BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
3005 
3006 	/* now prepare goal request */
3007 
3008 	/* XXX: is it better to align blocks WRT logical
3009 	 * placement or satisfy big request as is */
3010 	ac->ac_g_ex.fe_logical = start;
3011 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3012 
3013 	/* define goal start in order to merge */
3014 	if (ar->pright && (ar->lright == (start + size))) {
3015 		/* merge to the right */
3016 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3017 						&ac->ac_f_ex.fe_group,
3018 						&ac->ac_f_ex.fe_start);
3019 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3020 	}
3021 	if (ar->pleft && (ar->lleft + 1 == start)) {
3022 		/* merge to the left */
3023 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3024 						&ac->ac_f_ex.fe_group,
3025 						&ac->ac_f_ex.fe_start);
3026 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3027 	}
3028 
3029 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3030 		(unsigned) orig_size, (unsigned) start);
3031 }
3032 
3033 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3034 {
3035 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3036 
3037 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3038 		atomic_inc(&sbi->s_bal_reqs);
3039 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3040 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3041 			atomic_inc(&sbi->s_bal_success);
3042 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3043 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3044 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3045 			atomic_inc(&sbi->s_bal_goals);
3046 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3047 			atomic_inc(&sbi->s_bal_breaks);
3048 	}
3049 
3050 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3051 		trace_ext4_mballoc_alloc(ac);
3052 	else
3053 		trace_ext4_mballoc_prealloc(ac);
3054 }
3055 
3056 /*
3057  * Called on failure; free up any blocks from the inode PA for this
3058  * context.  We don't need this for MB_GROUP_PA because we only change
3059  * pa_free in ext4_mb_release_context(), but on failure, we've already
3060  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3061  */
3062 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3063 {
3064 	struct ext4_prealloc_space *pa = ac->ac_pa;
3065 
3066 	if (pa && pa->pa_type == MB_INODE_PA)
3067 		pa->pa_free += ac->ac_b_ex.fe_len;
3068 }
3069 
3070 /*
3071  * use blocks preallocated to inode
3072  */
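/*
 * Worked example with hypothetical numbers (one block per cluster): a PA
 * with pa_pstart = 1000, pa_lstart = 200, pa_len = 16 serving a 4-block
 * request at logical block 210 gives start = 1000 + (210 - 200) = 1010,
 * end = min(1016, 1014) = 1014 and len = 4, so fe_group/fe_start are
 * derived from physical block 1010 and pa_free drops by 4.
 */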
3073 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3074 				struct ext4_prealloc_space *pa)
3075 {
3076 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3077 	ext4_fsblk_t start;
3078 	ext4_fsblk_t end;
3079 	int len;
3080 
3081 	/* found preallocated blocks, use them */
3082 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3083 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3084 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3085 	len = EXT4_NUM_B2C(sbi, end - start);
3086 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3087 					&ac->ac_b_ex.fe_start);
3088 	ac->ac_b_ex.fe_len = len;
3089 	ac->ac_status = AC_STATUS_FOUND;
3090 	ac->ac_pa = pa;
3091 
3092 	BUG_ON(start < pa->pa_pstart);
3093 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3094 	BUG_ON(pa->pa_free < len);
3095 	pa->pa_free -= len;
3096 
3097 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3098 }
3099 
3100 /*
3101  * use blocks preallocated to locality group
3102  */
3103 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3104 				struct ext4_prealloc_space *pa)
3105 {
3106 	unsigned int len = ac->ac_o_ex.fe_len;
3107 
3108 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3109 					&ac->ac_b_ex.fe_group,
3110 					&ac->ac_b_ex.fe_start);
3111 	ac->ac_b_ex.fe_len = len;
3112 	ac->ac_status = AC_STATUS_FOUND;
3113 	ac->ac_pa = pa;
3114 
3115 	/* we don't correct pa_pstart or pa_plen here to avoid
3116 	 * possible race when the group is being loaded concurrently;
3117 	 * instead we correct the pa later, after blocks are marked
3118 	 * in the on-disk bitmap -- see ext4_mb_release_context()
3119 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3120 	 */
3121 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3122 }
3123 
3124 /*
3125  * Return the prealloc space that has the minimal distance
3126  * from the goal block. @cpa is the prealloc
3127  * space with the currently known minimal distance
3128  * from the goal block.
3129  */
3130 static struct ext4_prealloc_space *
3131 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3132 			struct ext4_prealloc_space *pa,
3133 			struct ext4_prealloc_space *cpa)
3134 {
3135 	ext4_fsblk_t cur_distance, new_distance;
3136 
3137 	if (cpa == NULL) {
3138 		atomic_inc(&pa->pa_count);
3139 		return pa;
3140 	}
3141 	cur_distance = abs(goal_block - cpa->pa_pstart);
3142 	new_distance = abs(goal_block - pa->pa_pstart);
3143 
3144 	if (cur_distance <= new_distance)
3145 		return cpa;
3146 
3147 	/* drop the previous reference */
3148 	atomic_dec(&cpa->pa_count);
3149 	atomic_inc(&pa->pa_count);
3150 	return pa;
3151 }
3152 
3153 /*
3154  * search goal blocks in preallocated space
3155  */
3156 static noinline_for_stack int
3157 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3158 {
3159 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3160 	int order, i;
3161 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3162 	struct ext4_locality_group *lg;
3163 	struct ext4_prealloc_space *pa, *cpa = NULL;
3164 	ext4_fsblk_t goal_block;
3165 
3166 	/* only data can be preallocated */
3167 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3168 		return 0;
3169 
3170 	/* first, try per-file preallocation */
3171 	rcu_read_lock();
3172 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3173 
3174 		/* all fields in this condition don't change,
3175 		 * so we can skip locking for them */
3176 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3177 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3178 					       EXT4_C2B(sbi, pa->pa_len)))
3179 			continue;
3180 
3181 		/* non-extent files can't have physical blocks past 2^32 */
3182 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3183 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3184 		     EXT4_MAX_BLOCK_FILE_PHYS))
3185 			continue;
3186 
3187 		/* found preallocated blocks, use them */
3188 		spin_lock(&pa->pa_lock);
3189 		if (pa->pa_deleted == 0 && pa->pa_free) {
3190 			atomic_inc(&pa->pa_count);
3191 			ext4_mb_use_inode_pa(ac, pa);
3192 			spin_unlock(&pa->pa_lock);
3193 			ac->ac_criteria = 10;
3194 			rcu_read_unlock();
3195 			return 1;
3196 		}
3197 		spin_unlock(&pa->pa_lock);
3198 	}
3199 	rcu_read_unlock();
3200 
3201 	/* can we use group allocation? */
3202 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3203 		return 0;
3204 
3205 	/* inode may have no locality group for some reason */
3206 	lg = ac->ac_lg;
3207 	if (lg == NULL)
3208 		return 0;
3209 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3210 	if (order > PREALLOC_TB_SIZE - 1)
3211 		/* The max size of hash table is PREALLOC_TB_SIZE */
3212 		order = PREALLOC_TB_SIZE - 1;
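	/*
	 * Example: for a 7-cluster request, fls(7) - 1 == 2, so the loop below
	 * scans buckets lg_prealloc_list[2]..lg_prealloc_list[PREALLOC_TB_SIZE - 1]
	 * for a PA with at least ac_o_ex.fe_len free clusters.
	 */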
3213 
3214 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3215 	/*
3216 	 * search for the prealloc space that has the
3217 	 * minimal distance from the goal block.
3218 	 */
3219 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3220 		rcu_read_lock();
3221 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3222 					pa_inode_list) {
3223 			spin_lock(&pa->pa_lock);
3224 			if (pa->pa_deleted == 0 &&
3225 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3226 
3227 				cpa = ext4_mb_check_group_pa(goal_block,
3228 								pa, cpa);
3229 			}
3230 			spin_unlock(&pa->pa_lock);
3231 		}
3232 		rcu_read_unlock();
3233 	}
3234 	if (cpa) {
3235 		ext4_mb_use_group_pa(ac, cpa);
3236 		ac->ac_criteria = 20;
3237 		return 1;
3238 	}
3239 	return 0;
3240 }
3241 
3242 /*
3243  * the function goes through all blocks freed in the group
3244  * but not yet committed and marks them used in the in-core bitmap.
3245  * The buddy must be generated from this bitmap.
3246  * Needs to be called with the ext4 group lock held
3247  */
3248 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3249 						ext4_group_t group)
3250 {
3251 	struct rb_node *n;
3252 	struct ext4_group_info *grp;
3253 	struct ext4_free_data *entry;
3254 
3255 	grp = ext4_get_group_info(sb, group);
3256 	n = rb_first(&(grp->bb_free_root));
3257 
3258 	while (n) {
3259 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3260 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3261 		n = rb_next(n);
3262 	}
3263 	return;
3264 }
3265 
3266 /*
3267  * the function goes through all preallocations in this group and marks them
3268  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3269  * Needs to be called with the ext4 group lock held
3270  */
3271 static noinline_for_stack
3272 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3273 					ext4_group_t group)
3274 {
3275 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3276 	struct ext4_prealloc_space *pa;
3277 	struct list_head *cur;
3278 	ext4_group_t groupnr;
3279 	ext4_grpblk_t start;
3280 	int preallocated = 0;
3281 	int len;
3282 
3283 	/* all forms of preallocation discard first load the group,
3284 	 * so the only competing code is preallocation use.
3285 	 * We don't need any locking here.
3286 	 * Notice we do NOT ignore preallocations with pa_deleted set;
3287 	 * otherwise we could leave used blocks available for
3288 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3289 	 * is dropping the preallocation
3290 	 */
3291 	list_for_each(cur, &grp->bb_prealloc_list) {
3292 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3293 		spin_lock(&pa->pa_lock);
3294 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3295 					     &groupnr, &start);
3296 		len = pa->pa_len;
3297 		spin_unlock(&pa->pa_lock);
3298 		if (unlikely(len == 0))
3299 			continue;
3300 		BUG_ON(groupnr != group);
3301 		ext4_set_bits(bitmap, start, len);
3302 		preallocated += len;
3303 	}
3304 	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3305 }
3306 
3307 static void ext4_mb_pa_callback(struct rcu_head *head)
3308 {
3309 	struct ext4_prealloc_space *pa;
3310 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3311 	kmem_cache_free(ext4_pspace_cachep, pa);
3312 }
3313 
3314 /*
3315  * drops a reference to preallocated space descriptor
3316  * if this was the last reference and the space is consumed
3317  */
3318 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3319 			struct super_block *sb, struct ext4_prealloc_space *pa)
3320 {
3321 	ext4_group_t grp;
3322 	ext4_fsblk_t grp_blk;
3323 
3324 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3325 		return;
3326 
3327 	/* in this short window concurrent discard can set pa_deleted */
3328 	spin_lock(&pa->pa_lock);
3329 	if (pa->pa_deleted == 1) {
3330 		spin_unlock(&pa->pa_lock);
3331 		return;
3332 	}
3333 
3334 	pa->pa_deleted = 1;
3335 	spin_unlock(&pa->pa_lock);
3336 
3337 	grp_blk = pa->pa_pstart;
3338 	/*
3339 	 * If doing group-based preallocation, pa_pstart may be in the
3340 	 * next group when pa is used up
3341 	 */
3342 	if (pa->pa_type == MB_GROUP_PA)
3343 		grp_blk--;
3344 
3345 	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3346 
3347 	/*
3348 	 * possible race:
3349 	 *
3350 	 *  P1 (buddy init)			P2 (regular allocation)
3351 	 *					find block B in PA
3352 	 *  copy on-disk bitmap to buddy
3353 	 *  					mark B in on-disk bitmap
3354 	 *					drop PA from group
3355 	 *  mark all PAs in buddy
3356 	 *
3357 	 * thus, P1 initializes the buddy with B available. To prevent this
3358 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3359 	 * against that pair
3360 	 */
3361 	ext4_lock_group(sb, grp);
3362 	list_del(&pa->pa_group_list);
3363 	ext4_unlock_group(sb, grp);
3364 
3365 	spin_lock(pa->pa_obj_lock);
3366 	list_del_rcu(&pa->pa_inode_list);
3367 	spin_unlock(pa->pa_obj_lock);
3368 
3369 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3370 }
3371 
3372 /*
3373  * creates a new preallocated space for the given inode
3374  */
3375 static noinline_for_stack int
3376 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3377 {
3378 	struct super_block *sb = ac->ac_sb;
3379 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3380 	struct ext4_prealloc_space *pa;
3381 	struct ext4_group_info *grp;
3382 	struct ext4_inode_info *ei;
3383 
3384 	/* preallocate only when the found space is larger than requested */
3385 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3386 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3387 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3388 
3389 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3390 	if (pa == NULL)
3391 		return -ENOMEM;
3392 
3393 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3394 		int winl;
3395 		int wins;
3396 		int win;
3397 		int offs;
3398 
3399 		/* we can't allocate as much as the normalizer wants,
3400 		 * so the found space must get a proper lstart
3401 		 * to cover the original request */
3402 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3403 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3404 
3405 		/* we're limited by the original request in that
3406 		 * the logical block must be covered anyway;
3407 		 * winl is the window we can move our chunk within */
3408 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3409 
3410 		/* also, we should cover whole original request */
3411 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3412 
3413 		/* the smallest one defines real window */
3414 		win = min(winl, wins);
3415 
3416 		offs = ac->ac_o_ex.fe_logical %
3417 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3418 		if (offs && offs < win)
3419 			win = offs;
3420 
3421 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3422 			EXT4_NUM_B2C(sbi, win);
3423 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3424 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3425 	}
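	/*
	 * Worked example with hypothetical numbers (one block per cluster):
	 * ac_o_ex.fe_logical = 100, ac_o_ex.fe_len = 4, ac_g_ex.fe_logical = 96,
	 * ac_g_ex.fe_len = 16 and ac_b_ex.fe_len = 8 give winl = 4, wins = 4
	 * and offs = 100 % 8 = 4, so win = 4 and ac_b_ex.fe_logical becomes 96;
	 * the 8-block chunk still covers logical blocks 100..103.
	 */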
3426 
3427 	/* preallocation can change ac_b_ex, thus we store actually
3428 	 * allocated blocks for history */
3429 	ac->ac_f_ex = ac->ac_b_ex;
3430 
3431 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3432 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3433 	pa->pa_len = ac->ac_b_ex.fe_len;
3434 	pa->pa_free = pa->pa_len;
3435 	atomic_set(&pa->pa_count, 1);
3436 	spin_lock_init(&pa->pa_lock);
3437 	INIT_LIST_HEAD(&pa->pa_inode_list);
3438 	INIT_LIST_HEAD(&pa->pa_group_list);
3439 	pa->pa_deleted = 0;
3440 	pa->pa_type = MB_INODE_PA;
3441 
3442 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3443 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3444 	trace_ext4_mb_new_inode_pa(ac, pa);
3445 
3446 	ext4_mb_use_inode_pa(ac, pa);
3447 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3448 
3449 	ei = EXT4_I(ac->ac_inode);
3450 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3451 
3452 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3453 	pa->pa_inode = ac->ac_inode;
3454 
3455 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3456 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3457 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3458 
3459 	spin_lock(pa->pa_obj_lock);
3460 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3461 	spin_unlock(pa->pa_obj_lock);
3462 
3463 	return 0;
3464 }
3465 
3466 /*
3467  * creates a new preallocated space for the locality group the inode belongs to
3468  */
3469 static noinline_for_stack int
3470 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3471 {
3472 	struct super_block *sb = ac->ac_sb;
3473 	struct ext4_locality_group *lg;
3474 	struct ext4_prealloc_space *pa;
3475 	struct ext4_group_info *grp;
3476 
3477 	/* preallocate only when found space is larger than requested */
3478 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3479 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3480 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3481 
3482 	BUG_ON(ext4_pspace_cachep == NULL);
3483 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3484 	if (pa == NULL)
3485 		return -ENOMEM;
3486 
3487 	/* preallocation can change ac_b_ex, thus we store actually
3488 	 * allocated blocks for history */
3489 	ac->ac_f_ex = ac->ac_b_ex;
3490 
3491 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3492 	pa->pa_lstart = pa->pa_pstart;
3493 	pa->pa_len = ac->ac_b_ex.fe_len;
3494 	pa->pa_free = pa->pa_len;
3495 	atomic_set(&pa->pa_count, 1);
3496 	spin_lock_init(&pa->pa_lock);
3497 	INIT_LIST_HEAD(&pa->pa_inode_list);
3498 	INIT_LIST_HEAD(&pa->pa_group_list);
3499 	pa->pa_deleted = 0;
3500 	pa->pa_type = MB_GROUP_PA;
3501 
3502 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3503 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3504 	trace_ext4_mb_new_group_pa(ac, pa);
3505 
3506 	ext4_mb_use_group_pa(ac, pa);
3507 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3508 
3509 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3510 	lg = ac->ac_lg;
3511 	BUG_ON(lg == NULL);
3512 
3513 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3514 	pa->pa_inode = NULL;
3515 
3516 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3517 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3518 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3519 
3520 	/*
3521 	 * We will later add the new pa to the right bucket
3522 	 * after updating the pa_free in ext4_mb_release_context
3523 	 */
3524 	return 0;
3525 }
3526 
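/*
 * pick inode vs. locality-group preallocation based on the
 * EXT4_MB_HINT_GROUP_ALLOC hint set up by ext4_mb_group_or_file()
 */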
3527 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3528 {
3529 	int err;
3530 
3531 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3532 		err = ext4_mb_new_group_pa(ac);
3533 	else
3534 		err = ext4_mb_new_inode_pa(ac);
3535 	return err;
3536 }
3537 
3538 /*
3539  * finds all unused blocks in the on-disk bitmap and frees them in the
3540  * in-core bitmap and buddy.
3541  * @pa must be unlinked from inode and group lists, so that
3542  * nobody else can find/use it.
3543  * the caller MUST hold group/inode locks.
3544  * TODO: optimize the case when there are no in-core structures yet
3545  */
3546 static noinline_for_stack int
3547 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3548 			struct ext4_prealloc_space *pa)
3549 {
3550 	struct super_block *sb = e4b->bd_sb;
3551 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3552 	unsigned int end;
3553 	unsigned int next;
3554 	ext4_group_t group;
3555 	ext4_grpblk_t bit;
3556 	unsigned long long grp_blk_start;
3557 	int err = 0;
3558 	int free = 0;
3559 
3560 	BUG_ON(pa->pa_deleted == 0);
3561 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3562 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3563 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3564 	end = bit + pa->pa_len;
3565 
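	/* walk the on-disk bitmap over the PA's range and give every run
	 * of clear (unused) bits back to the in-core bitmap and buddy */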
3566 	while (bit < end) {
3567 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3568 		if (bit >= end)
3569 			break;
3570 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3571 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3572 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3573 			 (unsigned) next - bit, (unsigned) group);
3574 		free += next - bit;
3575 
3576 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3577 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3578 						    EXT4_C2B(sbi, bit)),
3579 					       next - bit);
3580 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3581 		bit = next + 1;
3582 	}
3583 	if (free != pa->pa_free) {
3584 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3585 			 "pa %p: logic %lu, phys. %lu, len %lu",
3586 			 pa, (unsigned long) pa->pa_lstart,
3587 			 (unsigned long) pa->pa_pstart,
3588 			 (unsigned long) pa->pa_len);
3589 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3590 					free, pa->pa_free);
3591 		/*
3592 		 * pa is already deleted so we use the value obtained
3593 		 * from the bitmap and continue.
3594 		 */
3595 	}
3596 	atomic_add(free, &sbi->s_mb_discarded);
3597 
3598 	return err;
3599 }
3600 
3601 static noinline_for_stack int
3602 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3603 				struct ext4_prealloc_space *pa)
3604 {
3605 	struct super_block *sb = e4b->bd_sb;
3606 	ext4_group_t group;
3607 	ext4_grpblk_t bit;
3608 
3609 	trace_ext4_mb_release_group_pa(sb, pa);
3610 	BUG_ON(pa->pa_deleted == 0);
3611 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3612 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3613 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3614 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3615 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3616 
3617 	return 0;
3618 }
3619 
3620 /*
3621  * releases all preallocations in given group
3622  *
3623  * first, we need to decide discard policy:
3624  * - when do we discard
3625  *   1) ENOSPC
3626  * - how many do we discard
3627  *   1) how many requested
3628  */
3629 static noinline_for_stack int
3630 ext4_mb_discard_group_preallocations(struct super_block *sb,
3631 					ext4_group_t group, int needed)
3632 {
3633 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3634 	struct buffer_head *bitmap_bh = NULL;
3635 	struct ext4_prealloc_space *pa, *tmp;
3636 	struct list_head list;
3637 	struct ext4_buddy e4b;
3638 	int err;
3639 	int busy = 0;
3640 	int free = 0;
3641 
3642 	mb_debug(1, "discard preallocation for group %u\n", group);
3643 
3644 	if (list_empty(&grp->bb_prealloc_list))
3645 		return 0;
3646 
3647 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3648 	if (bitmap_bh == NULL) {
3649 		ext4_error(sb, "Error reading block bitmap for %u", group);
3650 		return 0;
3651 	}
3652 
3653 	err = ext4_mb_load_buddy(sb, group, &e4b);
3654 	if (err) {
3655 		ext4_error(sb, "Error loading buddy information for %u", group);
3656 		put_bh(bitmap_bh);
3657 		return 0;
3658 	}
3659 
3660 	if (needed == 0)
3661 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
3662 
3663 	INIT_LIST_HEAD(&list);
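	/* collect unused, deletable PAs from the group onto a private list
	 * under the group lock; busy ones (pa_count != 0) are skipped here
	 * and retried below */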
3664 repeat:
3665 	ext4_lock_group(sb, group);
3666 	list_for_each_entry_safe(pa, tmp,
3667 				&grp->bb_prealloc_list, pa_group_list) {
3668 		spin_lock(&pa->pa_lock);
3669 		if (atomic_read(&pa->pa_count)) {
3670 			spin_unlock(&pa->pa_lock);
3671 			busy = 1;
3672 			continue;
3673 		}
3674 		if (pa->pa_deleted) {
3675 			spin_unlock(&pa->pa_lock);
3676 			continue;
3677 		}
3678 
3679 		/* seems this one can be freed ... */
3680 		pa->pa_deleted = 1;
3681 
3682 		/* we can trust pa_free ... */
3683 		free += pa->pa_free;
3684 
3685 		spin_unlock(&pa->pa_lock);
3686 
3687 		list_del(&pa->pa_group_list);
3688 		list_add(&pa->u.pa_tmp_list, &list);
3689 	}
3690 
3691 	/* if we still need more blocks and some PAs were used, try again */
3692 	if (free < needed && busy) {
3693 		busy = 0;
3694 		ext4_unlock_group(sb, group);
3695 		/*
3696 		 * Yield the CPU here so that we don't get soft lockup
3697 		 * in non preempt case.
3698 		 * in the non-preempt case.
3699 		yield();
3700 		goto repeat;
3701 	}
3702 
3703 	/* found anything to free? */
3704 	if (list_empty(&list)) {
3705 		BUG_ON(free != 0);
3706 		goto out;
3707 	}
3708 
3709 	/* now free all selected PAs */
3710 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3711 
3712 		/* remove from object (inode or locality group) */
3713 		spin_lock(pa->pa_obj_lock);
3714 		list_del_rcu(&pa->pa_inode_list);
3715 		spin_unlock(pa->pa_obj_lock);
3716 
3717 		if (pa->pa_type == MB_GROUP_PA)
3718 			ext4_mb_release_group_pa(&e4b, pa);
3719 		else
3720 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3721 
3722 		list_del(&pa->u.pa_tmp_list);
3723 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3724 	}
3725 
3726 out:
3727 	ext4_unlock_group(sb, group);
3728 	ext4_mb_unload_buddy(&e4b);
3729 	put_bh(bitmap_bh);
3730 	return free;
3731 }
3732 
3733 /*
3734  * releases all non-used preallocated blocks for given inode
3735  *
3736  * It's important to discard preallocations under i_data_sem
3737  * We don't want another block to be served from the prealloc
3738  * space when we are discarding the inode prealloc space.
3739  *
3740  * FIXME!! Make sure it is valid at all the call sites
3741  */
3742 void ext4_discard_preallocations(struct inode *inode)
3743 {
3744 	struct ext4_inode_info *ei = EXT4_I(inode);
3745 	struct super_block *sb = inode->i_sb;
3746 	struct buffer_head *bitmap_bh = NULL;
3747 	struct ext4_prealloc_space *pa, *tmp;
3748 	ext4_group_t group = 0;
3749 	struct list_head list;
3750 	struct ext4_buddy e4b;
3751 	int err;
3752 
3753 	if (!S_ISREG(inode->i_mode)) {
3754 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3755 		return;
3756 	}
3757 
3758 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3759 	trace_ext4_discard_preallocations(inode);
3760 
3761 	INIT_LIST_HEAD(&list);
3762 
3763 repeat:
3764 	/* first, collect all pa's in the inode */
3765 	spin_lock(&ei->i_prealloc_lock);
3766 	while (!list_empty(&ei->i_prealloc_list)) {
3767 		pa = list_entry(ei->i_prealloc_list.next,
3768 				struct ext4_prealloc_space, pa_inode_list);
3769 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3770 		spin_lock(&pa->pa_lock);
3771 		if (atomic_read(&pa->pa_count)) {
3772 			/* this shouldn't happen often - nobody should
3773 			 * use preallocation while we're discarding it */
3774 			spin_unlock(&pa->pa_lock);
3775 			spin_unlock(&ei->i_prealloc_lock);
3776 			ext4_msg(sb, KERN_ERR,
3777 				 "uh-oh! used pa while discarding");
3778 			WARN_ON(1);
3779 			schedule_timeout_uninterruptible(HZ);
3780 			goto repeat;
3781 
3782 		}
3783 		if (pa->pa_deleted == 0) {
3784 			pa->pa_deleted = 1;
3785 			spin_unlock(&pa->pa_lock);
3786 			list_del_rcu(&pa->pa_inode_list);
3787 			list_add(&pa->u.pa_tmp_list, &list);
3788 			continue;
3789 		}
3790 
3791 		/* someone is deleting pa right now */
3792 		spin_unlock(&pa->pa_lock);
3793 		spin_unlock(&ei->i_prealloc_lock);
3794 
3795 		/* we have to wait here because pa_deleted
3796 		 * doesn't mean the pa is already unlinked from
3797 		 * the list. As we might be called from
3798 		 * ->clear_inode(), the inode would get freed
3799 		 * while a concurrent thread that is unlinking the
3800 		 * pa from the inode's list could still access the
3801 		 * already freed memory - bad-bad-bad */
3802 
3803 		/* XXX: if this happens too often, we can
3804 		 * add a flag to force wait only in case
3805 		 * of ->clear_inode(), but not in case of
3806 		 * regular truncate */
3807 		schedule_timeout_uninterruptible(HZ);
3808 		goto repeat;
3809 	}
3810 	spin_unlock(&ei->i_prealloc_lock);
3811 
3812 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3813 		BUG_ON(pa->pa_type != MB_INODE_PA);
3814 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3815 
3816 		err = ext4_mb_load_buddy(sb, group, &e4b);
3817 		if (err) {
3818 			ext4_error(sb, "Error loading buddy information for %u",
3819 					group);
3820 			continue;
3821 		}
3822 
3823 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3824 		if (bitmap_bh == NULL) {
3825 			ext4_error(sb, "Error reading block bitmap for %u",
3826 					group);
3827 			ext4_mb_unload_buddy(&e4b);
3828 			continue;
3829 		}
3830 
3831 		ext4_lock_group(sb, group);
3832 		list_del(&pa->pa_group_list);
3833 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3834 		ext4_unlock_group(sb, group);
3835 
3836 		ext4_mb_unload_buddy(&e4b);
3837 		put_bh(bitmap_bh);
3838 
3839 		list_del(&pa->u.pa_tmp_list);
3840 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3841 	}
3842 }
3843 
3844 #ifdef CONFIG_EXT4_DEBUG
3845 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3846 {
3847 	struct super_block *sb = ac->ac_sb;
3848 	ext4_group_t ngroups, i;
3849 
3850 	if (!ext4_mballoc_debug ||
3851 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
3852 		return;
3853 
3854 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
3855 			" Allocation context details:");
3856 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
3857 			ac->ac_status, ac->ac_flags);
3858 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
3859 		 	"goal %lu/%lu/%lu@%lu, "
3860 			"best %lu/%lu/%lu@%lu cr %d",
3861 			(unsigned long)ac->ac_o_ex.fe_group,
3862 			(unsigned long)ac->ac_o_ex.fe_start,
3863 			(unsigned long)ac->ac_o_ex.fe_len,
3864 			(unsigned long)ac->ac_o_ex.fe_logical,
3865 			(unsigned long)ac->ac_g_ex.fe_group,
3866 			(unsigned long)ac->ac_g_ex.fe_start,
3867 			(unsigned long)ac->ac_g_ex.fe_len,
3868 			(unsigned long)ac->ac_g_ex.fe_logical,
3869 			(unsigned long)ac->ac_b_ex.fe_group,
3870 			(unsigned long)ac->ac_b_ex.fe_start,
3871 			(unsigned long)ac->ac_b_ex.fe_len,
3872 			(unsigned long)ac->ac_b_ex.fe_logical,
3873 			(int)ac->ac_criteria);
3874 	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
3875 		 ac->ac_ex_scanned, ac->ac_found);
3876 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
3877 	ngroups = ext4_get_groups_count(sb);
3878 	for (i = 0; i < ngroups; i++) {
3879 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3880 		struct ext4_prealloc_space *pa;
3881 		ext4_grpblk_t start;
3882 		struct list_head *cur;
3883 		ext4_lock_group(sb, i);
3884 		list_for_each(cur, &grp->bb_prealloc_list) {
3885 			pa = list_entry(cur, struct ext4_prealloc_space,
3886 					pa_group_list);
3887 			spin_lock(&pa->pa_lock);
3888 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3889 						     NULL, &start);
3890 			spin_unlock(&pa->pa_lock);
3891 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
3892 			       start, pa->pa_len);
3893 		}
3894 		ext4_unlock_group(sb, i);
3895 
3896 		if (grp->bb_free == 0)
3897 			continue;
3898 		printk(KERN_ERR "%u: %d/%d \n",
3899 		       i, grp->bb_free, grp->bb_fragments);
3900 	}
3901 	printk(KERN_ERR "\n");
3902 }
3903 #else
3904 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3905 {
3906 	return;
3907 }
3908 #endif
3909 
3910 /*
3911  * We use locality group preallocation for small files. The size of the
3912  * file is determined by the current size or the resulting size after
3913  * allocation, whichever is larger.
3914  *
3915  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
3916  */
3917 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3918 {
3919 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3920 	int bsbits = ac->ac_sb->s_blocksize_bits;
3921 	loff_t size, isize;
3922 
3923 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3924 		return;
3925 
3926 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3927 		return;
3928 
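	/* both size and isize below are expressed in filesystem blocks */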
3929 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3930 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3931 		>> bsbits;
3932 
3933 	if ((size == isize) &&
3934 	    !ext4_fs_is_busy(sbi) &&
3935 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3936 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3937 		return;
3938 	}
3939 
3940 	if (sbi->s_mb_group_prealloc <= 0) {
3941 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3942 		return;
3943 	}
3944 
3945 	/* don't use group allocation for large files */
3946 	size = max(size, isize);
3947 	if (size > sbi->s_mb_stream_request) {
3948 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3949 		return;
3950 	}
3951 
3952 	BUG_ON(ac->ac_lg != NULL);
3953 	/*
3954 	 * locality group prealloc space is per-CPU. The reason for having
3955 	 * per-CPU locality groups is to reduce the contention between block
3956 	 * requests from multiple CPUs.
3957 	 */
3958 	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3959 
3960 	/* we're going to use group allocation */
3961 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3962 
3963 	/* serialize all allocations in the group */
3964 	mutex_lock(&ac->ac_lg->lg_mutex);
3965 }
3966 
3967 static noinline_for_stack int
3968 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
3969 				struct ext4_allocation_request *ar)
3970 {
3971 	struct super_block *sb = ar->inode->i_sb;
3972 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3973 	struct ext4_super_block *es = sbi->s_es;
3974 	ext4_group_t group;
3975 	unsigned int len;
3976 	ext4_fsblk_t goal;
3977 	ext4_grpblk_t block;
3978 
3979 	/* we can't allocate > group size */
3980 	len = ar->len;
3981 
3982 	/* just a dirty hack to filter too big requests  */
3983 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
3984 		len = EXT4_CLUSTERS_PER_GROUP(sb);
3985 
3986 	/* start searching from the goal */
3987 	goal = ar->goal;
3988 	if (goal < le32_to_cpu(es->s_first_data_block) ||
3989 			goal >= ext4_blocks_count(es))
3990 		goal = le32_to_cpu(es->s_first_data_block);
3991 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
3992 
3993 	/* set up allocation goals */
3994 	ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
3995 	ac->ac_status = AC_STATUS_CONTINUE;
3996 	ac->ac_sb = sb;
3997 	ac->ac_inode = ar->inode;
3998 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
3999 	ac->ac_o_ex.fe_group = group;
4000 	ac->ac_o_ex.fe_start = block;
4001 	ac->ac_o_ex.fe_len = len;
4002 	ac->ac_g_ex = ac->ac_o_ex;
4003 	ac->ac_flags = ar->flags;
4004 
4005 	/* we have to define the context: will we work with a file or a
4006 	 * locality group? this is a policy decision, actually */
4007 	ext4_mb_group_or_file(ac);
4008 
4009 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4010 			"left: %u/%u, right %u/%u to %swritable\n",
4011 			(unsigned) ar->len, (unsigned) ar->logical,
4012 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4013 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4014 			(unsigned) ar->lright, (unsigned) ar->pright,
4015 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4016 	return 0;
4017 
4018 }
4019 
4020 static noinline_for_stack void
4021 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4022 					struct ext4_locality_group *lg,
4023 					int order, int total_entries)
4024 {
4025 	ext4_group_t group = 0;
4026 	struct ext4_buddy e4b;
4027 	struct list_head discard_list;
4028 	struct ext4_prealloc_space *pa, *tmp;
4029 
4030 	mb_debug(1, "discard locality group preallocation\n");
4031 
4032 	INIT_LIST_HEAD(&discard_list);
4033 
4034 	spin_lock(&lg->lg_prealloc_lock);
4035 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4036 						pa_inode_list) {
4037 		spin_lock(&pa->pa_lock);
4038 		if (atomic_read(&pa->pa_count)) {
4039 			/*
4040 			 * This is the pa that we just used
4041 			 * for block allocation. So don't
4042 			 * free that
4043 			 */
4044 			spin_unlock(&pa->pa_lock);
4045 			continue;
4046 		}
4047 		if (pa->pa_deleted) {
4048 			spin_unlock(&pa->pa_lock);
4049 			continue;
4050 		}
4051 		/* only lg prealloc space */
4052 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4053 
4054 		/* seems this one can be freed ... */
4055 		pa->pa_deleted = 1;
4056 		spin_unlock(&pa->pa_lock);
4057 
4058 		list_del_rcu(&pa->pa_inode_list);
4059 		list_add(&pa->u.pa_tmp_list, &discard_list);
4060 
4061 		total_entries--;
4062 		if (total_entries <= 5) {
4063 			/*
4064 			 * we want to keep only 5 entries,
4065 			 * allowing the list to grow to 8. This
4066 			 * makes sure we don't call discard
4067 			 * again soon for this list.
4068 			 */
4069 			break;
4070 		}
4071 	}
4072 	spin_unlock(&lg->lg_prealloc_lock);
4073 
4074 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4075 
4076 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4077 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4078 			ext4_error(sb, "Error loading buddy information for %u",
4079 					group);
4080 			continue;
4081 		}
4082 		ext4_lock_group(sb, group);
4083 		list_del(&pa->pa_group_list);
4084 		ext4_mb_release_group_pa(&e4b, pa);
4085 		ext4_unlock_group(sb, group);
4086 
4087 		ext4_mb_unload_buddy(&e4b);
4088 		list_del(&pa->u.pa_tmp_list);
4089 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4090 	}
4091 }
4092 
4093 /*
4094  * We have incremented pa_count. So it cannot be freed at this
4095  * point. Also we hold lg_mutex. So no parallel allocation is
4096  * possible from this lg. That means pa_free cannot be updated.
4097  *
4098  * A parallel ext4_mb_discard_group_preallocations is still possible,
4099  * which can cause the lg_prealloc_list to be updated.
4100  */
4101 
4102 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4103 {
4104 	int order, added = 0, lg_prealloc_count = 1;
4105 	struct super_block *sb = ac->ac_sb;
4106 	struct ext4_locality_group *lg = ac->ac_lg;
4107 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4108 
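	/* bucket the pa by log2 of its free cluster count, capped at
	 * PREALLOC_TB_SIZE - 1 */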
4109 	order = fls(pa->pa_free) - 1;
4110 	if (order > PREALLOC_TB_SIZE - 1)
4111 		/* The max size of hash table is PREALLOC_TB_SIZE */
4112 		order = PREALLOC_TB_SIZE - 1;
4113 	/* Add the prealloc space to lg */
4114 	spin_lock(&lg->lg_prealloc_lock);
4115 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4116 						pa_inode_list) {
4117 		spin_lock(&tmp_pa->pa_lock);
4118 		if (tmp_pa->pa_deleted) {
4119 			spin_unlock(&tmp_pa->pa_lock);
4120 			continue;
4121 		}
4122 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4123 			/* Add to the tail of the previous entry */
4124 			list_add_tail_rcu(&pa->pa_inode_list,
4125 						&tmp_pa->pa_inode_list);
4126 			added = 1;
4127 			/*
4128 			 * we want to count the total
4129 			 * number of entries in the list
4130 			 */
4131 		}
4132 		spin_unlock(&tmp_pa->pa_lock);
4133 		lg_prealloc_count++;
4134 	}
4135 	if (!added)
4136 		list_add_tail_rcu(&pa->pa_inode_list,
4137 					&lg->lg_prealloc_list[order]);
4138 	spin_unlock(&lg->lg_prealloc_lock);
4139 
4140 	/* Now trim the list to be not more than 8 elements */
4141 	if (lg_prealloc_count > 8) {
4142 		ext4_mb_discard_lg_preallocations(sb, lg,
4143 						  order, lg_prealloc_count);
4144 		return;
4145 	}
4146 	return;
4147 }
4148 
4149 /*
4150  * release all resources we used in the allocation
4151  */
4152 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4153 {
4154 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4155 	struct ext4_prealloc_space *pa = ac->ac_pa;
4156 	if (pa) {
4157 		if (pa->pa_type == MB_GROUP_PA) {
4158 			/* see comment in ext4_mb_use_group_pa() */
4159 			spin_lock(&pa->pa_lock);
4160 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4161 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4162 			pa->pa_free -= ac->ac_b_ex.fe_len;
4163 			pa->pa_len -= ac->ac_b_ex.fe_len;
4164 			spin_unlock(&pa->pa_lock);
4165 		}
4166 	}
4167 	if (pa) {
4168 		/*
4169 		 * We want to add the pa to the right bucket.
4170 		 * Remove it from the list and, while adding,
4171 		 * make sure the list to which we are adding
4172 		 * doesn't grow too big.
4173 		 */
4174 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4175 			spin_lock(pa->pa_obj_lock);
4176 			list_del_rcu(&pa->pa_inode_list);
4177 			spin_unlock(pa->pa_obj_lock);
4178 			ext4_mb_add_n_trim(ac);
4179 		}
4180 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4181 	}
4182 	if (ac->ac_bitmap_page)
4183 		page_cache_release(ac->ac_bitmap_page);
4184 	if (ac->ac_buddy_page)
4185 		page_cache_release(ac->ac_buddy_page);
4186 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4187 		mutex_unlock(&ac->ac_lg->lg_mutex);
4188 	ext4_mb_collect_stats(ac);
4189 	return 0;
4190 }
4191 
4192 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4193 {
4194 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4195 	int ret;
4196 	int freed = 0;
4197 
4198 	trace_ext4_mb_discard_preallocations(sb, needed);
4199 	for (i = 0; i < ngroups && needed > 0; i++) {
4200 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4201 		freed += ret;
4202 		needed -= ret;
4203 	}
4204 
4205 	return freed;
4206 }
4207 
4208 /*
4209  * Main entry point into mballoc to allocate blocks
4210  * it tries to use preallocation first, then falls back
4211  * to usual allocation
4212  */
4213 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4214 				struct ext4_allocation_request *ar, int *errp)
4215 {
4216 	int freed;
4217 	struct ext4_allocation_context *ac = NULL;
4218 	struct ext4_sb_info *sbi;
4219 	struct super_block *sb;
4220 	ext4_fsblk_t block = 0;
4221 	unsigned int inquota = 0;
4222 	unsigned int reserv_clstrs = 0;
4223 
4224 	sb = ar->inode->i_sb;
4225 	sbi = EXT4_SB(sb);
4226 
4227 	trace_ext4_request_blocks(ar);
4228 
4229 	/* Allow to use superuser reservation for quota file */
4230 	if (IS_NOQUOTA(ar->inode))
4231 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4232 
4233 	/*
4234 	 * For delayed allocation, we could skip the ENOSPC and
4235 	 * EDQUOT check, as blocks and quotas have been already
4236 	 * reserved when data being copied into pagecache.
4237 	 */
4238 	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4239 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4240 	else {
4241 		/* Without delayed allocation we need to verify
4242 		 * there are enough free blocks to do the block allocation
4243 		 * and verify allocation doesn't exceed the quota limits.
4244 		 */
4245 		while (ar->len &&
4246 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4247 
4248 			/* let others free some space and retry with a smaller request */
4249 			yield();
4250 			ar->len = ar->len >> 1;
4251 		}
4252 		if (!ar->len) {
4253 			*errp = -ENOSPC;
4254 			return 0;
4255 		}
4256 		reserv_clstrs = ar->len;
4257 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4258 			dquot_alloc_block_nofail(ar->inode,
4259 						 EXT4_C2B(sbi, ar->len));
4260 		} else {
4261 			while (ar->len &&
4262 				dquot_alloc_block(ar->inode,
4263 						  EXT4_C2B(sbi, ar->len))) {
4264 
4265 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4266 				ar->len--;
4267 			}
4268 		}
4269 		inquota = ar->len;
4270 		if (ar->len == 0) {
4271 			*errp = -EDQUOT;
4272 			goto out;
4273 		}
4274 	}
4275 
4276 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4277 	if (!ac) {
4278 		ar->len = 0;
4279 		*errp = -ENOMEM;
4280 		goto out;
4281 	}
4282 
4283 	*errp = ext4_mb_initialize_context(ac, ar);
4284 	if (*errp) {
4285 		ar->len = 0;
4286 		goto out;
4287 	}
4288 
4289 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4290 	if (!ext4_mb_use_preallocated(ac)) {
4291 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4292 		ext4_mb_normalize_request(ac, ar);
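		/* we come back here if ext4_mb_mark_diskspace_used() returns
		 * -EAGAIN or if discarding preallocations freed some space */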
4293 repeat:
4294 		/* allocate space in core */
4295 		*errp = ext4_mb_regular_allocator(ac);
4296 		if (*errp) {
4297 			ext4_discard_allocated_blocks(ac);
4298 			goto errout;
4299 		}
4300 
4301 		/* as we've just preallocated more space than
4302 		 * the user originally requested, we store the allocated
4303 		 * space in a special descriptor */
4304 		if (ac->ac_status == AC_STATUS_FOUND &&
4305 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4306 			ext4_mb_new_preallocation(ac);
4307 	}
4308 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4309 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4310 		if (*errp == -EAGAIN) {
4311 			/*
4312 			 * drop the reference that we took
4313 			 * in ext4_mb_use_best_found
4314 			 */
4315 			ext4_mb_release_context(ac);
4316 			ac->ac_b_ex.fe_group = 0;
4317 			ac->ac_b_ex.fe_start = 0;
4318 			ac->ac_b_ex.fe_len = 0;
4319 			ac->ac_status = AC_STATUS_CONTINUE;
4320 			goto repeat;
4321 		} else if (*errp) {
4322 			ext4_discard_allocated_blocks(ac);
4323 			goto errout;
4324 		} else {
4325 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4326 			ar->len = ac->ac_b_ex.fe_len;
4327 		}
4328 	} else {
4329 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4330 		if (freed)
4331 			goto repeat;
4332 		*errp = -ENOSPC;
4333 	}
4334 
4335 errout:
4336 	if (*errp) {
4337 		ac->ac_b_ex.fe_len = 0;
4338 		ar->len = 0;
4339 		ext4_mb_show_ac(ac);
4340 	}
4341 	ext4_mb_release_context(ac);
4342 out:
4343 	if (ac)
4344 		kmem_cache_free(ext4_ac_cachep, ac);
4345 	if (inquota && ar->len < inquota)
4346 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4347 	if (!ar->len) {
4348 		if (!ext4_test_inode_state(ar->inode,
4349 					   EXT4_STATE_DELALLOC_RESERVED))
4350 			/* release all the reserved blocks if non delalloc */
4351 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4352 						reserv_clstrs);
4353 	}
4354 
4355 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4356 
4357 	return block;
4358 }
4359 
4360 /*
4361  * We can merge two free data extents only if the physical blocks
4362  * are contiguous, AND the extents were freed by the same transaction,
4363  * AND the blocks are associated with the same group.
4364  */
4365 static int can_merge(struct ext4_free_data *entry1,
4366 			struct ext4_free_data *entry2)
4367 {
4368 	if ((entry1->efd_tid == entry2->efd_tid) &&
4369 	    (entry1->efd_group == entry2->efd_group) &&
4370 	    ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4371 		return 1;
4372 	return 0;
4373 }
4374 
4375 static noinline_for_stack int
4376 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4377 		      struct ext4_free_data *new_entry)
4378 {
4379 	ext4_group_t group = e4b->bd_group;
4380 	ext4_grpblk_t cluster;
4381 	struct ext4_free_data *entry;
4382 	struct ext4_group_info *db = e4b->bd_info;
4383 	struct super_block *sb = e4b->bd_sb;
4384 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4385 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4386 	struct rb_node *parent = NULL, *new_node;
4387 
4388 	BUG_ON(!ext4_handle_valid(handle));
4389 	BUG_ON(e4b->bd_bitmap_page == NULL);
4390 	BUG_ON(e4b->bd_buddy_page == NULL);
4391 
4392 	new_node = &new_entry->efd_node;
4393 	cluster = new_entry->efd_start_cluster;
4394 
4395 	if (!*n) {
4396 		/* first free block extent. We need to
4397 		 * protect the buddy cache from being freed,
4398 		 * otherwise we'll refresh it from the
4399 		 * on-disk bitmap and lose not-yet-available
4400 		 * blocks */
4401 		page_cache_get(e4b->bd_buddy_page);
4402 		page_cache_get(e4b->bd_bitmap_page);
4403 	}
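	/* descend the rb-tree of pending free extents, keyed by starting
	 * cluster, to find the insertion point */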
4404 	while (*n) {
4405 		parent = *n;
4406 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4407 		if (cluster < entry->efd_start_cluster)
4408 			n = &(*n)->rb_left;
4409 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4410 			n = &(*n)->rb_right;
4411 		else {
4412 			ext4_grp_locked_error(sb, group, 0,
4413 				ext4_group_first_block_no(sb, group) +
4414 				EXT4_C2B(sbi, cluster),
4415 				"Block already on to-be-freed list");
4416 			return 0;
4417 		}
4418 	}
4419 
4420 	rb_link_node(new_node, parent, n);
4421 	rb_insert_color(new_node, &db->bb_free_root);
4422 
4423 	/* Now see whether the extent can be merged with its left and right neighbours */
4424 	node = rb_prev(new_node);
4425 	if (node) {
4426 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4427 		if (can_merge(entry, new_entry)) {
4428 			new_entry->efd_start_cluster = entry->efd_start_cluster;
4429 			new_entry->efd_count += entry->efd_count;
4430 			rb_erase(node, &(db->bb_free_root));
4431 			ext4_journal_callback_del(handle, &entry->efd_jce);
4432 			kmem_cache_free(ext4_free_data_cachep, entry);
4433 		}
4434 	}
4435 
4436 	node = rb_next(new_node);
4437 	if (node) {
4438 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4439 		if (can_merge(new_entry, entry)) {
4440 			new_entry->efd_count += entry->efd_count;
4441 			rb_erase(node, &(db->bb_free_root));
4442 			ext4_journal_callback_del(handle, &entry->efd_jce);
4443 			kmem_cache_free(ext4_free_data_cachep, entry);
4444 		}
4445 	}
4446 	/* Add the extent to transaction's private list */
4447 	ext4_journal_callback_add(handle, ext4_free_data_callback,
4448 				  &new_entry->efd_jce);
4449 	return 0;
4450 }
4451 
4452 /**
4453  * ext4_free_blocks() -- Free given blocks and update quota
4454  * @handle:		handle for this transaction
4455  * @inode:		inode
4456  * @block:		start physical block to free
4457  * @count:		number of blocks to free
4458  * @flags:		flags used by ext4_free_blocks
4459  */
4460 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4461 		      struct buffer_head *bh, ext4_fsblk_t block,
4462 		      unsigned long count, int flags)
4463 {
4464 	struct buffer_head *bitmap_bh = NULL;
4465 	struct super_block *sb = inode->i_sb;
4466 	struct ext4_group_desc *gdp;
4467 	unsigned long freed = 0;
4468 	unsigned int overflow;
4469 	ext4_grpblk_t bit;
4470 	struct buffer_head *gd_bh;
4471 	ext4_group_t block_group;
4472 	struct ext4_sb_info *sbi;
4473 	struct ext4_buddy e4b;
4474 	unsigned int count_clusters;
4475 	int err = 0;
4476 	int ret;
4477 
4478 	if (bh) {
4479 		if (block)
4480 			BUG_ON(block != bh->b_blocknr);
4481 		else
4482 			block = bh->b_blocknr;
4483 	}
4484 
4485 	sbi = EXT4_SB(sb);
4486 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4487 	    !ext4_data_block_valid(sbi, block, count)) {
4488 		ext4_error(sb, "Freeing blocks not in datazone - "
4489 			   "block = %llu, count = %lu", block, count);
4490 		goto error_return;
4491 	}
4492 
4493 	ext4_debug("freeing block %llu\n", block);
4494 	trace_ext4_free_blocks(inode, block, count, flags);
4495 
4496 	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4497 		struct buffer_head *tbh = bh;
4498 		int i;
4499 
4500 		BUG_ON(bh && (count > 1));
4501 
4502 		for (i = 0; i < count; i++) {
4503 			if (!bh)
4504 				tbh = sb_find_get_block(inode->i_sb,
4505 							block + i);
4506 			if (unlikely(!tbh))
4507 				continue;
4508 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4509 				    inode, tbh, block + i);
4510 		}
4511 	}
4512 
4513 	/*
4514 	 * We need to make sure we don't reuse the freed block until
4515 	 * after the transaction is committed, which we can do by
4516 	 * treating the block as metadata, below.  We make an
4517 	 * exception if the inode is to be written in writeback mode
4518 	 * since writeback mode has weak data consistency guarantees.
4519 	 */
4520 	if (!ext4_should_writeback_data(inode))
4521 		flags |= EXT4_FREE_BLOCKS_METADATA;
4522 
4523 	/*
4524 	 * If the extent to be freed does not begin on a cluster
4525 	 * boundary, we need to deal with partial clusters at the
4526 	 * beginning and end of the extent.  Normally we will free
4527 	 * blocks at the beginning or the end unless we are explicitly
4528 	 * requested to avoid doing so.
4529 	 */
4530 	overflow = block & (sbi->s_cluster_ratio - 1);
4531 	if (overflow) {
4532 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4533 			overflow = sbi->s_cluster_ratio - overflow;
4534 			block += overflow;
4535 			if (count > overflow)
4536 				count -= overflow;
4537 			else
4538 				return;
4539 		} else {
4540 			block -= overflow;
4541 			count += overflow;
4542 		}
4543 	}
4544 	overflow = count & (sbi->s_cluster_ratio - 1);
4545 	if (overflow) {
4546 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4547 			if (count > overflow)
4548 				count -= overflow;
4549 			else
4550 				return;
4551 		} else
4552 			count += sbi->s_cluster_ratio - overflow;
4553 	}
4554 
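	/* block and count are now cluster aligned; each pass below frees
	 * at most one block group's worth of blocks */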
4555 do_more:
4556 	overflow = 0;
4557 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4558 
4559 	/*
4560 	 * Check to see if we are freeing blocks across a group
4561 	 * boundary.
4562 	 */
4563 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4564 		overflow = EXT4_C2B(sbi, bit) + count -
4565 			EXT4_BLOCKS_PER_GROUP(sb);
4566 		count -= overflow;
4567 	}
4568 	count_clusters = EXT4_NUM_B2C(sbi, count);
4569 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4570 	if (!bitmap_bh) {
4571 		err = -EIO;
4572 		goto error_return;
4573 	}
4574 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4575 	if (!gdp) {
4576 		err = -EIO;
4577 		goto error_return;
4578 	}
4579 
4580 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4581 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4582 	    in_range(block, ext4_inode_table(sb, gdp),
4583 		     EXT4_SB(sb)->s_itb_per_group) ||
4584 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4585 		     EXT4_SB(sb)->s_itb_per_group)) {
4586 
4587 		ext4_error(sb, "Freeing blocks in system zone - "
4588 			   "Block = %llu, count = %lu", block, count);
4589 		/* err = 0. ext4_std_error should be a no op */
4590 		goto error_return;
4591 	}
4592 
4593 	BUFFER_TRACE(bitmap_bh, "getting write access");
4594 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4595 	if (err)
4596 		goto error_return;
4597 
4598 	/*
4599 	 * We are about to modify some metadata.  Call the journal APIs
4600 	 * to unshare ->b_data if a currently-committing transaction is
4601 	 * using it
4602 	 */
4603 	BUFFER_TRACE(gd_bh, "get_write_access");
4604 	err = ext4_journal_get_write_access(handle, gd_bh);
4605 	if (err)
4606 		goto error_return;
4607 #ifdef AGGRESSIVE_CHECK
4608 	{
4609 		int i;
4610 		for (i = 0; i < count_clusters; i++)
4611 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4612 	}
4613 #endif
4614 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4615 
4616 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4617 	if (err)
4618 		goto error_return;
4619 
4620 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4621 		struct ext4_free_data *new_entry;
4622 		/*
4623 		 * blocks being freed are metadata. these blocks shouldn't
4624 		 * be used until this transaction is committed
4625 		 */
4626 		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
4627 		if (!new_entry) {
4628 			ext4_mb_unload_buddy(&e4b);
4629 			err = -ENOMEM;
4630 			goto error_return;
4631 		}
4632 		new_entry->efd_start_cluster = bit;
4633 		new_entry->efd_group = block_group;
4634 		new_entry->efd_count = count_clusters;
4635 		new_entry->efd_tid = handle->h_transaction->t_tid;
4636 
4637 		ext4_lock_group(sb, block_group);
4638 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4639 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4640 	} else {
4641 		/* need to update group_info->bb_free and bitmap
4642 		 * with the group lock held. generate_buddy looks at
4643 		 * them with the group lock held
4644 		 */
4645 		if (test_opt(sb, DISCARD)) {
4646 			err = ext4_issue_discard(sb, block_group, bit, count);
4647 			if (err && err != -EOPNOTSUPP)
4648 				ext4_msg(sb, KERN_WARNING, "discard request in"
4649 					 " group:%d block:%d count:%lu failed"
4650 					 " with %d", block_group, bit, count,
4651 					 err);
4652 		}
4653 
4655 		ext4_lock_group(sb, block_group);
4656 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4657 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4658 	}
4659 
4660 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4661 	ext4_free_group_clusters_set(sb, gdp, ret);
4662 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4663 	ext4_group_desc_csum_set(sb, block_group, gdp);
4664 	ext4_unlock_group(sb, block_group);
4665 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4666 
4667 	if (sbi->s_log_groups_per_flex) {
4668 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4669 		atomic_add(count_clusters,
4670 			   &sbi->s_flex_groups[flex_group].free_clusters);
4671 	}
4672 
4673 	ext4_mb_unload_buddy(&e4b);
4674 
4675 	freed += count;
4676 
4677 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4678 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4679 
4680 	/* We dirtied the bitmap block */
4681 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4682 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4683 
4684 	/* And the group descriptor block */
4685 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4686 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4687 	if (!err)
4688 		err = ret;
4689 
4690 	if (overflow && !err) {
4691 		block += count;
4692 		count = overflow;
4693 		put_bh(bitmap_bh);
4694 		goto do_more;
4695 	}
4696 error_return:
4697 	brelse(bitmap_bh);
4698 	ext4_std_error(sb, err);
4699 	return;
4700 }
4701 
4702 /**
4703  * ext4_group_add_blocks() -- Add given blocks to an existing group
4704  * @handle:			handle to this transaction
4705  * @sb:				super block
4706  * @block:			start physical block to add to the block group
4707  * @count:			number of blocks to free
4708  *
4709  * This marks the blocks as free in the bitmap and buddy.
4710  */
4711 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4712 			 ext4_fsblk_t block, unsigned long count)
4713 {
4714 	struct buffer_head *bitmap_bh = NULL;
4715 	struct buffer_head *gd_bh;
4716 	ext4_group_t block_group;
4717 	ext4_grpblk_t bit;
4718 	unsigned int i;
4719 	struct ext4_group_desc *desc;
4720 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4721 	struct ext4_buddy e4b;
4722 	int err = 0, ret, blk_free_count;
4723 	ext4_grpblk_t blocks_freed;
4724 
4725 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4726 
4727 	if (count == 0)
4728 		return 0;
4729 
4730 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4731 	/*
4732 	 * Check to see if we are freeing blocks across a group
4733 	 * boundary.
4734 	 */
4735 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4736 		ext4_warning(sb, "too many blocks added to group %u\n",
4737 			     block_group);
4738 		err = -EINVAL;
4739 		goto error_return;
4740 	}
4741 
4742 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4743 	if (!bitmap_bh) {
4744 		err = -EIO;
4745 		goto error_return;
4746 	}
4747 
4748 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4749 	if (!desc) {
4750 		err = -EIO;
4751 		goto error_return;
4752 	}
4753 
4754 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4755 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4756 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4757 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4758 		     sbi->s_itb_per_group)) {
4759 		ext4_error(sb, "Adding blocks in system zones - "
4760 			   "Block = %llu, count = %lu",
4761 			   block, count);
4762 		err = -EINVAL;
4763 		goto error_return;
4764 	}
4765 
4766 	BUFFER_TRACE(bitmap_bh, "getting write access");
4767 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4768 	if (err)
4769 		goto error_return;
4770 
4771 	/*
4772 	 * We are about to modify some metadata.  Call the journal APIs
4773 	 * to unshare ->b_data if a currently-committing transaction is
4774 	 * using it
4775 	 */
4776 	BUFFER_TRACE(gd_bh, "get_write_access");
4777 	err = ext4_journal_get_write_access(handle, gd_bh);
4778 	if (err)
4779 		goto error_return;
4780 
4781 	for (i = 0, blocks_freed = 0; i < count; i++) {
4782 		BUFFER_TRACE(bitmap_bh, "clear bit");
4783 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4784 			ext4_error(sb, "bit already cleared for block %llu",
4785 				   (ext4_fsblk_t)(block + i));
4786 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4787 		} else {
4788 			blocks_freed++;
4789 		}
4790 	}
4791 
4792 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4793 	if (err)
4794 		goto error_return;
4795 
4796 	/*
4797 	 * need to update group_info->bb_free and bitmap
4798 	 * with the group lock held. generate_buddy looks at
4799 	 * them with the group lock held
4800 	 */
4801 	ext4_lock_group(sb, block_group);
4802 	mb_clear_bits(bitmap_bh->b_data, bit, count);
4803 	mb_free_blocks(NULL, &e4b, bit, count);
4804 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4805 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
4806 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
4807 	ext4_group_desc_csum_set(sb, block_group, desc);
4808 	ext4_unlock_group(sb, block_group);
4809 	percpu_counter_add(&sbi->s_freeclusters_counter,
4810 			   EXT4_NUM_B2C(sbi, blocks_freed));
4811 
4812 	if (sbi->s_log_groups_per_flex) {
4813 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4814 		atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
4815 			   &sbi->s_flex_groups[flex_group].free_clusters);
4816 	}
4817 
4818 	ext4_mb_unload_buddy(&e4b);
4819 
4820 	/* We dirtied the bitmap block */
4821 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4822 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4823 
4824 	/* And the group descriptor block */
4825 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4826 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4827 	if (!err)
4828 		err = ret;
4829 
4830 error_return:
4831 	brelse(bitmap_bh);
4832 	ext4_std_error(sb, err);
4833 	return err;
4834 }
4835 
4836 /**
4837  * ext4_trim_extent -- function to TRIM one single free extent in the group
4838  * @sb:		super block for the file system
4839  * @start:	starting block of the free extent in the alloc. group
4840  * @count:	number of blocks to TRIM
4841  * @group:	alloc. group we are working with
4842  * @e4b:	ext4 buddy for the group
4843  *
4844  * Trim "count" blocks starting at "start" in the "group". To ensure that no
4845  * one will allocate those blocks, mark them as used in the buddy bitmap. This
4846  * must be called under the group lock.
4847  */
4848 static int ext4_trim_extent(struct super_block *sb, int start, int count,
4849 			     ext4_group_t group, struct ext4_buddy *e4b)
4850 {
4851 	struct ext4_free_extent ex;
4852 	int ret = 0;
4853 
4854 	trace_ext4_trim_extent(sb, group, start, count);
4855 
4856 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
4857 
4858 	ex.fe_start = start;
4859 	ex.fe_group = group;
4860 	ex.fe_len = count;
4861 
4862 	/*
4863 	 * Mark blocks used, so no one can reuse them while
4864 	 * being trimmed.
4865 	 */
4866 	mb_mark_used(e4b, &ex);
4867 	ext4_unlock_group(sb, group);
4868 	ret = ext4_issue_discard(sb, group, start, count);
4869 	ext4_lock_group(sb, group);
4870 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
4871 	return ret;
4872 }
4873 
4874 /**
4875  * ext4_trim_all_free -- function to trim all free space in alloc. group
4876  * @sb:			super block for file system
4877  * @group:		group to be trimmed
4878  * @start:		first group block to examine
4879  * @max:		last group block to examine
4880  * @minblocks:		minimum extent block count
4881  *
4882  * ext4_trim_all_free walks through the group's buddy bitmap searching for free
4883  * extents. When a free extent is found, ext4_trim_extent is called: the extent
4884  * is marked as used in the group buddy bitmap, a TRIM command is issued for it,
4885  * and it is then freed again in the buddy bitmap. This is repeated until the
4886  * whole group has been scanned.
4891  */
4892 static ext4_grpblk_t
4893 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4894 		   ext4_grpblk_t start, ext4_grpblk_t max,
4895 		   ext4_grpblk_t minblocks)
4896 {
4897 	void *bitmap;
4898 	ext4_grpblk_t next, count = 0, free_count = 0;
4899 	struct ext4_buddy e4b;
4900 	int ret = 0;
4901 
4902 	trace_ext4_trim_all_free(sb, group, start, max);
4903 
4904 	ret = ext4_mb_load_buddy(sb, group, &e4b);
4905 	if (ret) {
4906 		ext4_error(sb, "Error in loading buddy "
4907 				"information for %u", group);
4908 		return ret;
4909 	}
4910 	bitmap = e4b.bd_bitmap;
4911 
4912 	ext4_lock_group(sb, group);
4913 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4914 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4915 		goto out;
4916 
4917 	start = (e4b.bd_info->bb_first_free > start) ?
4918 		e4b.bd_info->bb_first_free : start;
4919 
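	/* scan the in-core bitmap for runs of free clusters and trim every
	 * run that is at least minblocks long */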
4920 	while (start <= max) {
4921 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
4922 		if (start > max)
4923 			break;
4924 		next = mb_find_next_bit(bitmap, max + 1, start);
4925 
4926 		if ((next - start) >= minblocks) {
4927 			ret = ext4_trim_extent(sb, start,
4928 					       next - start, group, &e4b);
4929 			if (ret && ret != -EOPNOTSUPP)
4930 				break;
4931 			ret = 0;
4932 			count += next - start;
4933 		}
4934 		free_count += next - start;
4935 		start = next + 1;
4936 
4937 		if (fatal_signal_pending(current)) {
4938 			count = -ERESTARTSYS;
4939 			break;
4940 		}
4941 
4942 		if (need_resched()) {
4943 			ext4_unlock_group(sb, group);
4944 			cond_resched();
4945 			ext4_lock_group(sb, group);
4946 		}
4947 
4948 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
4949 			break;
4950 	}
4951 
4952 	if (!ret) {
4953 		ret = count;
4954 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4955 	}
4956 out:
4957 	ext4_unlock_group(sb, group);
4958 	ext4_mb_unload_buddy(&e4b);
4959 
4960 	ext4_debug("trimmed %d blocks in the group %d\n",
4961 		count, group);
4962 
4963 	return ret;
4964 }
4965 
4966 /**
4967  * ext4_trim_fs() -- trim ioctl handler function
4968  * @sb:			superblock for filesystem
4969  * @range:		fstrim_range structure
4970  *
4971  * start:	first byte to trim
4972  * len:		number of bytes to trim from start
4973  * minlen:	minimum extent length in bytes
4974  * ext4_trim_fs goes through all allocation groups containing bytes from
4975  * start to start+len. For each such group the ext4_trim_all_free function
4976  * is invoked to trim all free space.
4977  */
4978 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4979 {
4980 	struct ext4_group_info *grp;
4981 	ext4_group_t group, first_group, last_group;
4982 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
4983 	uint64_t start, end, minlen, trimmed = 0;
4984 	ext4_fsblk_t first_data_blk =
4985 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
4986 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
4987 	int ret = 0;
4988 
4989 	start = range->start >> sb->s_blocksize_bits;
4990 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
4991 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
4992 			      range->minlen >> sb->s_blocksize_bits);
4993 
4994 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
4995 	    start >= max_blks ||
4996 	    range->len < sb->s_blocksize)
4997 		return -EINVAL;
4998 	if (end >= max_blks)
4999 		end = max_blks - 1;
5000 	if (end <= first_data_blk)
5001 		goto out;
5002 	if (start < first_data_blk)
5003 		start = first_data_blk;
5004 
5005 	/* Determine first and last group to examine based on start and end */
5006 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5007 				     &first_group, &first_cluster);
5008 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5009 				     &last_group, &last_cluster);
5010 
5011 	/* end now represents the last cluster to discard in this group */
5012 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5013 
5014 	for (group = first_group; group <= last_group; group++) {
5015 		grp = ext4_get_group_info(sb, group);
5016 		/* We only do this if the grp has never been initialized */
5017 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5018 			ret = ext4_mb_init_group(sb, group);
5019 			if (ret)
5020 				break;
5021 		}
5022 
5023 		/*
5024 		 * For all the groups except the last one, last cluster will
5025 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5026 		 * change it for the last group, note that last_cluster is
5027 		 * already computed earlier by ext4_get_group_no_and_offset()
5028 		 */
5029 		if (group == last_group)
5030 			end = last_cluster;
5031 
5032 		if (grp->bb_free >= minlen) {
5033 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5034 						end, minlen);
5035 			if (cnt < 0) {
5036 				ret = cnt;
5037 				break;
5038 			}
5039 			trimmed += cnt;
5040 		}
5041 
5042 		/*
5043 		 * For every group except the first one, we are sure
5044 		 * that the first cluster to discard will be cluster #0.
5045 		 */
5046 		first_cluster = 0;
5047 	}
5048 
5049 	if (!ret)
5050 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5051 
5052 out:
5053 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5054 	return ret;
5055 }
5056