xref: /openbmc/linux/fs/ext4/mballoc.c (revision 56d06fa2)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "ext4_jbd2.h"
25 #include "mballoc.h"
26 #include <linux/log2.h>
27 #include <linux/module.h>
28 #include <linux/slab.h>
29 #include <linux/backing-dev.h>
30 #include <trace/events/ext4.h>
31 
32 #ifdef CONFIG_EXT4_DEBUG
33 ushort ext4_mballoc_debug __read_mostly;
34 
35 module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
36 MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
37 #endif
38 
39 /*
40  * MUSTDO:
41  *   - test ext4_ext_search_left() and ext4_ext_search_right()
42  *   - search for metadata in a few groups
43  *
44  * TODO v4:
45  *   - normalization should take into account whether file is still open
46  *   - discard preallocations if no free space left (policy?)
47  *   - don't normalize tails
48  *   - quota
49  *   - reservation for superuser
50  *
51  * TODO v3:
52  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
53  *   - track min/max extents in each group for better group selection
54  *   - mb_mark_used() may allocate chunk right after splitting buddy
55  *   - tree of groups sorted by number of free blocks
56  *   - error handling
57  */
58 
59 /*
60  * An allocation request involves a request for multiple blocks near
61  * the specified goal (block) value.
62  *
63  * During the initialization phase of the allocator we decide to use
64  * group preallocation or inode preallocation depending on the size of
65  * the file. The size of the file could be the resulting file size we
66  * would have after allocation, or the current file size, whichever
67  * is larger. If the size is less than sbi->s_mb_stream_request we
68  * select group preallocation. The default value of
69  * s_mb_stream_request is 16 blocks. This can also be tuned via
70  * /sys/fs/ext4/<partition>/mb_stream_req. The value is expressed in
71  * terms of number of blocks.
72  *
73  * The main motivation for having small files use group preallocation is to
74  * ensure that small files are kept close together on the disk.
75  *
76  * In the first stage the allocator looks at the inode prealloc list,
77  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
78  * spaces for this particular inode. An inode prealloc space is
79  * represented as:
80  *
81  * pa_lstart -> the logical start block for this prealloc space
82  * pa_pstart -> the physical start block for this prealloc space
83  * pa_len    -> length for this prealloc space (in clusters)
84  * pa_free   ->  free space available in this prealloc space (in clusters)
85  *
86  * The inode preallocation space is used based on the _logical_ start
87  * block. Only if the logical file block falls within the range of the
88  * prealloc space do we consume that prealloc space. This makes sure
89  * that we have contiguous physical blocks representing the file blocks.
90  *
91  * The important thing to note about inode prealloc space is that we
92  * don't modify the values associated with it, except for
93  * pa_free.
94  *
95  * If we are not able to find blocks in the inode prealloc space and if we
96  * have the group allocation flag set then we look at the locality group
97  * prealloc space. This is a per-CPU prealloc list, represented as
98  *
99  * ext4_sb_info.s_locality_groups[smp_processor_id()]
100  *
101  * The reason for having a per-CPU locality group is to reduce the contention
102  * between CPUs. It is possible to get scheduled at this point.
103  *
104  * The locality group prealloc space is used based on whether we have
105  * enough free space (pa_free) within the prealloc space.
106  *
107  * If we can't allocate blocks via inode prealloc and/or locality group
108  * prealloc then we look at the buddy cache. The buddy cache is represented
109  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offsets get
110  * mapped to the buddy and bitmap information regarding different
111  * groups. The buddy information is attached to the buddy cache inode so
112  * that we can access it through the page cache. The information regarding
113  * each group is loaded via ext4_mb_load_buddy and consists of the
114  * block bitmap and the buddy information. They are stored in the
115  * inode as:
116  *
117  *  {                        page                        }
118  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
119  *
120  *
121  * one block each for bitmap and buddy information.  So for each group we
122  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
123  * blocksize) blocks.  So it can hold information for groups_per_page
124  * groups, which is blocks_per_page/2.
125  *
126  * The buddy cache inode is not stored on disk. The inode is thrown
127  * away when the filesystem is unmounted.
128  *
129  * We look for the requested number of blocks in the buddy cache. If we
130  * are able to locate that many free blocks we return with additional
131  * information regarding the rest of the contiguous physical blocks available.
132  *
133  * Before allocating blocks via the buddy cache we normalize the request
134  * blocks. This ensures we ask for more blocks than we need. The extra
135  * blocks that we get after allocation are added to the respective prealloc
136  * list. In case of inode preallocation we follow a set of heuristics
137  * based on file size. This can be found in ext4_mb_normalize_request. If
138  * we are doing a group prealloc we try to normalize the request to
139  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
140  * dependent on the cluster size; for non-bigalloc file systems, it is
141  * 512 blocks. This can be tuned via
142  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is expressed in
143  * terms of number of blocks. If we have mounted the file system with the
144  * -O stripe=<value> option, the group prealloc request is normalized to
145  * the smallest multiple of the stripe value (sbi->s_stripe) which is
146  * greater than the default mb_group_prealloc.
147  *
148  * The regular allocator (using the buddy cache) supports a few tunables.
149  *
150  * /sys/fs/ext4/<partition>/mb_min_to_scan
151  * /sys/fs/ext4/<partition>/mb_max_to_scan
152  * /sys/fs/ext4/<partition>/mb_order2_req
153  *
154  * The regular allocator uses the buddy scan only if the request len is a
155  * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
156  * The value of s_mb_order2_reqs can be tuned via
157  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
158  * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
159  * in stripe-size units. This should result in better allocation on RAID
160  * setups. If not, we search in the specific group using the bitmap for
161  * best extents. The tunables min_to_scan and max_to_scan control the
162  * behaviour here. min_to_scan indicates how long mballoc __must__ look
163  * for a best extent and max_to_scan indicates how long mballoc __can__
164  * look for a best extent among the found extents. Searching for blocks
165  * starts with the group specified as the goal value in the allocation
166  * context via ac_g_ex. Each group is first checked against the criteria
167  * for whether it can be used for allocation. ext4_mb_good_group explains
168  * how the groups are checked.
169  *
170  * Both prealloc spaces are populated as described above. So the first
171  * request will hit the buddy cache, which results in the prealloc
172  * space getting filled. The prealloc space is then used for
173  * subsequent requests.
174  */
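/*
 * Editor's illustrative sketch (not part of the original file): the
 * small-file decision described above, reduced to its core comparison.
 * The real logic lives in ext4_mb_group_or_file(); the "example_" helper
 * and its parameters are hypothetical.
 */
#if 0
static bool example_use_group_prealloc(struct ext4_sb_info *sbi,
				       unsigned int blkbits,
				       loff_t cur_size, loff_t alloc_end)
{
	/* the relevant size is the larger of the current file size and
	 * the size the file would have after this allocation */
	loff_t size = max(cur_size, alloc_end);

	/* files smaller than mb_stream_req blocks use the per-CPU
	 * locality group preallocation; larger files use inode PAs */
	return (size >> blkbits) < sbi->s_mb_stream_request;
}
#endif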
175 
176 /*
177  * mballoc operates on the following data:
178  *  - on-disk bitmap
179  *  - in-core buddy (actually includes buddy and bitmap)
180  *  - preallocation descriptors (PAs)
181  *
182  * there are two types of preallocations:
183  *  - inode
184  *    assigned to a specific inode and can be used for this inode only.
185  *    it describes part of the inode's space preallocated to specific
186  *    physical blocks. any block from that preallocation can be used
187  *    independently. the descriptor just tracks the number of blocks left
188  *    unused. so, before taking some block from the descriptor, one must
189  *    make sure the corresponding logical block isn't allocated yet. this
190  *    also means that freeing any block within the descriptor's range
191  *    must discard all preallocated blocks.
192  *  - locality group
193  *    assigned to a specific locality group which does not translate to a
194  *    permanent set of inodes: an inode can join and leave the group. space
195  *    from this type of preallocation can be used for any inode. thus
196  *    it's consumed from the beginning to the end.
197  *
198  * relation between them can be expressed as:
199  *    in-core buddy = on-disk bitmap + preallocation descriptors
200  *
201  * this means the blocks mballoc considers used are:
202  *  - allocated blocks (persistent)
203  *  - preallocated blocks (non-persistent)
204  *
205  * consistency in mballoc world means that at any time a block is either
206  * free or used in ALL structures. notice: "any time" should not be read
207  * literally -- time is discrete and delimited by locks.
208  *
209  *  to keep it simple, we don't use block numbers, instead we count number of
210  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
211  *
212  * all operations can be expressed as:
213  *  - init buddy:			buddy = on-disk + PAs
214  *  - new PA:				buddy += N; PA = N
215  *  - use inode PA:			on-disk += N; PA -= N
216  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
217  *  - use locality group PA		on-disk += N; PA -= N
218  *  - discard locality group PA		buddy -= PA; PA = 0
219  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
220  *        is used in real operation because we can't know actual used
221  *        bits from PA, only from on-disk bitmap
222  *
223  * if we follow this strict logic, then all operations above should be atomic.
224  * given some of them can block, we'd have to use something like semaphores
225  * killing performance on high-end SMP hardware. let's try to relax it using
226  * the following knowledge:
227  *  1) if buddy is referenced, it's already initialized
228  *  2) while block is used in buddy and the buddy is referenced,
229  *     nobody can re-allocate that block
230  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
231  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
232  *     on-disk bitmap if the buddy has the same bit set and/or a PA covers the
233  *     corresponding block
234  *
235  * so, now we're building a concurrency table:
236  *  - init buddy vs.
237  *    - new PA
238  *      blocks for PA are allocated in the buddy, buddy must be referenced
239  *      until PA is linked to allocation group to avoid concurrent buddy init
240  *    - use inode PA
241  *      we need to make sure that either on-disk bitmap or PA has uptodate data
242  *      given (3) we care that PA-=N operation doesn't interfere with init
243  *    - discard inode PA
244  *      the simplest way would be to have buddy initialized by the discard
245  *    - use locality group PA
246  *      again PA-=N must be serialized with init
247  *    - discard locality group PA
248  *      the simplest way would be to have buddy initialized by the discard
249  *  - new PA vs.
250  *    - use inode PA
251  *      i_data_sem serializes them
252  *    - discard inode PA
253  *      discard process must wait until PA isn't used by another process
254  *    - use locality group PA
255  *      some mutex should serialize them
256  *    - discard locality group PA
257  *      discard process must wait until PA isn't used by another process
258  *  - use inode PA
259  *    - use inode PA
260  *      i_data_sem or another mutex should serialize them
261  *    - discard inode PA
262  *      discard process must wait until PA isn't used by another process
263  *    - use locality group PA
264  *      nothing wrong here -- they're different PAs covering different blocks
265  *    - discard locality group PA
266  *      discard process must wait until PA isn't used by another process
267  *
268  * now we're ready to draw a few conclusions:
269  *  - while a PA is referenced, no discard is possible
270  *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
271  *  - a PA changes only after the on-disk bitmap does
272  *  - discard must not compete with init. either init is done before
273  *    any discard or they're serialized somehow
274  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
275  *
276  * a special case is when we've used a PA down to emptiness. no need to
277  * modify the buddy in this case, but we should care about concurrent init
278  *
279  */
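/*
 * Editor's worked example of the accounting above (illustrative only):
 * creating a 16-cluster inode PA marks 16 clusters used in the buddy
 * (buddy += 16, PA = 16).  Consuming 10 of them marks those 10 in the
 * on-disk bitmap (on-disk += 10, PA -= 10, buddy unchanged).  Discarding
 * the PA then frees the remaining pa_free == 6 clusters in the buddy,
 * restoring the identity: in-core buddy = on-disk bitmap + PAs.
 */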
280 
281 /*
282  * Logic in a few words:
283  *
284  *  - allocation:
285  *    load group
286  *    find blocks
287  *    mark bits in on-disk bitmap
288  *    release group
289  *
290  *  - use preallocation:
291  *    find proper PA (per-inode or group)
292  *    load group
293  *    mark bits in on-disk bitmap
294  *    release group
295  *    release PA
296  *
297  *  - free:
298  *    load group
299  *    mark bits in on-disk bitmap
300  *    release group
301  *
302  *  - discard preallocations in group:
303  *    mark PAs deleted
304  *    move them onto local list
305  *    load on-disk bitmap
306  *    load group
307  *    remove PA from object (inode or locality group)
308  *    mark free blocks in-core
309  *
310  *  - discard inode's preallocations:
311  */
312 
313 /*
314  * Locking rules
315  *
316  * Locks:
317  *  - bitlock on a group	(group)
318  *  - object (inode/locality)	(object)
319  *  - per-pa lock		(pa)
320  *
321  * Paths:
322  *  - new pa
323  *    object
324  *    group
325  *
326  *  - find and use pa:
327  *    pa
328  *
329  *  - release consumed pa:
330  *    pa
331  *    group
332  *    object
333  *
334  *  - generate in-core bitmap:
335  *    group
336  *        pa
337  *
338  *  - discard all for given object (inode, locality group):
339  *    object
340  *        pa
341  *    group
342  *
343  *  - discard all for given group:
344  *    group
345  *        pa
346  *    group
347  *        object
348  *
349  */
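/*
 * Editor's illustrative sketch (not part of the original file) of the
 * "new pa" lock ordering listed above: the object (inode) lock is taken
 * first, then the group's bitlock.  All "example_" names are
 * hypothetical; the real linking is done in the ext4_mb_new_*_pa()
 * helpers.
 */
#if 0
static void example_link_new_pa(struct super_block *sb, struct inode *inode,
				ext4_group_t group,
				struct ext4_prealloc_space *pa)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* object lock: attach the PA to its owning inode */
	spin_lock(&ei->i_prealloc_lock);
	list_add(&pa->pa_inode_list, &ei->i_prealloc_list);
	spin_unlock(&ei->i_prealloc_lock);

	/* group bitlock: make the PA visible for in-core bitmap
	 * generation in this group */
	ext4_lock_group(sb, group);
	list_add(&pa->pa_group_list,
		 &ext4_get_group_info(sb, group)->bb_prealloc_list);
	ext4_unlock_group(sb, group);
}
#endif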
350 static struct kmem_cache *ext4_pspace_cachep;
351 static struct kmem_cache *ext4_ac_cachep;
352 static struct kmem_cache *ext4_free_data_cachep;
353 
354 /* We create slab caches for groupinfo data structures based on the
355  * superblock block size.  There will be one cache per unique
356  * s_blocksize_bits, shared by all filesystems with that block size. */
357 #define NR_GRPINFO_CACHES 8
358 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
359 
360 static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
361 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
362 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
363 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
364 };
365 
366 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
367 					ext4_group_t group);
368 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
369 						ext4_group_t group);
370 static void ext4_free_data_callback(struct super_block *sb,
371 				struct ext4_journal_cb_entry *jce, int rc);
372 
373 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
374 {
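	/*
	 * Editor's note: fold the misaligned low address bits into the bit
	 * index, e.g. on 64-bit, addr = base + 5 becomes addr = base with
	 * *bit increased by 5 * 8 = 40, so the (addr, bit) pair still names
	 * the same bit but addr is now unsigned-long aligned.
	 */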
375 #if BITS_PER_LONG == 64
376 	*bit += ((unsigned long) addr & 7UL) << 3;
377 	addr = (void *) ((unsigned long) addr & ~7UL);
378 #elif BITS_PER_LONG == 32
379 	*bit += ((unsigned long) addr & 3UL) << 3;
380 	addr = (void *) ((unsigned long) addr & ~3UL);
381 #else
382 #error "how many bits are you?!"
383 #endif
384 	return addr;
385 }
386 
387 static inline int mb_test_bit(int bit, void *addr)
388 {
389 	/*
390 	 * ext4_test_bit on architectures like powerpc
391 	 * needs an unsigned long aligned address
392 	 */
393 	addr = mb_correct_addr_and_bit(&bit, addr);
394 	return ext4_test_bit(bit, addr);
395 }
396 
397 static inline void mb_set_bit(int bit, void *addr)
398 {
399 	addr = mb_correct_addr_and_bit(&bit, addr);
400 	ext4_set_bit(bit, addr);
401 }
402 
403 static inline void mb_clear_bit(int bit, void *addr)
404 {
405 	addr = mb_correct_addr_and_bit(&bit, addr);
406 	ext4_clear_bit(bit, addr);
407 }
408 
409 static inline int mb_test_and_clear_bit(int bit, void *addr)
410 {
411 	addr = mb_correct_addr_and_bit(&bit, addr);
412 	return ext4_test_and_clear_bit(bit, addr);
413 }
414 
415 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
416 {
417 	int fix = 0, ret, tmpmax;
418 	addr = mb_correct_addr_and_bit(&fix, addr);
419 	tmpmax = max + fix;
420 	start += fix;
421 
422 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
423 	if (ret > max)
424 		return max;
425 	return ret;
426 }
427 
428 static inline int mb_find_next_bit(void *addr, int max, int start)
429 {
430 	int fix = 0, ret, tmpmax;
431 	addr = mb_correct_addr_and_bit(&fix, addr);
432 	tmpmax = max + fix;
433 	start += fix;
434 
435 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
436 	if (ret > max)
437 		return max;
438 	return ret;
439 }
440 
441 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
442 {
443 	char *bb;
444 
445 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
446 	BUG_ON(max == NULL);
447 
448 	if (order > e4b->bd_blkbits + 1) {
449 		*max = 0;
450 		return NULL;
451 	}
452 
453 	/* at order 0 we see each particular block */
454 	if (order == 0) {
455 		*max = 1 << (e4b->bd_blkbits + 3);
456 		return e4b->bd_bitmap;
457 	}
458 
459 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
460 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
461 
462 	return bb;
463 }
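/*
 * Editor's note, a worked example of the layout above: with 4KiB blocks
 * (bd_blkbits == 12), order 0 is the block bitmap itself with
 * 1 << (12 + 3) == 32768 bits, one per cluster; each higher order is a
 * smaller bitmap inside bd_buddy located via s_mb_offsets[order], with
 * its valid length in s_mb_maxs[order].
 */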
464 
465 #ifdef DOUBLE_CHECK
466 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
467 			   int first, int count)
468 {
469 	int i;
470 	struct super_block *sb = e4b->bd_sb;
471 
472 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
473 		return;
474 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
475 	for (i = 0; i < count; i++) {
476 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
477 			ext4_fsblk_t blocknr;
478 
479 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
480 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
481 			ext4_grp_locked_error(sb, e4b->bd_group,
482 					      inode ? inode->i_ino : 0,
483 					      blocknr,
484 					      "freeing block already freed "
485 					      "(bit %u)",
486 					      first + i);
487 		}
488 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
489 	}
490 }
491 
492 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
493 {
494 	int i;
495 
496 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
497 		return;
498 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
499 	for (i = 0; i < count; i++) {
500 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
501 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
502 	}
503 }
504 
505 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
506 {
507 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
508 		unsigned char *b1, *b2;
509 		int i;
510 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
511 		b2 = (unsigned char *) bitmap;
512 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
513 			if (b1[i] != b2[i]) {
514 				ext4_msg(e4b->bd_sb, KERN_ERR,
515 					 "corruption in group %u "
516 					 "at byte %u(%u): %x in copy != %x "
517 					 "on disk/prealloc",
518 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
519 				BUG();
520 			}
521 		}
522 	}
523 }
524 
525 #else
526 static inline void mb_free_blocks_double(struct inode *inode,
527 				struct ext4_buddy *e4b, int first, int count)
528 {
529 	return;
530 }
531 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
532 						int first, int count)
533 {
534 	return;
535 }
536 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
537 {
538 	return;
539 }
540 #endif
541 
542 #ifdef AGGRESSIVE_CHECK
543 
544 #define MB_CHECK_ASSERT(assert)						\
545 do {									\
546 	if (!(assert)) {						\
547 		printk(KERN_EMERG					\
548 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
549 			function, file, line, # assert);		\
550 		BUG();							\
551 	}								\
552 } while (0)
553 
554 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
555 				const char *function, int line)
556 {
557 	struct super_block *sb = e4b->bd_sb;
558 	int order = e4b->bd_blkbits + 1;
559 	int max;
560 	int max2;
561 	int i;
562 	int j;
563 	int k;
564 	int count;
565 	struct ext4_group_info *grp;
566 	int fragments = 0;
567 	int fstart;
568 	struct list_head *cur;
569 	void *buddy;
570 	void *buddy2;
571 
572 	{
573 		static int mb_check_counter;
574 		if (mb_check_counter++ % 100 != 0)
575 			return 0;
576 	}
577 
578 	while (order > 1) {
579 		buddy = mb_find_buddy(e4b, order, &max);
580 		MB_CHECK_ASSERT(buddy);
581 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
582 		MB_CHECK_ASSERT(buddy2);
583 		MB_CHECK_ASSERT(buddy != buddy2);
584 		MB_CHECK_ASSERT(max * 2 == max2);
585 
586 		count = 0;
587 		for (i = 0; i < max; i++) {
588 
589 			if (mb_test_bit(i, buddy)) {
590 				/* only single bit in buddy2 may be 1 */
591 				if (!mb_test_bit(i << 1, buddy2)) {
592 					MB_CHECK_ASSERT(
593 						mb_test_bit((i<<1)+1, buddy2));
594 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
595 					MB_CHECK_ASSERT(
596 						mb_test_bit(i << 1, buddy2));
597 				}
598 				continue;
599 			}
600 
601 			/* both bits in buddy2 must be 1 */
602 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
603 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
604 
605 			for (j = 0; j < (1 << order); j++) {
606 				k = (i * (1 << order)) + j;
607 				MB_CHECK_ASSERT(
608 					!mb_test_bit(k, e4b->bd_bitmap));
609 			}
610 			count++;
611 		}
612 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
613 		order--;
614 	}
615 
616 	fstart = -1;
617 	buddy = mb_find_buddy(e4b, 0, &max);
618 	for (i = 0; i < max; i++) {
619 		if (!mb_test_bit(i, buddy)) {
620 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
621 			if (fstart == -1) {
622 				fragments++;
623 				fstart = i;
624 			}
625 			continue;
626 		}
627 		fstart = -1;
628 		/* check used bits only */
629 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
630 			buddy2 = mb_find_buddy(e4b, j, &max2);
631 			k = i >> j;
632 			MB_CHECK_ASSERT(k < max2);
633 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
634 		}
635 	}
636 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
637 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
638 
639 	grp = ext4_get_group_info(sb, e4b->bd_group);
640 	list_for_each(cur, &grp->bb_prealloc_list) {
641 		ext4_group_t groupnr;
642 		struct ext4_prealloc_space *pa;
643 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
644 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
645 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
646 		for (i = 0; i < pa->pa_len; i++)
647 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
648 	}
649 	return 0;
650 }
651 #undef MB_CHECK_ASSERT
652 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
653 					__FILE__, __func__, __LINE__)
654 #else
655 #define mb_check_buddy(e4b)
656 #endif
657 
658 /*
659  * Divide the blocks starting at @first with length @len into
660  * smaller chunks of power-of-2 lengths.
661  * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
662  * then increase bb_counters[] for the corresponding chunk size.
663  */
664 static void ext4_mb_mark_free_simple(struct super_block *sb,
665 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
666 					struct ext4_group_info *grp)
667 {
668 	struct ext4_sb_info *sbi = EXT4_SB(sb);
669 	ext4_grpblk_t min;
670 	ext4_grpblk_t max;
671 	ext4_grpblk_t chunk;
672 	unsigned short border;
673 
674 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
675 
676 	border = 2 << sb->s_blocksize_bits;
677 
678 	while (len > 0) {
679 		/* find how many blocks can be covered from this position */
680 		max = ffs(first | border) - 1;
681 
682 		/* find the largest power-of-2 chunk that fits in len */
683 		min = fls(len) - 1;
684 
685 		if (max < min)
686 			min = max;
687 		chunk = 1 << min;
688 
689 		/* mark multiblock chunks only */
690 		grp->bb_counters[min]++;
691 		if (min > 0)
692 			mb_clear_bit(first >> min,
693 				     buddy + sbi->s_mb_offsets[min]);
694 
695 		len -= chunk;
696 		first += chunk;
697 	}
698 }
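/*
 * Editor's worked example for the loop above: freeing first = 5, len = 8
 * (clusters 5..12) splits into chunks [5], [6-7], [8-11], [12]: each
 * iteration takes the largest power-of-2 chunk that is both aligned at
 * `first` (via ffs) and no longer than the remaining `len` (via fls),
 * bumping bb_counters[0], bb_counters[1], bb_counters[2], bb_counters[0]
 * respectively and clearing the order-1 and order-2 buddy bits.
 */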
699 
700 /*
701  * Cache the order of the largest free extent we have available in this block
702  * group.
703  */
704 static void
705 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
706 {
707 	int i;
708 	int bits;
709 
710 	grp->bb_largest_free_order = -1; /* uninit */
711 
712 	bits = sb->s_blocksize_bits + 1;
713 	for (i = bits; i >= 0; i--) {
714 		if (grp->bb_counters[i] > 0) {
715 			grp->bb_largest_free_order = i;
716 			break;
717 		}
718 	}
719 }
720 
721 static noinline_for_stack
722 void ext4_mb_generate_buddy(struct super_block *sb,
723 				void *buddy, void *bitmap, ext4_group_t group)
724 {
725 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
726 	struct ext4_sb_info *sbi = EXT4_SB(sb);
727 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
728 	ext4_grpblk_t i = 0;
729 	ext4_grpblk_t first;
730 	ext4_grpblk_t len;
731 	unsigned free = 0;
732 	unsigned fragments = 0;
733 	unsigned long long period = get_cycles();
734 
735 	/* initialize buddy from the bitmap, which is an aggregation
736 	 * of on-disk bitmap and preallocations */
737 	i = mb_find_next_zero_bit(bitmap, max, 0);
738 	grp->bb_first_free = i;
739 	while (i < max) {
740 		fragments++;
741 		first = i;
742 		i = mb_find_next_bit(bitmap, max, i);
743 		len = i - first;
744 		free += len;
745 		if (len > 1)
746 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
747 		else
748 			grp->bb_counters[0]++;
749 		if (i < max)
750 			i = mb_find_next_zero_bit(bitmap, max, i);
751 	}
752 	grp->bb_fragments = fragments;
753 
754 	if (free != grp->bb_free) {
755 		ext4_grp_locked_error(sb, group, 0, 0,
756 				      "block bitmap and bg descriptor "
757 				      "inconsistent: %u vs %u free clusters",
758 				      free, grp->bb_free);
759 		/*
760 		 * If we intend to continue, we consider group descriptor
761 		 * corrupt and update bb_free using bitmap value
762 		 */
763 		grp->bb_free = free;
764 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
765 			percpu_counter_sub(&sbi->s_freeclusters_counter,
766 					   grp->bb_free);
767 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
768 	}
769 	mb_set_largest_free_order(sb, grp);
770 
771 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
772 
773 	period = get_cycles() - period;
774 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
775 	EXT4_SB(sb)->s_mb_buddies_generated++;
776 	EXT4_SB(sb)->s_mb_generation_time += period;
777 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
778 }
779 
780 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
781 {
782 	int count;
783 	int order = 1;
784 	void *buddy;
785 
786 	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
787 		ext4_set_bits(buddy, 0, count);
788 	}
789 	e4b->bd_info->bb_fragments = 0;
790 	memset(e4b->bd_info->bb_counters, 0,
791 		sizeof(*e4b->bd_info->bb_counters) *
792 		(e4b->bd_sb->s_blocksize_bits + 2));
793 
794 	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
795 		e4b->bd_bitmap, e4b->bd_group);
796 }
797 
798 /* The buddy information is attached to the buddy cache inode
799  * for convenience. The information regarding each group
800  * is loaded via ext4_mb_load_buddy and consists of the
801  * block bitmap and the buddy information. It is
802  * stored in the inode as
803  *
804  * {                        page                        }
805  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
806  *
807  *
808  * one block each for bitmap and buddy information.
809  * So for each group we take up 2 blocks. A page can
810  * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
811  * So it can hold information for groups_per_page groups, which
812  * is blocks_per_page/2.
813  *
814  * Locking note:  This routine takes the block group lock of all groups
815  * for this page; do not hold this lock when calling this routine!
816  */
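/*
 * Editor's worked example of the layout above: with 4KiB pages and 1KiB
 * blocks, blocks_per_page == 4 and groups_per_page == 2, so page 0 holds
 * [group 0 bitmap][group 0 buddy][group 1 bitmap][group 1 buddy]; with
 * 4KiB blocks, blocks_per_page == 1, so group g's bitmap lives in page
 * 2*g and its buddy in page 2*g + 1.
 */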
817 
818 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
819 {
820 	ext4_group_t ngroups;
821 	int blocksize;
822 	int blocks_per_page;
823 	int groups_per_page;
824 	int err = 0;
825 	int i;
826 	ext4_group_t first_group, group;
827 	int first_block;
828 	struct super_block *sb;
829 	struct buffer_head *bhs;
830 	struct buffer_head **bh = NULL;
831 	struct inode *inode;
832 	char *data;
833 	char *bitmap;
834 	struct ext4_group_info *grinfo;
835 
836 	mb_debug(1, "init page %lu\n", page->index);
837 
838 	inode = page->mapping->host;
839 	sb = inode->i_sb;
840 	ngroups = ext4_get_groups_count(sb);
841 	blocksize = 1 << inode->i_blkbits;
842 	blocks_per_page = PAGE_SIZE / blocksize;
843 
844 	groups_per_page = blocks_per_page >> 1;
845 	if (groups_per_page == 0)
846 		groups_per_page = 1;
847 
848 	/* allocate buffer_heads to read bitmaps */
849 	if (groups_per_page > 1) {
850 		i = sizeof(struct buffer_head *) * groups_per_page;
851 		bh = kzalloc(i, gfp);
852 		if (bh == NULL) {
853 			err = -ENOMEM;
854 			goto out;
855 		}
856 	} else
857 		bh = &bhs;
858 
859 	first_group = page->index * blocks_per_page / 2;
860 
861 	/* read all groups the page covers into the cache */
862 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
863 		if (group >= ngroups)
864 			break;
865 
866 		grinfo = ext4_get_group_info(sb, group);
867 		/*
868 		 * If page is uptodate then we came here after online resize
869 		 * which added some new uninitialized group info structs, so
870 		 * we must skip all initialized uptodate buddies on the page,
871 		 * which may be currently in use by an allocating task.
872 		 */
873 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
874 			bh[i] = NULL;
875 			continue;
876 		}
877 		bh[i] = ext4_read_block_bitmap_nowait(sb, group);
878 		if (IS_ERR(bh[i])) {
879 			err = PTR_ERR(bh[i]);
880 			bh[i] = NULL;
881 			goto out;
882 		}
883 		mb_debug(1, "read bitmap for group %u\n", group);
884 	}
885 
886 	/* wait for I/O completion */
887 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
888 		int err2;
889 
890 		if (!bh[i])
891 			continue;
892 		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
893 		if (!err)
894 			err = err2;
895 	}
896 
897 	first_block = page->index * blocks_per_page;
898 	for (i = 0; i < blocks_per_page; i++) {
899 		group = (first_block + i) >> 1;
900 		if (group >= ngroups)
901 			break;
902 
903 		if (!bh[group - first_group])
904 			/* skip initialized uptodate buddy */
905 			continue;
906 
907 		if (!buffer_verified(bh[group - first_group]))
908 			/* Skip faulty bitmaps */
909 			continue;
910 		err = 0;
911 
912 		/*
913 		 * data carries the information regarding this
914 		 * particular group in the format specified
915 		 * above
916 		 *
917 		 */
918 		data = page_address(page) + (i * blocksize);
919 		bitmap = bh[group - first_group]->b_data;
920 
921 		/*
922 		 * We place the buddy block and bitmap block
923 		 * close together
924 		 */
925 		if ((first_block + i) & 1) {
926 			/* this is block of buddy */
927 			BUG_ON(incore == NULL);
928 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
929 				group, page->index, i * blocksize);
930 			trace_ext4_mb_buddy_bitmap_load(sb, group);
931 			grinfo = ext4_get_group_info(sb, group);
932 			grinfo->bb_fragments = 0;
933 			memset(grinfo->bb_counters, 0,
934 			       sizeof(*grinfo->bb_counters) *
935 				(sb->s_blocksize_bits+2));
936 			/*
937 			 * incore got set to the group block bitmap below
938 			 */
939 			ext4_lock_group(sb, group);
940 			/* init the buddy */
941 			memset(data, 0xff, blocksize);
942 			ext4_mb_generate_buddy(sb, data, incore, group);
943 			ext4_unlock_group(sb, group);
944 			incore = NULL;
945 		} else {
946 			/* this is block of bitmap */
947 			BUG_ON(incore != NULL);
948 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
949 				group, page->index, i * blocksize);
950 			trace_ext4_mb_bitmap_load(sb, group);
951 
952 			/* see comments in ext4_mb_put_pa() */
953 			ext4_lock_group(sb, group);
954 			memcpy(data, bitmap, blocksize);
955 
956 			/* mark all preallocated blks used in in-core bitmap */
957 			ext4_mb_generate_from_pa(sb, data, group);
958 			ext4_mb_generate_from_freelist(sb, data, group);
959 			ext4_unlock_group(sb, group);
960 
961 			/* set incore so that the buddy information can be
962 			 * generated using this
963 			 */
964 			incore = data;
965 		}
966 	}
967 	SetPageUptodate(page);
968 
969 out:
970 	if (bh) {
971 		for (i = 0; i < groups_per_page; i++)
972 			brelse(bh[i]);
973 		if (bh != &bhs)
974 			kfree(bh);
975 	}
976 	return err;
977 }
978 
979 /*
980  * Lock the buddy and bitmap pages. This make sure other parallel init_group
981  * on the same buddy page doesn't happen whild holding the buddy page lock.
982  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
983  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
984  */
985 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
986 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
987 {
988 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
989 	int block, pnum, poff;
990 	int blocks_per_page;
991 	struct page *page;
992 
993 	e4b->bd_buddy_page = NULL;
994 	e4b->bd_bitmap_page = NULL;
995 
996 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
997 	/*
998 	 * the buddy cache inode stores the block bitmap
999 	 * and buddy information in consecutive blocks.
1000 	 * So for each group we need two blocks.
1001 	 */
1002 	block = group * 2;
1003 	pnum = block / blocks_per_page;
1004 	poff = block % blocks_per_page;
1005 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1006 	if (!page)
1007 		return -ENOMEM;
1008 	BUG_ON(page->mapping != inode->i_mapping);
1009 	e4b->bd_bitmap_page = page;
1010 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1011 
1012 	if (blocks_per_page >= 2) {
1013 		/* buddy and bitmap are on the same page */
1014 		return 0;
1015 	}
1016 
1017 	block++;
1018 	pnum = block / blocks_per_page;
1019 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1020 	if (!page)
1021 		return -ENOMEM;
1022 	BUG_ON(page->mapping != inode->i_mapping);
1023 	e4b->bd_buddy_page = page;
1024 	return 0;
1025 }
1026 
1027 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1028 {
1029 	if (e4b->bd_bitmap_page) {
1030 		unlock_page(e4b->bd_bitmap_page);
1031 		put_page(e4b->bd_bitmap_page);
1032 	}
1033 	if (e4b->bd_buddy_page) {
1034 		unlock_page(e4b->bd_buddy_page);
1035 		put_page(e4b->bd_buddy_page);
1036 	}
1037 }
1038 
1039 /*
1040  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1041  * block group lock of all groups for this page; do not hold the BG lock when
1042  * calling this routine!
1043  */
1044 static noinline_for_stack
1045 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1046 {
1047 
1048 	struct ext4_group_info *this_grp;
1049 	struct ext4_buddy e4b;
1050 	struct page *page;
1051 	int ret = 0;
1052 
1053 	might_sleep();
1054 	mb_debug(1, "init group %u\n", group);
1055 	this_grp = ext4_get_group_info(sb, group);
1056 	/*
1057 	 * This ensures that we don't reinit the buddy cache
1058 	 * page which map to the group from which we are already
1059 	 * allocating. If we are looking at the buddy cache we would
1060 	 * have taken a reference using ext4_mb_load_buddy and that
1061 	 * would have pinned buddy page to page cache.
1062 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1063 	 * page accessed.
1064 	 */
1065 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1066 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1067 		/*
1068 		 * somebody initialized the group
1069 		 * return without doing anything
1070 		 */
1071 		goto err;
1072 	}
1073 
1074 	page = e4b.bd_bitmap_page;
1075 	ret = ext4_mb_init_cache(page, NULL, gfp);
1076 	if (ret)
1077 		goto err;
1078 	if (!PageUptodate(page)) {
1079 		ret = -EIO;
1080 		goto err;
1081 	}
1082 
1083 	if (e4b.bd_buddy_page == NULL) {
1084 		/*
1085 		 * If both the bitmap and buddy are in
1086 		 * the same page we don't need to force
1087 		 * init the buddy
1088 		 */
1089 		ret = 0;
1090 		goto err;
1091 	}
1092 	/* init buddy cache */
1093 	page = e4b.bd_buddy_page;
1094 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1095 	if (ret)
1096 		goto err;
1097 	if (!PageUptodate(page)) {
1098 		ret = -EIO;
1099 		goto err;
1100 	}
1101 err:
1102 	ext4_mb_put_buddy_page_lock(&e4b);
1103 	return ret;
1104 }
1105 
1106 /*
1107  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1108  * block group lock of all groups for this page; do not hold the BG lock when
1109  * calling this routine!
1110  */
1111 static noinline_for_stack int
1112 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1113 		       struct ext4_buddy *e4b, gfp_t gfp)
1114 {
1115 	int blocks_per_page;
1116 	int block;
1117 	int pnum;
1118 	int poff;
1119 	struct page *page;
1120 	int ret;
1121 	struct ext4_group_info *grp;
1122 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1123 	struct inode *inode = sbi->s_buddy_cache;
1124 
1125 	might_sleep();
1126 	mb_debug(1, "load group %u\n", group);
1127 
1128 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1129 	grp = ext4_get_group_info(sb, group);
1130 
1131 	e4b->bd_blkbits = sb->s_blocksize_bits;
1132 	e4b->bd_info = grp;
1133 	e4b->bd_sb = sb;
1134 	e4b->bd_group = group;
1135 	e4b->bd_buddy_page = NULL;
1136 	e4b->bd_bitmap_page = NULL;
1137 
1138 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1139 		/*
1140 		 * we need full data about the group
1141 		 * to make a good selection
1142 		 */
1143 		ret = ext4_mb_init_group(sb, group, gfp);
1144 		if (ret)
1145 			return ret;
1146 	}
1147 
1148 	/*
1149 	 * the buddy cache inode stores the block bitmap
1150 	 * and buddy information in consecutive blocks.
1151 	 * So for each group we need two blocks.
1152 	 */
1153 	block = group * 2;
1154 	pnum = block / blocks_per_page;
1155 	poff = block % blocks_per_page;
1156 
1157 	/* we could use find_or_create_page(), but it locks the page,
1158 	 * which we'd like to avoid in the fast path ... */
1159 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1160 	if (page == NULL || !PageUptodate(page)) {
1161 		if (page)
1162 			/*
1163 			 * drop the page reference and try
1164 			 * to get the page with the lock. If the
1165 			 * page is not uptodate that implies
1166 			 * somebody just created the page but
1167 			 * has yet to initialize it. So
1168 			 * wait for it to be initialized.
1169 			 */
1170 			put_page(page);
1171 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1172 		if (page) {
1173 			BUG_ON(page->mapping != inode->i_mapping);
1174 			if (!PageUptodate(page)) {
1175 				ret = ext4_mb_init_cache(page, NULL, gfp);
1176 				if (ret) {
1177 					unlock_page(page);
1178 					goto err;
1179 				}
1180 				mb_cmp_bitmaps(e4b, page_address(page) +
1181 					       (poff * sb->s_blocksize));
1182 			}
1183 			unlock_page(page);
1184 		}
1185 	}
1186 	if (page == NULL) {
1187 		ret = -ENOMEM;
1188 		goto err;
1189 	}
1190 	if (!PageUptodate(page)) {
1191 		ret = -EIO;
1192 		goto err;
1193 	}
1194 
1195 	/* Pages marked accessed already */
1196 	e4b->bd_bitmap_page = page;
1197 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1198 
1199 	block++;
1200 	pnum = block / blocks_per_page;
1201 	poff = block % blocks_per_page;
1202 
1203 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1204 	if (page == NULL || !PageUptodate(page)) {
1205 		if (page)
1206 			put_page(page);
1207 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1208 		if (page) {
1209 			BUG_ON(page->mapping != inode->i_mapping);
1210 			if (!PageUptodate(page)) {
1211 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1212 							 gfp);
1213 				if (ret) {
1214 					unlock_page(page);
1215 					goto err;
1216 				}
1217 			}
1218 			unlock_page(page);
1219 		}
1220 	}
1221 	if (page == NULL) {
1222 		ret = -ENOMEM;
1223 		goto err;
1224 	}
1225 	if (!PageUptodate(page)) {
1226 		ret = -EIO;
1227 		goto err;
1228 	}
1229 
1230 	/* Pages marked accessed already */
1231 	e4b->bd_buddy_page = page;
1232 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1233 
1234 	BUG_ON(e4b->bd_bitmap_page == NULL);
1235 	BUG_ON(e4b->bd_buddy_page == NULL);
1236 
1237 	return 0;
1238 
1239 err:
1240 	if (page)
1241 		put_page(page);
1242 	if (e4b->bd_bitmap_page)
1243 		put_page(e4b->bd_bitmap_page);
1244 	if (e4b->bd_buddy_page)
1245 		put_page(e4b->bd_buddy_page);
1246 	e4b->bd_buddy = NULL;
1247 	e4b->bd_bitmap = NULL;
1248 	return ret;
1249 }
1250 
1251 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1252 			      struct ext4_buddy *e4b)
1253 {
1254 	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1255 }
1256 
1257 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1258 {
1259 	if (e4b->bd_bitmap_page)
1260 		put_page(e4b->bd_bitmap_page);
1261 	if (e4b->bd_buddy_page)
1262 		put_page(e4b->bd_buddy_page);
1263 }
1264 
1265 
1266 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1267 {
1268 	int order = 1;
1269 	void *bb;
1270 
1271 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1272 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1273 
1274 	bb = e4b->bd_buddy;
1275 	while (order <= e4b->bd_blkbits + 1) {
1276 		block = block >> 1;
1277 		if (!mb_test_bit(block, bb)) {
1278 			/* this block is part of buddy of order 'order' */
1279 			return order;
1280 		}
1281 		bb += 1 << (e4b->bd_blkbits - order);
1282 		order++;
1283 	}
1284 	return 0;
1285 }
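/* Editor's note, a worked example for the walk above: if clusters 52-55
 * form a free order-2 chunk, the order-1 buddy bit for the pair (52,53)
 * (index 26) is still set, so the loop advances; the order-2 bit at
 * index 13 is clear, so the function returns 2.
 */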
1286 
1287 static void mb_clear_bits(void *bm, int cur, int len)
1288 {
1289 	__u32 *addr;
1290 
1291 	len = cur + len;
1292 	while (cur < len) {
1293 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1294 			/* fast path: clear whole word at once */
1295 			addr = bm + (cur >> 3);
1296 			*addr = 0;
1297 			cur += 32;
1298 			continue;
1299 		}
1300 		mb_clear_bit(cur, bm);
1301 		cur++;
1302 	}
1303 }
1304 
1305 /* clear bits in the given range;
1306  * returns the first bit that was already zero (if any), -1 otherwise
1307  */
1308 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1309 {
1310 	__u32 *addr;
1311 	int zero_bit = -1;
1312 
1313 	len = cur + len;
1314 	while (cur < len) {
1315 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1316 			/* fast path: clear whole word at once */
1317 			addr = bm + (cur >> 3);
1318 			if (*addr != (__u32)(-1) && zero_bit == -1)
1319 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1320 			*addr = 0;
1321 			cur += 32;
1322 			continue;
1323 		}
1324 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1325 			zero_bit = cur;
1326 		cur++;
1327 	}
1328 
1329 	return zero_bit;
1330 }
1331 
1332 void ext4_set_bits(void *bm, int cur, int len)
1333 {
1334 	__u32 *addr;
1335 
1336 	len = cur + len;
1337 	while (cur < len) {
1338 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1339 			/* fast path: set whole word at once */
1340 			addr = bm + (cur >> 3);
1341 			*addr = 0xffffffff;
1342 			cur += 32;
1343 			continue;
1344 		}
1345 		mb_set_bit(cur, bm);
1346 		cur++;
1347 	}
1348 }
1349 
1350 /*
1351  * _________________________________________________________________ */
1352 
1353 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1354 {
1355 	if (mb_test_bit(*bit + side, bitmap)) {
1356 		mb_clear_bit(*bit, bitmap);
1357 		(*bit) -= side;
1358 		return 1;
1359 	}
1360 	else {
1361 		(*bit) += side;
1362 		mb_set_bit(*bit, bitmap);
1363 		return -1;
1364 	}
1365 }
1366 
1367 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1368 {
1369 	int max;
1370 	int order = 1;
1371 	void *buddy = mb_find_buddy(e4b, order, &max);
1372 
1373 	while (buddy) {
1374 		void *buddy2;
1375 
1376 		/* Bits in range [first; last] are known to be set since
1377 		 * corresponding blocks were allocated. Bits in range
1378 		 * (first; last) will stay set because they form buddies on
1379 		 * the upper layer. We just deal with the borders if they don't
1380 		 * align with the upper layer and then go up.
1381 		 * Releasing the entire group is all about clearing a
1382 		 * single bit of the highest order buddy.
1383 		 */
1384 
1385 		/* Example:
1386 		 * ---------------------------------
1387 		 * |   1   |   1   |   1   |   1   |
1388 		 * ---------------------------------
1389 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1390 		 * ---------------------------------
1391 		 *   0   1   2   3   4   5   6   7
1392 		 *      \_____________________/
1393 		 *
1394 		 * Neither [1] nor [6] is aligned to above layer.
1395 		 * Left neighbour [0] is free, so mark it busy,
1396 		 * decrease bb_counters and extend range to
1397 		 * [0; 6]
1398 		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1399 		 * mark [6] free, increase bb_counters and shrink range to
1400 		 * [0; 5].
1401 		 * Then shift range to [0; 2], go up and do the same.
1402 		 */
1403 
1404 
1405 		if (first & 1)
1406 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1407 		if (!(last & 1))
1408 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1409 		if (first > last)
1410 			break;
1411 		order++;
1412 
1413 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1414 			mb_clear_bits(buddy, first, last - first + 1);
1415 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1416 			break;
1417 		}
1418 		first >>= 1;
1419 		last >>= 1;
1420 		buddy = buddy2;
1421 	}
1422 }
1423 
1424 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1425 			   int first, int count)
1426 {
1427 	int left_is_free = 0;
1428 	int right_is_free = 0;
1429 	int block;
1430 	int last = first + count - 1;
1431 	struct super_block *sb = e4b->bd_sb;
1432 
1433 	if (WARN_ON(count == 0))
1434 		return;
1435 	BUG_ON(last >= (sb->s_blocksize << 3));
1436 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1437 	/* Don't bother if the block group is corrupt. */
1438 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1439 		return;
1440 
1441 	mb_check_buddy(e4b);
1442 	mb_free_blocks_double(inode, e4b, first, count);
1443 
1444 	e4b->bd_info->bb_free += count;
1445 	if (first < e4b->bd_info->bb_first_free)
1446 		e4b->bd_info->bb_first_free = first;
1447 
1448 	/* access memory sequentially: check left neighbour,
1449 	 * clear range and then check right neighbour
1450 	 */
1451 	if (first != 0)
1452 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1453 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1454 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1455 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1456 
1457 	if (unlikely(block != -1)) {
1458 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1459 		ext4_fsblk_t blocknr;
1460 
1461 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1462 		blocknr += EXT4_C2B(EXT4_SB(sb), block);
1463 		ext4_grp_locked_error(sb, e4b->bd_group,
1464 				      inode ? inode->i_ino : 0,
1465 				      blocknr,
1466 				      "freeing already freed block "
1467 				      "(bit %u); block bitmap corrupt.",
1468 				      block);
1469 		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
1470 			percpu_counter_sub(&sbi->s_freeclusters_counter,
1471 					   e4b->bd_info->bb_free);
1472 		/* Mark the block group as corrupt. */
1473 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1474 			&e4b->bd_info->bb_state);
1475 		mb_regenerate_buddy(e4b);
1476 		goto done;
1477 	}
1478 
1479 	/* let's maintain fragments counter */
1480 	if (left_is_free && right_is_free)
1481 		e4b->bd_info->bb_fragments--;
1482 	else if (!left_is_free && !right_is_free)
1483 		e4b->bd_info->bb_fragments++;
1484 
1485 	/* buddy[0] == bd_bitmap is a special case, so handle
1486 	 * it right away and let mb_buddy_mark_free stay free of
1487 	 * zero order checks.
1488 	 * Check if neighbours are to be coalesced,
1489 	 * adjust bitmap bb_counters and borders appropriately.
1490 	 */
1491 	if (first & 1) {
1492 		first += !left_is_free;
1493 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1494 	}
1495 	if (!(last & 1)) {
1496 		last -= !right_is_free;
1497 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1498 	}
1499 
1500 	if (first <= last)
1501 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1502 
1503 done:
1504 	mb_set_largest_free_order(sb, e4b->bd_info);
1505 	mb_check_buddy(e4b);
1506 }
1507 
1508 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1509 				int needed, struct ext4_free_extent *ex)
1510 {
1511 	int next = block;
1512 	int max, order;
1513 	void *buddy;
1514 
1515 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1516 	BUG_ON(ex == NULL);
1517 
1518 	buddy = mb_find_buddy(e4b, 0, &max);
1519 	BUG_ON(buddy == NULL);
1520 	BUG_ON(block >= max);
1521 	if (mb_test_bit(block, buddy)) {
1522 		ex->fe_len = 0;
1523 		ex->fe_start = 0;
1524 		ex->fe_group = 0;
1525 		return 0;
1526 	}
1527 
1528 	/* find actual order */
1529 	order = mb_find_order_for_block(e4b, block);
1530 	block = block >> order;
1531 
1532 	ex->fe_len = 1 << order;
1533 	ex->fe_start = block << order;
1534 	ex->fe_group = e4b->bd_group;
1535 
1536 	/* calc difference from given start */
1537 	next = next - ex->fe_start;
1538 	ex->fe_len -= next;
1539 	ex->fe_start += next;
1540 
1541 	while (needed > ex->fe_len &&
1542 	       mb_find_buddy(e4b, order, &max)) {
1543 
1544 		if (block + 1 >= max)
1545 			break;
1546 
1547 		next = (block + 1) * (1 << order);
1548 		if (mb_test_bit(next, e4b->bd_bitmap))
1549 			break;
1550 
1551 		order = mb_find_order_for_block(e4b, next);
1552 
1553 		block = next >> order;
1554 		ex->fe_len += 1 << order;
1555 	}
1556 
1557 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1558 	return ex->fe_len;
1559 }
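/* Editor's note (illustrative): mb_find_extent() starts from the buddy
 * chunk containing @block, trims it to begin at @block, and then keeps
 * absorbing the adjacent free buddy chunks to the right until either
 * @needed clusters are covered or a used cluster is hit; it returns the
 * extent length found (0 if @block itself is in use).
 */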
1560 
1561 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1562 {
1563 	int ord;
1564 	int mlen = 0;
1565 	int max = 0;
1566 	int cur;
1567 	int start = ex->fe_start;
1568 	int len = ex->fe_len;
1569 	unsigned ret = 0;
1570 	int len0 = len;
1571 	void *buddy;
1572 
1573 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1574 	BUG_ON(e4b->bd_group != ex->fe_group);
1575 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1576 	mb_check_buddy(e4b);
1577 	mb_mark_used_double(e4b, start, len);
1578 
1579 	e4b->bd_info->bb_free -= len;
1580 	if (e4b->bd_info->bb_first_free == start)
1581 		e4b->bd_info->bb_first_free += len;
1582 
1583 	/* let's maintain fragments counter */
1584 	if (start != 0)
1585 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1586 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1587 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1588 	if (mlen && max)
1589 		e4b->bd_info->bb_fragments++;
1590 	else if (!mlen && !max)
1591 		e4b->bd_info->bb_fragments--;
1592 
1593 	/* let's maintain buddy itself */
1594 	while (len) {
1595 		ord = mb_find_order_for_block(e4b, start);
1596 
1597 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1598 			/* the whole chunk may be allocated at once! */
1599 			mlen = 1 << ord;
1600 			buddy = mb_find_buddy(e4b, ord, &max);
1601 			BUG_ON((start >> ord) >= max);
1602 			mb_set_bit(start >> ord, buddy);
1603 			e4b->bd_info->bb_counters[ord]--;
1604 			start += mlen;
1605 			len -= mlen;
1606 			BUG_ON(len < 0);
1607 			continue;
1608 		}
1609 
1610 		/* store for history */
1611 		if (ret == 0)
1612 			ret = len | (ord << 16);
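		/*
		 * Editor's note: the low 16 bits record the length still
		 * unsatisfied at the first split and the high bits record
		 * the buddy order we had to break; ext4_mb_use_best_found()
		 * unpacks these into ac_tail and ac_buddy.
		 */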
1613 
1614 		/* we have to split large buddy */
1615 		BUG_ON(ord <= 0);
1616 		buddy = mb_find_buddy(e4b, ord, &max);
1617 		mb_set_bit(start >> ord, buddy);
1618 		e4b->bd_info->bb_counters[ord]--;
1619 
1620 		ord--;
1621 		cur = (start >> ord) & ~1U;
1622 		buddy = mb_find_buddy(e4b, ord, &max);
1623 		mb_clear_bit(cur, buddy);
1624 		mb_clear_bit(cur + 1, buddy);
1625 		e4b->bd_info->bb_counters[ord]++;
1626 		e4b->bd_info->bb_counters[ord]++;
1627 	}
1628 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1629 
1630 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1631 	mb_check_buddy(e4b);
1632 
1633 	return ret;
1634 }
1635 
1636 /*
1637  * Must be called under group lock!
1638  */
1639 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1640 					struct ext4_buddy *e4b)
1641 {
1642 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1643 	int ret;
1644 
1645 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1646 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1647 
1648 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1649 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1650 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1651 
1652 	/* preallocation can change ac_b_ex, thus we store actually
1653 	 * allocated blocks for history */
1654 	ac->ac_f_ex = ac->ac_b_ex;
1655 
1656 	ac->ac_status = AC_STATUS_FOUND;
1657 	ac->ac_tail = ret & 0xffff;
1658 	ac->ac_buddy = ret >> 16;
1659 
1660 	/*
1661 	 * take the page reference. We want the page to be pinned
1662 	 * so that we don't get an ext4_mb_init_cache() call for this
1663 	 * group until we update the bitmap. That would mean we
1664 	 * double-allocate blocks. The reference is dropped
1665 	 * in ext4_mb_release_context
1666 	 */
1667 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1668 	get_page(ac->ac_bitmap_page);
1669 	ac->ac_buddy_page = e4b->bd_buddy_page;
1670 	get_page(ac->ac_buddy_page);
1671 	/* store last allocated for subsequent stream allocation */
1672 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1673 		spin_lock(&sbi->s_md_lock);
1674 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1675 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1676 		spin_unlock(&sbi->s_md_lock);
1677 	}
1678 }
1679 
1680 /*
1681  * regular allocator, for general purposes allocation
1682  */
1683 
1684 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1685 					struct ext4_buddy *e4b,
1686 					int finish_group)
1687 {
1688 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1689 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1690 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1691 	struct ext4_free_extent ex;
1692 	int max;
1693 
1694 	if (ac->ac_status == AC_STATUS_FOUND)
1695 		return;
1696 	/*
1697 	 * We don't want to scan for a whole year
1698 	 */
1699 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1700 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1701 		ac->ac_status = AC_STATUS_BREAK;
1702 		return;
1703 	}
1704 
1705 	/*
1706 	 * Haven't found good chunk so far, let's continue
1707 	 */
1708 	if (bex->fe_len < gex->fe_len)
1709 		return;
1710 
1711 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1712 			&& bex->fe_group == e4b->bd_group) {
1713 		/* recheck chunk's availability - we don't know
1714 		 * when it was found (within this lock-unlock
1715 		 * period or not) */
1716 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1717 		if (max >= gex->fe_len) {
1718 			ext4_mb_use_best_found(ac, e4b);
1719 			return;
1720 		}
1721 	}
1722 }
1723 
1724 /*
1725  * The routine checks whether the found extent is good enough. If it is,
1726  * then the extent gets marked used and a flag is set in the context
1727  * to stop scanning. Otherwise, the extent is compared with the
1728  * previously found extent and if the new one is better, it's stored
1729  * in the context. Later, the best found extent will be used, if
1730  * mballoc can't find a good enough extent.
1731  *
1732  * FIXME: real allocation policy is to be designed yet!
1733  */
1734 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1735 					struct ext4_free_extent *ex,
1736 					struct ext4_buddy *e4b)
1737 {
1738 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1739 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1740 
1741 	BUG_ON(ex->fe_len <= 0);
1742 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1743 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1744 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1745 
1746 	ac->ac_found++;
1747 
1748 	/*
1749 	 * The special case - take what you catch first
1750 	 */
1751 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1752 		*bex = *ex;
1753 		ext4_mb_use_best_found(ac, e4b);
1754 		return;
1755 	}
1756 
1757 	/*
1758 	 * Let's check whether the chunk is good enough
1759 	 */
1760 	if (ex->fe_len == gex->fe_len) {
1761 		*bex = *ex;
1762 		ext4_mb_use_best_found(ac, e4b);
1763 		return;
1764 	}
1765 
1766 	/*
1767 	 * If this is first found extent, just store it in the context
1768 	 */
1769 	if (bex->fe_len == 0) {
1770 		*bex = *ex;
1771 		return;
1772 	}
1773 
1774 	/*
1775 	 * If new found extent is better, store it in the context
1776 	 */
1777 	if (bex->fe_len < gex->fe_len) {
1778 		/* if the request isn't satisfied, any found extent
1779 		 * larger than previous best one is better */
1780 		if (ex->fe_len > bex->fe_len)
1781 			*bex = *ex;
1782 	} else if (ex->fe_len > gex->fe_len) {
1783 		/* if the request is satisfied, then we try to find
1784 		 * an extent that still satisfy the request, but is
1785 		 * smaller than previous one */
1786 		if (ex->fe_len < bex->fe_len)
1787 			*bex = *ex;
1788 	}
1789 
1790 	ext4_mb_check_limits(ac, e4b, 0);
1791 }
1792 
1793 static noinline_for_stack
1794 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1795 					struct ext4_buddy *e4b)
1796 {
1797 	struct ext4_free_extent ex = ac->ac_b_ex;
1798 	ext4_group_t group = ex.fe_group;
1799 	int max;
1800 	int err;
1801 
1802 	BUG_ON(ex.fe_len <= 0);
1803 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1804 	if (err)
1805 		return err;
1806 
1807 	ext4_lock_group(ac->ac_sb, group);
1808 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1809 
1810 	if (max > 0) {
1811 		ac->ac_b_ex = ex;
1812 		ext4_mb_use_best_found(ac, e4b);
1813 	}
1814 
1815 	ext4_unlock_group(ac->ac_sb, group);
1816 	ext4_mb_unload_buddy(e4b);
1817 
1818 	return 0;
1819 }
1820 
1821 static noinline_for_stack
1822 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1823 				struct ext4_buddy *e4b)
1824 {
1825 	ext4_group_t group = ac->ac_g_ex.fe_group;
1826 	int max;
1827 	int err;
1828 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1829 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1830 	struct ext4_free_extent ex;
1831 
1832 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1833 		return 0;
1834 	if (grp->bb_free == 0)
1835 		return 0;
1836 
1837 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1838 	if (err)
1839 		return err;
1840 
1841 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1842 		ext4_mb_unload_buddy(e4b);
1843 		return 0;
1844 	}
1845 
1846 	ext4_lock_group(ac->ac_sb, group);
1847 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1848 			     ac->ac_g_ex.fe_len, &ex);
1849 	ex.fe_logical = 0xDEADFA11; /* debug value */
1850 
1851 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1852 		ext4_fsblk_t start;
1853 
1854 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1855 			ex.fe_start;
1856 		/* use do_div to get remainder (would be 64-bit modulo) */
1857 		if (do_div(start, sbi->s_stripe) == 0) {
1858 			ac->ac_found++;
1859 			ac->ac_b_ex = ex;
1860 			ext4_mb_use_best_found(ac, e4b);
1861 		}
1862 	} else if (max >= ac->ac_g_ex.fe_len) {
1863 		BUG_ON(ex.fe_len <= 0);
1864 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1865 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1866 		ac->ac_found++;
1867 		ac->ac_b_ex = ex;
1868 		ext4_mb_use_best_found(ac, e4b);
1869 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1870 		/* Sometimes, the caller may want to merge even a small
1871 		 * number of blocks into an existing extent */
1872 		BUG_ON(ex.fe_len <= 0);
1873 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1874 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1875 		ac->ac_found++;
1876 		ac->ac_b_ex = ex;
1877 		ext4_mb_use_best_found(ac, e4b);
1878 	}
1879 	ext4_unlock_group(ac->ac_sb, group);
1880 	ext4_mb_unload_buddy(e4b);
1881 
1882 	return 0;
1883 }
1884 
1885 /*
1886  * The routine scans buddy structures (not the bitmap!) from the given order
1887  * to the max order and tries to find a big enough chunk to satisfy the request
1888  */
1889 static noinline_for_stack
1890 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1891 					struct ext4_buddy *e4b)
1892 {
1893 	struct super_block *sb = ac->ac_sb;
1894 	struct ext4_group_info *grp = e4b->bd_info;
1895 	void *buddy;
1896 	int i;
1897 	int k;
1898 	int max;
1899 
1900 	BUG_ON(ac->ac_2order <= 0);
1901 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1902 		if (grp->bb_counters[i] == 0)
1903 			continue;
1904 
1905 		buddy = mb_find_buddy(e4b, i, &max);
1906 		BUG_ON(buddy == NULL);
1907 
1908 		k = mb_find_next_zero_bit(buddy, max, 0);
1909 		BUG_ON(k >= max);
1910 
1911 		ac->ac_found++;
1912 
1913 		ac->ac_b_ex.fe_len = 1 << i;
1914 		ac->ac_b_ex.fe_start = k << i;
1915 		ac->ac_b_ex.fe_group = e4b->bd_group;
1916 
1917 		ext4_mb_use_best_found(ac, e4b);
1918 
1919 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1920 
1921 		if (EXT4_SB(sb)->s_mb_stats)
1922 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1923 
1924 		break;
1925 	}
1926 }
1927 
1928 /*
1929  * The routine scans the group and measures all found extents.
1930  * In order to optimize scanning, the caller must pass the number of
1931  * free blocks in the group, so the routine knows the upper limit.
1932  */
1933 static noinline_for_stack
1934 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1935 					struct ext4_buddy *e4b)
1936 {
1937 	struct super_block *sb = ac->ac_sb;
1938 	void *bitmap = e4b->bd_bitmap;
1939 	struct ext4_free_extent ex;
1940 	int i;
1941 	int free;
1942 
1943 	free = e4b->bd_info->bb_free;
1944 	BUG_ON(free <= 0);
1945 
1946 	i = e4b->bd_info->bb_first_free;
1947 
1948 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1949 		i = mb_find_next_zero_bit(bitmap,
1950 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1951 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1952 			/*
1953 			 * If we have a corrupt bitmap, we won't find any
1954 			 * free blocks even though group info says we
1955 			 * have free blocks
1956 			 */
1957 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1958 					"%d free clusters as per "
1959 					"group info. But bitmap says 0",
1960 					free);
1961 			break;
1962 		}
1963 
1964 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
1965 		BUG_ON(ex.fe_len <= 0);
1966 		if (free < ex.fe_len) {
1967 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1968 					"%d free clusters as per "
1969 					"group info. But got %d blocks",
1970 					free, ex.fe_len);
1971 			/*
1972 			 * The number of free blocks differs. This mostly
1973 			 * indicates that the bitmap is corrupt. So exit
1974 			 * without claiming the space.
1975 			 */
1976 			break;
1977 		}
1978 		ex.fe_logical = 0xDEADC0DE; /* debug value */
1979 		ext4_mb_measure_extent(ac, &ex, e4b);
1980 
1981 		i += ex.fe_len;
1982 		free -= ex.fe_len;
1983 	}
1984 
1985 	ext4_mb_check_limits(ac, e4b, 1);
1986 }
1987 
1988 /*
1989  * This is a special case for storage like RAID5:
1990  * we try to find stripe-aligned chunks for stripe-size-multiple requests
1991  */
1992 static noinline_for_stack
1993 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1994 				 struct ext4_buddy *e4b)
1995 {
1996 	struct super_block *sb = ac->ac_sb;
1997 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1998 	void *bitmap = e4b->bd_bitmap;
1999 	struct ext4_free_extent ex;
2000 	ext4_fsblk_t first_group_block;
2001 	ext4_fsblk_t a;
2002 	ext4_grpblk_t i;
2003 	int max;
2004 
2005 	BUG_ON(sbi->s_stripe == 0);
2006 
2007 	/* find first stripe-aligned block in group */
2008 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2009 
2010 	a = first_group_block + sbi->s_stripe - 1;
2011 	do_div(a, sbi->s_stripe);
2012 	i = (a * sbi->s_stripe) - first_group_block;
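	/*
	 * Illustrative arithmetic (made-up values): with
	 * first_group_block == 1000 and s_stripe == 16, a becomes
	 * 1015 / 16 == 63, so i == 63 * 16 - 1000 == 8, the first
	 * group-relative offset that sits on a stripe boundary.
	 */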
2013 
2014 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2015 		if (!mb_test_bit(i, bitmap)) {
2016 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2017 			if (max >= sbi->s_stripe) {
2018 				ac->ac_found++;
2019 				ex.fe_logical = 0xDEADF00D; /* debug value */
2020 				ac->ac_b_ex = ex;
2021 				ext4_mb_use_best_found(ac, e4b);
2022 				break;
2023 			}
2024 		}
2025 		i += sbi->s_stripe;
2026 	}
2027 }
2028 
2029 /*
2030  * This is now called BEFORE we load the buddy bitmap.
2031  * Returns either 1 or 0, indicating whether the group is suitable
2032  * for the allocation or not. In addition it can also return a negative
2033  * error code when something goes wrong.
2034  */
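/*
 * Orientation note (summarizing the switch below): cr == 0 expects an
 * exact power-of-two fit from the buddy, cr == 1 wants the average
 * fragment size to cover the request, cr == 2 only requires enough
 * free clusters in total, and cr == 3 accepts any group.
 */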
2035 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
2036 				ext4_group_t group, int cr)
2037 {
2038 	unsigned free, fragments;
2039 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2040 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2041 
2042 	BUG_ON(cr < 0 || cr >= 4);
2043 
2044 	free = grp->bb_free;
2045 	if (free == 0)
2046 		return 0;
2047 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2048 		return 0;
2049 
2050 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2051 		return 0;
2052 
2053 	/* We only do this if the grp has never been initialized */
2054 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2055 		int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
2056 		if (ret)
2057 			return ret;
2058 	}
2059 
2060 	fragments = grp->bb_fragments;
2061 	if (fragments == 0)
2062 		return 0;
2063 
2064 	switch (cr) {
2065 	case 0:
2066 		BUG_ON(ac->ac_2order == 0);
2067 
2068 		/* Avoid using the first bg of a flexgroup for data files */
2069 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2070 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2071 		    ((group % flex_size) == 0))
2072 			return 0;
2073 
2074 		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
2075 		    (free / fragments) >= ac->ac_g_ex.fe_len)
2076 			return 1;
2077 
2078 		if (grp->bb_largest_free_order < ac->ac_2order)
2079 			return 0;
2080 
2081 		return 1;
2082 	case 1:
2083 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2084 			return 1;
2085 		break;
2086 	case 2:
2087 		if (free >= ac->ac_g_ex.fe_len)
2088 			return 1;
2089 		break;
2090 	case 3:
2091 		return 1;
2092 	default:
2093 		BUG();
2094 	}
2095 
2096 	return 0;
2097 }
2098 
2099 static noinline_for_stack int
2100 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2101 {
2102 	ext4_group_t ngroups, group, i;
2103 	int cr;
2104 	int err = 0, first_err = 0;
2105 	struct ext4_sb_info *sbi;
2106 	struct super_block *sb;
2107 	struct ext4_buddy e4b;
2108 
2109 	sb = ac->ac_sb;
2110 	sbi = EXT4_SB(sb);
2111 	ngroups = ext4_get_groups_count(sb);
2112 	/* non-extent files are limited to low blocks/groups */
2113 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2114 		ngroups = sbi->s_blockfile_groups;
2115 
2116 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2117 
2118 	/* first, try the goal */
2119 	err = ext4_mb_find_by_goal(ac, &e4b);
2120 	if (err || ac->ac_status == AC_STATUS_FOUND)
2121 		goto out;
2122 
2123 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2124 		goto out;
2125 
2126 	/*
2127 	 * ac->ac_2order is set only if the fe_len is a power of 2;
2128 	 * if ac_2order is set we also set the criteria to 0 so that we
2129 	 * try exact allocation using the buddy.
2130 	 */
2131 	i = fls(ac->ac_g_ex.fe_len);
2132 	ac->ac_2order = 0;
2133 	/*
2134 	 * We search using buddy data only if the order of the request
2135 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2136 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2137 	 */
2138 	if (i >= sbi->s_mb_order2_reqs) {
2139 		/*
2140 		 * This should tell if fe_len is exactly a power of 2
2141 		 */
2142 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2143 			ac->ac_2order = i - 1;
2144 	}
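	/*
	 * Worked example (illustrative): for fe_len == 8, fls(8) == 4 and
	 * 8 & ~(1 << 3) == 0, so ac_2order becomes 3 and the cr == 0 pass
	 * below can try to satisfy the request straight from the order-3
	 * buddy bitmap.
	 */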
2145 
2146 	/* if stream allocation is enabled, use global goal */
2147 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2148 		/* TBD: may be a hot spot */
2149 		spin_lock(&sbi->s_md_lock);
2150 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2151 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2152 		spin_unlock(&sbi->s_md_lock);
2153 	}
2154 
2155 	/* Let's just scan groups to find more or less suitable blocks */
2156 	cr = ac->ac_2order ? 0 : 1;
2157 	/*
2158 	 * cr == 0 tries to get an exact allocation,
2159 	 * cr == 3 tries to get anything
2160 	 */
2161 repeat:
2162 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2163 		ac->ac_criteria = cr;
2164 		/*
2165 		 * searching for the right group start
2166 		 * from the goal value specified
2167 		 */
2168 		group = ac->ac_g_ex.fe_group;
2169 
2170 		for (i = 0; i < ngroups; group++, i++) {
2171 			int ret = 0;
2172 			cond_resched();
2173 			/*
2174 			 * Artificially restricted ngroups for non-extent
2175 			 * files makes group > ngroups possible on first loop.
2176 			 */
2177 			if (group >= ngroups)
2178 				group = 0;
2179 
2180 			/* This now checks without needing the buddy page */
2181 			ret = ext4_mb_good_group(ac, group, cr);
2182 			if (ret <= 0) {
2183 				if (!first_err)
2184 					first_err = ret;
2185 				continue;
2186 			}
2187 
2188 			err = ext4_mb_load_buddy(sb, group, &e4b);
2189 			if (err)
2190 				goto out;
2191 
2192 			ext4_lock_group(sb, group);
2193 
2194 			/*
2195 			 * We need to check again after locking the
2196 			 * block group
2197 			 */
2198 			ret = ext4_mb_good_group(ac, group, cr);
2199 			if (ret <= 0) {
2200 				ext4_unlock_group(sb, group);
2201 				ext4_mb_unload_buddy(&e4b);
2202 				if (!first_err)
2203 					first_err = ret;
2204 				continue;
2205 			}
2206 
2207 			ac->ac_groups_scanned++;
2208 			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
2209 				ext4_mb_simple_scan_group(ac, &e4b);
2210 			else if (cr == 1 && sbi->s_stripe &&
2211 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2212 				ext4_mb_scan_aligned(ac, &e4b);
2213 			else
2214 				ext4_mb_complex_scan_group(ac, &e4b);
2215 
2216 			ext4_unlock_group(sb, group);
2217 			ext4_mb_unload_buddy(&e4b);
2218 
2219 			if (ac->ac_status != AC_STATUS_CONTINUE)
2220 				break;
2221 		}
2222 	}
2223 
2224 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2225 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2226 		/*
2227 		 * We've been searching too long. Let's try to allocate
2228 		 * the best chunk we've found so far
2229 		 */
2230 
2231 		ext4_mb_try_best_found(ac, &e4b);
2232 		if (ac->ac_status != AC_STATUS_FOUND) {
2233 			/*
2234 			 * Someone more lucky has already allocated it.
2235 			 * The only thing we can do is just take first
2236 			 * found block(s)
2237 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2238 			 */
2239 			ac->ac_b_ex.fe_group = 0;
2240 			ac->ac_b_ex.fe_start = 0;
2241 			ac->ac_b_ex.fe_len = 0;
2242 			ac->ac_status = AC_STATUS_CONTINUE;
2243 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2244 			cr = 3;
2245 			atomic_inc(&sbi->s_mb_lost_chunks);
2246 			goto repeat;
2247 		}
2248 	}
2249 out:
2250 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2251 		err = first_err;
2252 	return err;
2253 }
2254 
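/*
 * In the seq_file callbacks below, groups are handed around as
 * (pos + 1) cast to a pointer, since returning NULL from ->start or
 * ->next means "stop iterating"; ->show undoes the offset with the
 * group-- at its top.
 */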
2255 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2256 {
2257 	struct super_block *sb = seq->private;
2258 	ext4_group_t group;
2259 
2260 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2261 		return NULL;
2262 	group = *pos + 1;
2263 	return (void *) ((unsigned long) group);
2264 }
2265 
2266 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2267 {
2268 	struct super_block *sb = seq->private;
2269 	ext4_group_t group;
2270 
2271 	++*pos;
2272 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2273 		return NULL;
2274 	group = *pos + 1;
2275 	return (void *) ((unsigned long) group);
2276 }
2277 
2278 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2279 {
2280 	struct super_block *sb = seq->private;
2281 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2282 	int i;
2283 	int err, buddy_loaded = 0;
2284 	struct ext4_buddy e4b;
2285 	struct ext4_group_info *grinfo;
2286 	struct sg {
2287 		struct ext4_group_info info;
2288 		ext4_grpblk_t counters[16];
2289 	} sg;
2290 
2291 	group--;
2292 	if (group == 0)
2293 		seq_puts(seq, "#group: free  frags first ["
2294 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2295 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2296 
2297 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2298 		sizeof(struct ext4_group_info);
2299 	grinfo = ext4_get_group_info(sb, group);
2300 	/* Load the group info in memory only if not already loaded. */
2301 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2302 		err = ext4_mb_load_buddy(sb, group, &e4b);
2303 		if (err) {
2304 			seq_printf(seq, "#%-5u: I/O error\n", group);
2305 			return 0;
2306 		}
2307 		buddy_loaded = 1;
2308 	}
2309 
2310 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2311 
2312 	if (buddy_loaded)
2313 		ext4_mb_unload_buddy(&e4b);
2314 
2315 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2316 			sg.info.bb_fragments, sg.info.bb_first_free);
2317 	for (i = 0; i <= 13; i++)
2318 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2319 				sg.info.bb_counters[i] : 0);
2320 	seq_printf(seq, " ]\n");
2321 
2322 	return 0;
2323 }
2324 
2325 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2326 {
2327 }
2328 
2329 static const struct seq_operations ext4_mb_seq_groups_ops = {
2330 	.start  = ext4_mb_seq_groups_start,
2331 	.next   = ext4_mb_seq_groups_next,
2332 	.stop   = ext4_mb_seq_groups_stop,
2333 	.show   = ext4_mb_seq_groups_show,
2334 };
2335 
2336 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2337 {
2338 	struct super_block *sb = PDE_DATA(inode);
2339 	int rc;
2340 
2341 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2342 	if (rc == 0) {
2343 		struct seq_file *m = file->private_data;
2344 		m->private = sb;
2345 	}
2346 	return rc;
2347 
2348 }
2349 
2350 const struct file_operations ext4_seq_mb_groups_fops = {
2351 	.owner		= THIS_MODULE,
2352 	.open		= ext4_mb_seq_groups_open,
2353 	.read		= seq_read,
2354 	.llseek		= seq_lseek,
2355 	.release	= seq_release,
2356 };
2357 
2358 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2359 {
2360 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2361 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2362 
2363 	BUG_ON(!cachep);
2364 	return cachep;
2365 }
2366 
2367 /*
2368  * Allocate the top-level s_group_info array for the specified number
2369  * of groups
2370  */
2371 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2372 {
2373 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2374 	unsigned size;
2375 	struct ext4_group_info ***new_groupinfo;
2376 
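	/*
	 * s_group_info is a two-level array: the top level holds one
	 * pointer per descriptor-block's worth of groups, hence the
	 * round-up by EXT4_DESC_PER_BLOCK() below.
	 */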
2377 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2378 		EXT4_DESC_PER_BLOCK_BITS(sb);
2379 	if (size <= sbi->s_group_info_size)
2380 		return 0;
2381 
2382 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2383 	new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
2384 	if (!new_groupinfo) {
2385 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2386 		return -ENOMEM;
2387 	}
2388 	if (sbi->s_group_info) {
2389 		memcpy(new_groupinfo, sbi->s_group_info,
2390 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2391 		kvfree(sbi->s_group_info);
2392 	}
2393 	sbi->s_group_info = new_groupinfo;
2394 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2395 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2396 		   sbi->s_group_info_size);
2397 	return 0;
2398 }
2399 
2400 /* Create and initialize ext4_group_info data for the given group. */
2401 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2402 			  struct ext4_group_desc *desc)
2403 {
2404 	int i;
2405 	int metalen = 0;
2406 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2407 	struct ext4_group_info **meta_group_info;
2408 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2409 
2410 	/*
2411 	 * First check if this group is the first of a reserved block.
2412 	 * If so, we have to allocate a new table of pointers
2413 	 * to ext4_group_info structures
2414 	 */
2415 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2416 		metalen = sizeof(*meta_group_info) <<
2417 			EXT4_DESC_PER_BLOCK_BITS(sb);
2418 		meta_group_info = kmalloc(metalen, GFP_NOFS);
2419 		if (meta_group_info == NULL) {
2420 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2421 				 "for a buddy group");
2422 			goto exit_meta_group_info;
2423 		}
2424 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2425 			meta_group_info;
2426 	}
2427 
2428 	meta_group_info =
2429 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2430 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2431 
2432 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2433 	if (meta_group_info[i] == NULL) {
2434 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2435 		goto exit_group_info;
2436 	}
2437 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2438 		&(meta_group_info[i]->bb_state));
2439 
2440 	/*
2441 	 * initialize bb_free to be able to skip
2442 	 * empty groups without initialization
2443 	 */
2444 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2445 		meta_group_info[i]->bb_free =
2446 			ext4_free_clusters_after_init(sb, group, desc);
2447 	} else {
2448 		meta_group_info[i]->bb_free =
2449 			ext4_free_group_clusters(sb, desc);
2450 	}
2451 
2452 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2453 	init_rwsem(&meta_group_info[i]->alloc_sem);
2454 	meta_group_info[i]->bb_free_root = RB_ROOT;
2455 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2456 
2457 #ifdef DOUBLE_CHECK
2458 	{
2459 		struct buffer_head *bh;
2460 		meta_group_info[i]->bb_bitmap =
2461 			kmalloc(sb->s_blocksize, GFP_NOFS);
2462 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2463 		bh = ext4_read_block_bitmap(sb, group);
2464 		BUG_ON(IS_ERR_OR_NULL(bh));
2465 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2466 			sb->s_blocksize);
2467 		put_bh(bh);
2468 	}
2469 #endif
2470 
2471 	return 0;
2472 
2473 exit_group_info:
2474 	/* If a meta_group_info table has been allocated, release it now */
2475 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2476 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2477 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2478 	}
2479 exit_meta_group_info:
2480 	return -ENOMEM;
2481 } /* ext4_mb_add_groupinfo */
2482 
2483 static int ext4_mb_init_backend(struct super_block *sb)
2484 {
2485 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2486 	ext4_group_t i;
2487 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2488 	int err;
2489 	struct ext4_group_desc *desc;
2490 	struct kmem_cache *cachep;
2491 
2492 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2493 	if (err)
2494 		return err;
2495 
2496 	sbi->s_buddy_cache = new_inode(sb);
2497 	if (sbi->s_buddy_cache == NULL) {
2498 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2499 		goto err_freesgi;
2500 	}
2501 	/* To avoid potentially colliding with a valid on-disk inode number,
2502 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2503 	 * not in the inode hash, so it should never be found by iget(), but
2504 	 * this will avoid confusion if it ever shows up during debugging. */
2505 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2506 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2507 	for (i = 0; i < ngroups; i++) {
2508 		desc = ext4_get_group_desc(sb, i, NULL);
2509 		if (desc == NULL) {
2510 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2511 			goto err_freebuddy;
2512 		}
2513 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2514 			goto err_freebuddy;
2515 	}
2516 
2517 	return 0;
2518 
2519 err_freebuddy:
2520 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2521 	while (i-- > 0)
2522 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2523 	i = sbi->s_group_info_size;
2524 	while (i-- > 0)
2525 		kfree(sbi->s_group_info[i]);
2526 	iput(sbi->s_buddy_cache);
2527 err_freesgi:
2528 	kvfree(sbi->s_group_info);
2529 	return -ENOMEM;
2530 }
2531 
2532 static void ext4_groupinfo_destroy_slabs(void)
2533 {
2534 	int i;
2535 
2536 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2537 		if (ext4_groupinfo_caches[i])
2538 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2539 		ext4_groupinfo_caches[i] = NULL;
2540 	}
2541 }
2542 
2543 static int ext4_groupinfo_create_slab(size_t size)
2544 {
2545 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2546 	int slab_size;
2547 	int blocksize_bits = order_base_2(size);
2548 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2549 	struct kmem_cache *cachep;
2550 
2551 	if (cache_index >= NR_GRPINFO_CACHES)
2552 		return -EINVAL;
2553 
2554 	if (unlikely(cache_index < 0))
2555 		cache_index = 0;
2556 
2557 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2558 	if (ext4_groupinfo_caches[cache_index]) {
2559 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2560 		return 0;	/* Already created */
2561 	}
2562 
2563 	slab_size = offsetof(struct ext4_group_info,
2564 				bb_counters[blocksize_bits + 2]);
2565 
2566 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2567 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2568 					NULL);
2569 
2570 	ext4_groupinfo_caches[cache_index] = cachep;
2571 
2572 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2573 	if (!cachep) {
2574 		printk(KERN_EMERG
2575 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2576 		return -ENOMEM;
2577 	}
2578 
2579 	return 0;
2580 }
2581 
2582 int ext4_mb_init(struct super_block *sb)
2583 {
2584 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2585 	unsigned i, j;
2586 	unsigned offset;
2587 	unsigned max;
2588 	int ret;
2589 
2590 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2591 
2592 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2593 	if (sbi->s_mb_offsets == NULL) {
2594 		ret = -ENOMEM;
2595 		goto out;
2596 	}
2597 
2598 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2599 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2600 	if (sbi->s_mb_maxs == NULL) {
2601 		ret = -ENOMEM;
2602 		goto out;
2603 	}
2604 
2605 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2606 	if (ret < 0)
2607 		goto out;
2608 
2609 	/* order 0 is regular bitmap */
2610 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2611 	sbi->s_mb_offsets[0] = 0;
2612 
2613 	i = 1;
2614 	offset = 0;
2615 	max = sb->s_blocksize << 2;
2616 	do {
2617 		sbi->s_mb_offsets[i] = offset;
2618 		sbi->s_mb_maxs[i] = max;
2619 		offset += 1 << (sb->s_blocksize_bits - i);
2620 		max = max >> 1;
2621 		i++;
2622 	} while (i <= sb->s_blocksize_bits + 1);
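	/*
	 * Worked example (illustrative, 4k blocks, s_blocksize_bits == 12):
	 * s_mb_maxs[0] == 32768 bits for the regular bitmap; the order-1
	 * buddy starts at byte offset 0 with 16384 bits, order-2 at offset
	 * 2048 with 8192 bits, and so on -- each order halves max and
	 * advances offset by 1 << (12 - i) bytes.
	 */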
2623 
2624 	spin_lock_init(&sbi->s_md_lock);
2625 	spin_lock_init(&sbi->s_bal_lock);
2626 
2627 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2628 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2629 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2630 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2631 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2632 	/*
2633 	 * The default group preallocation is 512, which for 4k block
2634 	 * sizes translates to 2 megabytes.  However for bigalloc file
2635 	 * systems, this is probably too big (i.e, if the cluster size
2636 	 * is 1 megabyte, then group preallocation size becomes half a
2637 	 * gigabyte!).  As a default, we will keep a two megabyte
2638 	 * group prealloc size for cluster sizes up to 64k, and after
2639 	 * that, we will force a minimum group preallocation size of
2640 	 * 32 clusters.  This translates to 8 megs when the cluster
2641 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2642 	 * which seems reasonable as a default.
2643 	 */
2644 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2645 				       sbi->s_cluster_bits, 32);
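	/*
	 * E.g. (following the comment above): with 4k blocks and a 1 MB
	 * cluster, s_cluster_bits == 8, so 512 >> 8 == 2 and the max()
	 * clamps the group preallocation to 32 clusters, i.e. 32 MB.
	 */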
2646 	/*
2647 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2648 	 * to the lowest multiple of s_stripe which is bigger than
2649 	 * the s_mb_group_prealloc as determined above. We want
2650 	 * the preallocation size to be an exact multiple of the
2651 	 * RAID stripe size so that preallocations don't fragment
2652 	 * the stripes.
2653 	 */
2654 	if (sbi->s_stripe > 1) {
2655 		sbi->s_mb_group_prealloc = roundup(
2656 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2657 	}
2658 
2659 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2660 	if (sbi->s_locality_groups == NULL) {
2661 		ret = -ENOMEM;
2662 		goto out;
2663 	}
2664 	for_each_possible_cpu(i) {
2665 		struct ext4_locality_group *lg;
2666 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2667 		mutex_init(&lg->lg_mutex);
2668 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2669 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2670 		spin_lock_init(&lg->lg_prealloc_lock);
2671 	}
2672 
2673 	/* init file for buddy data */
2674 	ret = ext4_mb_init_backend(sb);
2675 	if (ret != 0)
2676 		goto out_free_locality_groups;
2677 
2678 	return 0;
2679 
2680 out_free_locality_groups:
2681 	free_percpu(sbi->s_locality_groups);
2682 	sbi->s_locality_groups = NULL;
2683 out:
2684 	kfree(sbi->s_mb_offsets);
2685 	sbi->s_mb_offsets = NULL;
2686 	kfree(sbi->s_mb_maxs);
2687 	sbi->s_mb_maxs = NULL;
2688 	return ret;
2689 }
2690 
2691 /* needs to be called with the ext4 group lock held */
2692 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2693 {
2694 	struct ext4_prealloc_space *pa;
2695 	struct list_head *cur, *tmp;
2696 	int count = 0;
2697 
2698 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2699 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2700 		list_del(&pa->pa_group_list);
2701 		count++;
2702 		kmem_cache_free(ext4_pspace_cachep, pa);
2703 	}
2704 	if (count)
2705 		mb_debug(1, "mballoc: %u PAs left\n", count);
2706 
2707 }
2708 
2709 int ext4_mb_release(struct super_block *sb)
2710 {
2711 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2712 	ext4_group_t i;
2713 	int num_meta_group_infos;
2714 	struct ext4_group_info *grinfo;
2715 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2716 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2717 
2718 	if (sbi->s_group_info) {
2719 		for (i = 0; i < ngroups; i++) {
2720 			grinfo = ext4_get_group_info(sb, i);
2721 #ifdef DOUBLE_CHECK
2722 			kfree(grinfo->bb_bitmap);
2723 #endif
2724 			ext4_lock_group(sb, i);
2725 			ext4_mb_cleanup_pa(grinfo);
2726 			ext4_unlock_group(sb, i);
2727 			kmem_cache_free(cachep, grinfo);
2728 		}
2729 		num_meta_group_infos = (ngroups +
2730 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2731 			EXT4_DESC_PER_BLOCK_BITS(sb);
2732 		for (i = 0; i < num_meta_group_infos; i++)
2733 			kfree(sbi->s_group_info[i]);
2734 		kvfree(sbi->s_group_info);
2735 	}
2736 	kfree(sbi->s_mb_offsets);
2737 	kfree(sbi->s_mb_maxs);
2738 	iput(sbi->s_buddy_cache);
2739 	if (sbi->s_mb_stats) {
2740 		ext4_msg(sb, KERN_INFO,
2741 		       "mballoc: %u blocks %u reqs (%u success)",
2742 				atomic_read(&sbi->s_bal_allocated),
2743 				atomic_read(&sbi->s_bal_reqs),
2744 				atomic_read(&sbi->s_bal_success));
2745 		ext4_msg(sb, KERN_INFO,
2746 		      "mballoc: %u extents scanned, %u goal hits, "
2747 				"%u 2^N hits, %u breaks, %u lost",
2748 				atomic_read(&sbi->s_bal_ex_scanned),
2749 				atomic_read(&sbi->s_bal_goals),
2750 				atomic_read(&sbi->s_bal_2orders),
2751 				atomic_read(&sbi->s_bal_breaks),
2752 				atomic_read(&sbi->s_mb_lost_chunks));
2753 		ext4_msg(sb, KERN_INFO,
2754 		       "mballoc: %lu generated and it took %Lu",
2755 				sbi->s_mb_buddies_generated,
2756 				sbi->s_mb_generation_time);
2757 		ext4_msg(sb, KERN_INFO,
2758 		       "mballoc: %u preallocated, %u discarded",
2759 				atomic_read(&sbi->s_mb_preallocated),
2760 				atomic_read(&sbi->s_mb_discarded));
2761 	}
2762 
2763 	free_percpu(sbi->s_locality_groups);
2764 
2765 	return 0;
2766 }
2767 
2768 static inline int ext4_issue_discard(struct super_block *sb,
2769 		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
2770 {
2771 	ext4_fsblk_t discard_block;
2772 
2773 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2774 			 ext4_group_first_block_no(sb, block_group));
2775 	count = EXT4_C2B(EXT4_SB(sb), count);
2776 	trace_ext4_discard_blocks(sb,
2777 			(unsigned long long) discard_block, count);
2778 	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2779 }
2780 
2781 /*
2782  * This function is called by the jbd2 layer once the commit has finished,
2783  * so we know we can free the blocks that were released with that commit.
2784  */
2785 static void ext4_free_data_callback(struct super_block *sb,
2786 				    struct ext4_journal_cb_entry *jce,
2787 				    int rc)
2788 {
2789 	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
2790 	struct ext4_buddy e4b;
2791 	struct ext4_group_info *db;
2792 	int err, count = 0, count2 = 0;
2793 
2794 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2795 		 entry->efd_count, entry->efd_group, entry);
2796 
2797 	if (test_opt(sb, DISCARD)) {
2798 		err = ext4_issue_discard(sb, entry->efd_group,
2799 					 entry->efd_start_cluster,
2800 					 entry->efd_count);
2801 		if (err && err != -EOPNOTSUPP)
2802 			ext4_msg(sb, KERN_WARNING, "discard request in"
2803 				 " group:%d block:%d count:%d failed"
2804 				 " with %d", entry->efd_group,
2805 				 entry->efd_start_cluster,
2806 				 entry->efd_count, err);
2807 	}
2808 
2809 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2810 	/* we expect to find existing buddy because it's pinned */
2811 	BUG_ON(err != 0);
2812 
2813 
2814 	db = e4b.bd_info;
2815 	/* there are blocks to put in buddy to make them really free */
2816 	count += entry->efd_count;
2817 	count2++;
2818 	ext4_lock_group(sb, entry->efd_group);
2819 	/* Take it out of per group rb tree */
2820 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2821 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2822 
2823 	/*
2824 	 * Clear the trimmed flag for the group so that the next
2825 	 * ext4_trim_fs can trim it.
2826 	 * If the volume is mounted with -o discard, online discard
2827 	 * is supported and the free blocks will be trimmed online.
2828 	 */
2829 	if (!test_opt(sb, DISCARD))
2830 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2831 
2832 	if (!db->bb_free_root.rb_node) {
2833 		/* No more items in the per group rb tree;
2834 		 * balance refcounts from ext4_mb_free_metadata()
2835 		 */
2836 		put_page(e4b.bd_buddy_page);
2837 		put_page(e4b.bd_bitmap_page);
2838 	}
2839 	ext4_unlock_group(sb, entry->efd_group);
2840 	kmem_cache_free(ext4_free_data_cachep, entry);
2841 	ext4_mb_unload_buddy(&e4b);
2842 
2843 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2844 }
2845 
2846 int __init ext4_init_mballoc(void)
2847 {
2848 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2849 					SLAB_RECLAIM_ACCOUNT);
2850 	if (ext4_pspace_cachep == NULL)
2851 		return -ENOMEM;
2852 
2853 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2854 				    SLAB_RECLAIM_ACCOUNT);
2855 	if (ext4_ac_cachep == NULL) {
2856 		kmem_cache_destroy(ext4_pspace_cachep);
2857 		return -ENOMEM;
2858 	}
2859 
2860 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2861 					   SLAB_RECLAIM_ACCOUNT);
2862 	if (ext4_free_data_cachep == NULL) {
2863 		kmem_cache_destroy(ext4_pspace_cachep);
2864 		kmem_cache_destroy(ext4_ac_cachep);
2865 		return -ENOMEM;
2866 	}
2867 	return 0;
2868 }
2869 
2870 void ext4_exit_mballoc(void)
2871 {
2872 	/*
2873 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2874 	 * before destroying the slab cache.
2875 	 */
2876 	rcu_barrier();
2877 	kmem_cache_destroy(ext4_pspace_cachep);
2878 	kmem_cache_destroy(ext4_ac_cachep);
2879 	kmem_cache_destroy(ext4_free_data_cachep);
2880 	ext4_groupinfo_destroy_slabs();
2881 }
2882 
2883 
2884 /*
2885  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2886  * Returns 0 on success or an error code
2887  */
2888 static noinline_for_stack int
2889 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2890 				handle_t *handle, unsigned int reserv_clstrs)
2891 {
2892 	struct buffer_head *bitmap_bh = NULL;
2893 	struct ext4_group_desc *gdp;
2894 	struct buffer_head *gdp_bh;
2895 	struct ext4_sb_info *sbi;
2896 	struct super_block *sb;
2897 	ext4_fsblk_t block;
2898 	int err, len;
2899 
2900 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2901 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2902 
2903 	sb = ac->ac_sb;
2904 	sbi = EXT4_SB(sb);
2905 
2906 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2907 	if (IS_ERR(bitmap_bh)) {
2908 		err = PTR_ERR(bitmap_bh);
2909 		bitmap_bh = NULL;
2910 		goto out_err;
2911 	}
2912 
2913 	BUFFER_TRACE(bitmap_bh, "getting write access");
2914 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2915 	if (err)
2916 		goto out_err;
2917 
2918 	err = -EIO;
2919 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2920 	if (!gdp)
2921 		goto out_err;
2922 
2923 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2924 			ext4_free_group_clusters(sb, gdp));
2925 
2926 	BUFFER_TRACE(gdp_bh, "get_write_access");
2927 	err = ext4_journal_get_write_access(handle, gdp_bh);
2928 	if (err)
2929 		goto out_err;
2930 
2931 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2932 
2933 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2934 	if (!ext4_data_block_valid(sbi, block, len)) {
2935 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2936 			   "fs metadata", block, block+len);
2937 		/* The file system is mounted not to panic on error.
2938 		 * Fix the bitmap and repeat the block allocation.
2939 		 * We leak some of the blocks here.
2940 		 */
2941 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2942 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2943 			      ac->ac_b_ex.fe_len);
2944 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2945 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2946 		if (!err)
2947 			err = -EAGAIN;
2948 		goto out_err;
2949 	}
2950 
2951 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2952 #ifdef AGGRESSIVE_CHECK
2953 	{
2954 		int i;
2955 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2956 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2957 						bitmap_bh->b_data));
2958 		}
2959 	}
2960 #endif
2961 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2962 		      ac->ac_b_ex.fe_len);
2963 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2964 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2965 		ext4_free_group_clusters_set(sb, gdp,
2966 					     ext4_free_clusters_after_init(sb,
2967 						ac->ac_b_ex.fe_group, gdp));
2968 	}
2969 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2970 	ext4_free_group_clusters_set(sb, gdp, len);
2971 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
2972 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2973 
2974 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2975 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
2976 	/*
2977 	 * Now reduce the dirty block count also. Should not go negative
2978 	 */
2979 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2980 		/* release all the reserved blocks if non delalloc */
2981 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2982 				   reserv_clstrs);
2983 
2984 	if (sbi->s_log_groups_per_flex) {
2985 		ext4_group_t flex_group = ext4_flex_group(sbi,
2986 							  ac->ac_b_ex.fe_group);
2987 		atomic64_sub(ac->ac_b_ex.fe_len,
2988 			     &sbi->s_flex_groups[flex_group].free_clusters);
2989 	}
2990 
2991 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2992 	if (err)
2993 		goto out_err;
2994 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2995 
2996 out_err:
2997 	brelse(bitmap_bh);
2998 	return err;
2999 }
3000 
3001 /*
3002  * Here we normalize the request for a locality group.
3003  * Group requests are normalized to s_mb_group_prealloc, which is derived
3004  * from s_stripe if the latter is set via a mount option.
3005  * s_mb_group_prealloc can be configured via
3006  * /sys/fs/ext4/<partition>/mb_group_prealloc
3007  *
3008  * XXX: should we try to preallocate more than the group has now?
3009  */
3010 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3011 {
3012 	struct super_block *sb = ac->ac_sb;
3013 	struct ext4_locality_group *lg = ac->ac_lg;
3014 
3015 	BUG_ON(lg == NULL);
3016 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3017 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
3018 		current->pid, ac->ac_g_ex.fe_len);
3019 }
3020 
3021 /*
3022  * Normalization means making the request better in terms of
3023  * size and alignment
3024  */
3025 static noinline_for_stack void
3026 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3027 				struct ext4_allocation_request *ar)
3028 {
3029 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3030 	int bsbits, max;
3031 	ext4_lblk_t end;
3032 	loff_t size, start_off;
3033 	loff_t orig_size __maybe_unused;
3034 	ext4_lblk_t start;
3035 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3036 	struct ext4_prealloc_space *pa;
3037 
3038 	/* only normalize data requests; metadata requests
3039 	   do not need preallocation */
3040 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3041 		return;
3042 
3043 	/* sometimes the caller may want exact blocks */
3044 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3045 		return;
3046 
3047 	/* caller may indicate that preallocation isn't
3048 	 * required (it's a tail, for example) */
3049 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3050 		return;
3051 
3052 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3053 		ext4_mb_normalize_group_request(ac);
3054 		return;
3055 	}
3056 
3057 	bsbits = ac->ac_sb->s_blocksize_bits;
3058 
3059 	/* first, let's learn the actual file size
3060 	 * assuming the current request is allocated */
3061 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3062 	size = size << bsbits;
3063 	if (size < i_size_read(ac->ac_inode))
3064 		size = i_size_read(ac->ac_inode);
3065 	orig_size = size;
3066 
3067 	/* max size of free chunks */
3068 	max = 2 << bsbits;
3069 
3070 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
3071 		(req <= (size) || max <= (chunk_size))
3072 
3073 	/* first, try to predict filesize */
3074 	/* XXX: should this table be tunable? */
3075 	start_off = 0;
3076 	if (size <= 16 * 1024) {
3077 		size = 16 * 1024;
3078 	} else if (size <= 32 * 1024) {
3079 		size = 32 * 1024;
3080 	} else if (size <= 64 * 1024) {
3081 		size = 64 * 1024;
3082 	} else if (size <= 128 * 1024) {
3083 		size = 128 * 1024;
3084 	} else if (size <= 256 * 1024) {
3085 		size = 256 * 1024;
3086 	} else if (size <= 512 * 1024) {
3087 		size = 512 * 1024;
3088 	} else if (size <= 1024 * 1024) {
3089 		size = 1024 * 1024;
3090 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3091 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3092 						(21 - bsbits)) << 21;
3093 		size = 2 * 1024 * 1024;
3094 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3095 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3096 							(22 - bsbits)) << 22;
3097 		size = 4 * 1024 * 1024;
3098 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3099 					(8<<20)>>bsbits, max, 8 * 1024)) {
3100 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3101 							(23 - bsbits)) << 23;
3102 		size = 8 * 1024 * 1024;
3103 	} else {
3104 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3105 		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3106 					      ac->ac_o_ex.fe_len) << bsbits;
3107 	}
3108 	size = size >> bsbits;
3109 	start = start_off >> bsbits;
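	/*
	 * Illustrative example: a file that would be 100k after this
	 * allocation lands in the "size <= 128 * 1024" bucket above, so
	 * the goal is normalized to a 128k window starting at
	 * start_off == 0, i.e. logical block 0.
	 */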
3110 
3111 	/* don't cover already allocated blocks in selected range */
3112 	if (ar->pleft && start <= ar->lleft) {
3113 		size -= ar->lleft + 1 - start;
3114 		start = ar->lleft + 1;
3115 	}
3116 	if (ar->pright && start + size - 1 >= ar->lright)
3117 		size -= start + size - ar->lright;
3118 
3119 	end = start + size;
3120 
3121 	/* check we don't cross already preallocated blocks */
3122 	rcu_read_lock();
3123 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3124 		ext4_lblk_t pa_end;
3125 
3126 		if (pa->pa_deleted)
3127 			continue;
3128 		spin_lock(&pa->pa_lock);
3129 		if (pa->pa_deleted) {
3130 			spin_unlock(&pa->pa_lock);
3131 			continue;
3132 		}
3133 
3134 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3135 						  pa->pa_len);
3136 
3137 		/* PA must not overlap original request */
3138 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3139 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3140 
3141 		/* skip PAs this normalized request doesn't overlap with */
3142 		if (pa->pa_lstart >= end || pa_end <= start) {
3143 			spin_unlock(&pa->pa_lock);
3144 			continue;
3145 		}
3146 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3147 
3148 		/* adjust start or end to be adjacent to this pa */
3149 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3150 			BUG_ON(pa_end < start);
3151 			start = pa_end;
3152 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3153 			BUG_ON(pa->pa_lstart > end);
3154 			end = pa->pa_lstart;
3155 		}
3156 		spin_unlock(&pa->pa_lock);
3157 	}
3158 	rcu_read_unlock();
3159 	size = end - start;
3160 
3161 	/* XXX: extra loop to check we really don't overlap preallocations */
3162 	rcu_read_lock();
3163 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3164 		ext4_lblk_t pa_end;
3165 
3166 		spin_lock(&pa->pa_lock);
3167 		if (pa->pa_deleted == 0) {
3168 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3169 							  pa->pa_len);
3170 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3171 		}
3172 		spin_unlock(&pa->pa_lock);
3173 	}
3174 	rcu_read_unlock();
3175 
3176 	if (start + size <= ac->ac_o_ex.fe_logical &&
3177 			start > ac->ac_o_ex.fe_logical) {
3178 		ext4_msg(ac->ac_sb, KERN_ERR,
3179 			 "start %lu, size %lu, fe_logical %lu",
3180 			 (unsigned long) start, (unsigned long) size,
3181 			 (unsigned long) ac->ac_o_ex.fe_logical);
3182 		BUG();
3183 	}
3184 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3185 
3186 	/* now prepare goal request */
3187 
3188 	/* XXX: is it better to align blocks WRT logical
3189 	 * placement or to satisfy a big request as is */
3190 	ac->ac_g_ex.fe_logical = start;
3191 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3192 
3193 	/* define goal start in order to merge */
3194 	if (ar->pright && (ar->lright == (start + size))) {
3195 		/* merge to the right */
3196 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3197 						&ac->ac_f_ex.fe_group,
3198 						&ac->ac_f_ex.fe_start);
3199 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3200 	}
3201 	if (ar->pleft && (ar->lleft + 1 == start)) {
3202 		/* merge to the left */
3203 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3204 						&ac->ac_f_ex.fe_group,
3205 						&ac->ac_f_ex.fe_start);
3206 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3207 	}
3208 
3209 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3210 		(unsigned) orig_size, (unsigned) start);
3211 }
3212 
3213 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3214 {
3215 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3216 
3217 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3218 		atomic_inc(&sbi->s_bal_reqs);
3219 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3220 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3221 			atomic_inc(&sbi->s_bal_success);
3222 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3223 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3224 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3225 			atomic_inc(&sbi->s_bal_goals);
3226 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3227 			atomic_inc(&sbi->s_bal_breaks);
3228 	}
3229 
3230 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3231 		trace_ext4_mballoc_alloc(ac);
3232 	else
3233 		trace_ext4_mballoc_prealloc(ac);
3234 }
3235 
3236 /*
3237  * Called on failure; free up any blocks from the inode PA for this
3238  * context.  We don't need this for MB_GROUP_PA because we only change
3239  * pa_free in ext4_mb_release_context(), but on failure, we've already
3240  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3241  */
3242 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3243 {
3244 	struct ext4_prealloc_space *pa = ac->ac_pa;
3245 	struct ext4_buddy e4b;
3246 	int err;
3247 
3248 	if (pa == NULL) {
3249 		if (ac->ac_f_ex.fe_len == 0)
3250 			return;
3251 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3252 		if (err) {
3253 			/*
3254 			 * This should never happen since we pin the
3255 			 * pages in the ext4_allocation_context so
3256 			 * ext4_mb_load_buddy() should never fail.
3257 			 */
3258 			WARN(1, "mb_load_buddy failed (%d)", err);
3259 			return;
3260 		}
3261 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3262 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3263 			       ac->ac_f_ex.fe_len);
3264 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3265 		ext4_mb_unload_buddy(&e4b);
3266 		return;
3267 	}
3268 	if (pa->pa_type == MB_INODE_PA)
3269 		pa->pa_free += ac->ac_b_ex.fe_len;
3270 }
3271 
3272 /*
3273  * use blocks preallocated to inode
3274  */
3275 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3276 				struct ext4_prealloc_space *pa)
3277 {
3278 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3279 	ext4_fsblk_t start;
3280 	ext4_fsblk_t end;
3281 	int len;
3282 
3283 	/* found preallocated blocks, use them */
3284 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3285 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3286 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3287 	len = EXT4_NUM_B2C(sbi, end - start);
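	/*
	 * Illustrative numbers (assuming cluster size == block size):
	 * with pa_pstart == 1000 and pa_lstart == 200, a request at
	 * logical block 210 maps to physical start 1010, and len is the
	 * part of the request that still fits inside the pa window.
	 */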
3288 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3289 					&ac->ac_b_ex.fe_start);
3290 	ac->ac_b_ex.fe_len = len;
3291 	ac->ac_status = AC_STATUS_FOUND;
3292 	ac->ac_pa = pa;
3293 
3294 	BUG_ON(start < pa->pa_pstart);
3295 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3296 	BUG_ON(pa->pa_free < len);
3297 	pa->pa_free -= len;
3298 
3299 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3300 }
3301 
3302 /*
3303  * use blocks preallocated to locality group
3304  */
3305 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3306 				struct ext4_prealloc_space *pa)
3307 {
3308 	unsigned int len = ac->ac_o_ex.fe_len;
3309 
3310 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3311 					&ac->ac_b_ex.fe_group,
3312 					&ac->ac_b_ex.fe_start);
3313 	ac->ac_b_ex.fe_len = len;
3314 	ac->ac_status = AC_STATUS_FOUND;
3315 	ac->ac_pa = pa;
3316 
3317 	/* we don't correct pa_pstart or pa_plen here to avoid a
3318 	 * possible race when the group is being loaded concurrently;
3319 	 * instead we correct the pa later, after blocks are marked
3320 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3321 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3322 	 */
3323 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3324 }
3325 
3326 /*
3327  * Return the prealloc space that has the minimal distance
3328  * from the goal block. @cpa is the prealloc
3329  * space with the currently known minimal distance
3330  * from the goal block.
3331  */
3332 static struct ext4_prealloc_space *
3333 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3334 			struct ext4_prealloc_space *pa,
3335 			struct ext4_prealloc_space *cpa)
3336 {
3337 	ext4_fsblk_t cur_distance, new_distance;
3338 
3339 	if (cpa == NULL) {
3340 		atomic_inc(&pa->pa_count);
3341 		return pa;
3342 	}
3343 	cur_distance = abs(goal_block - cpa->pa_pstart);
3344 	new_distance = abs(goal_block - pa->pa_pstart);
3345 
3346 	if (cur_distance <= new_distance)
3347 		return cpa;
3348 
3349 	/* drop the previous reference */
3350 	atomic_dec(&cpa->pa_count);
3351 	atomic_inc(&pa->pa_count);
3352 	return pa;
3353 }
3354 
3355 /*
3356  * search goal blocks in preallocated space
3357  */
3358 static noinline_for_stack int
3359 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3360 {
3361 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3362 	int order, i;
3363 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3364 	struct ext4_locality_group *lg;
3365 	struct ext4_prealloc_space *pa, *cpa = NULL;
3366 	ext4_fsblk_t goal_block;
3367 
3368 	/* only data can be preallocated */
3369 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3370 		return 0;
3371 
3372 	/* first, try per-file preallocation */
3373 	rcu_read_lock();
3374 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3375 
3376 		/* all fields in this condition don't change,
3377 		 * so we can skip locking for them */
3378 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3379 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3380 					       EXT4_C2B(sbi, pa->pa_len)))
3381 			continue;
3382 
3383 		/* non-extent files can't have physical blocks past 2^32 */
3384 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3385 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3386 		     EXT4_MAX_BLOCK_FILE_PHYS))
3387 			continue;
3388 
3389 		/* found preallocated blocks, use them */
3390 		spin_lock(&pa->pa_lock);
3391 		if (pa->pa_deleted == 0 && pa->pa_free) {
3392 			atomic_inc(&pa->pa_count);
3393 			ext4_mb_use_inode_pa(ac, pa);
3394 			spin_unlock(&pa->pa_lock);
3395 			ac->ac_criteria = 10;
3396 			rcu_read_unlock();
3397 			return 1;
3398 		}
3399 		spin_unlock(&pa->pa_lock);
3400 	}
3401 	rcu_read_unlock();
3402 
3403 	/* can we use group allocation? */
3404 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3405 		return 0;
3406 
3407 	/* inode may have no locality group for some reason */
3408 	lg = ac->ac_lg;
3409 	if (lg == NULL)
3410 		return 0;
3411 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3412 	if (order > PREALLOC_TB_SIZE - 1)
3413 		/* The max size of hash table is PREALLOC_TB_SIZE */
3414 		order = PREALLOC_TB_SIZE - 1;
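	/*
	 * E.g. for a 6-block request, fls(6) - 1 == 2, so the loop below
	 * scans lg_prealloc_list[2] and up, i.e. PAs large enough to have
	 * a chance of covering the request.
	 */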
3415 
3416 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3417 	/*
3418 	 * search for the prealloc space that has the
3419 	 * minimal distance from the goal block.
3420 	 */
3421 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3422 		rcu_read_lock();
3423 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3424 					pa_inode_list) {
3425 			spin_lock(&pa->pa_lock);
3426 			if (pa->pa_deleted == 0 &&
3427 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3428 
3429 				cpa = ext4_mb_check_group_pa(goal_block,
3430 								pa, cpa);
3431 			}
3432 			spin_unlock(&pa->pa_lock);
3433 		}
3434 		rcu_read_unlock();
3435 	}
3436 	if (cpa) {
3437 		ext4_mb_use_group_pa(ac, cpa);
3438 		ac->ac_criteria = 20;
3439 		return 1;
3440 	}
3441 	return 0;
3442 }
3443 
3444 /*
3445  * the function goes through all blocks freed in the group
3446  * but not yet committed and marks them used in the in-core bitmap.
3447  * The buddy must be generated from this bitmap.
3448  * Needs to be called with the ext4 group lock held
3449  */
3450 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3451 						ext4_group_t group)
3452 {
3453 	struct rb_node *n;
3454 	struct ext4_group_info *grp;
3455 	struct ext4_free_data *entry;
3456 
3457 	grp = ext4_get_group_info(sb, group);
3458 	n = rb_first(&(grp->bb_free_root));
3459 
3460 	while (n) {
3461 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3462 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3463 		n = rb_next(n);
3464 	}
3465 	return;
3466 }
3467 
3468 /*
3469  * the function goes through all preallocations in this group and marks them
3470  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3471  * Needs to be called with the ext4 group lock held
3472  */
3473 static noinline_for_stack
3474 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3475 					ext4_group_t group)
3476 {
3477 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3478 	struct ext4_prealloc_space *pa;
3479 	struct list_head *cur;
3480 	ext4_group_t groupnr;
3481 	ext4_grpblk_t start;
3482 	int preallocated = 0;
3483 	int len;
3484 
3485 	/* all forms of preallocation discard first load the group,
3486 	 * so the only competing code is preallocation use.
3487 	 * we don't need any locking here.
3488 	 * notice we do NOT skip preallocations with pa_deleted set;
3489 	 * otherwise we could leave used blocks available for
3490 	 * allocation in the buddy while a concurrent ext4_mb_put_pa()
3491 	 * is dropping the preallocation
3492 	 */
3493 	list_for_each(cur, &grp->bb_prealloc_list) {
3494 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3495 		spin_lock(&pa->pa_lock);
3496 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3497 					     &groupnr, &start);
3498 		len = pa->pa_len;
3499 		spin_unlock(&pa->pa_lock);
3500 		if (unlikely(len == 0))
3501 			continue;
3502 		BUG_ON(groupnr != group);
3503 		ext4_set_bits(bitmap, start, len);
3504 		preallocated += len;
3505 	}
3506 	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3507 }
3508 
3509 static void ext4_mb_pa_callback(struct rcu_head *head)
3510 {
3511 	struct ext4_prealloc_space *pa;
3512 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3513 
3514 	BUG_ON(atomic_read(&pa->pa_count));
3515 	BUG_ON(pa->pa_deleted == 0);
3516 	kmem_cache_free(ext4_pspace_cachep, pa);
3517 }
3518 
3519 /*
3520  * drops a reference to preallocated space descriptor
3521  * if this was the last reference and the space is consumed
3522  */
3523 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3524 			struct super_block *sb, struct ext4_prealloc_space *pa)
3525 {
3526 	ext4_group_t grp;
3527 	ext4_fsblk_t grp_blk;
3528 
3529 	/* in this short window concurrent discard can set pa_deleted */
3530 	spin_lock(&pa->pa_lock);
3531 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3532 		spin_unlock(&pa->pa_lock);
3533 		return;
3534 	}
3535 
3536 	if (pa->pa_deleted == 1) {
3537 		spin_unlock(&pa->pa_lock);
3538 		return;
3539 	}
3540 
3541 	pa->pa_deleted = 1;
3542 	spin_unlock(&pa->pa_lock);
3543 
3544 	grp_blk = pa->pa_pstart;
3545 	/*
3546 	 * If doing group-based preallocation, pa_pstart may be in the
3547 	 * next group when pa is used up
3548 	 */
3549 	if (pa->pa_type == MB_GROUP_PA)
3550 		grp_blk--;
3551 
3552 	grp = ext4_get_group_number(sb, grp_blk);
3553 
3554 	/*
3555 	 * possible race:
3556 	 *
3557 	 *  P1 (buddy init)			P2 (regular allocation)
3558 	 *					find block B in PA
3559 	 *  copy on-disk bitmap to buddy
3560 	 *  					mark B in on-disk bitmap
3561 	 *					drop PA from group
3562 	 *  mark all PAs in buddy
3563 	 *
3564 	 * thus, P1 initializes buddy with B available. to prevent this
3565 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3566 	 * against that pair
3567 	 */
3568 	ext4_lock_group(sb, grp);
3569 	list_del(&pa->pa_group_list);
3570 	ext4_unlock_group(sb, grp);
3571 
3572 	spin_lock(pa->pa_obj_lock);
3573 	list_del_rcu(&pa->pa_inode_list);
3574 	spin_unlock(pa->pa_obj_lock);
3575 
3576 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3577 }
3578 
3579 /*
3580  * creates new preallocated space for given inode
3581  */
3582 static noinline_for_stack int
3583 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3584 {
3585 	struct super_block *sb = ac->ac_sb;
3586 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3587 	struct ext4_prealloc_space *pa;
3588 	struct ext4_group_info *grp;
3589 	struct ext4_inode_info *ei;
3590 
3591 	/* preallocate only when the found space is larger than requested */
3592 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3593 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3594 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3595 
3596 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3597 	if (pa == NULL)
3598 		return -ENOMEM;
3599 
3600 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3601 		int winl;
3602 		int wins;
3603 		int win;
3604 		int offs;
3605 
3606 		/* we can't allocate as much as the normalizer wants,
3607 		 * so the found space must get a proper lstart
3608 		 * to cover the original request */
3609 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3610 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3611 
3612 		/* we're limited by the original request in that
3613 		 * the logical block must be covered anyway;
3614 		 * winl is the window we can move our chunk within */
3615 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3616 
3617 		/* also, we should cover the whole original request */
3618 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3619 
3620 		/* the smallest one defines the real window */
3621 		win = min(winl, wins);
3622 
3623 		offs = ac->ac_o_ex.fe_logical %
3624 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3625 		if (offs && offs < win)
3626 			win = offs;
3627 
3628 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3629 			EXT4_NUM_B2C(sbi, win);
3630 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3631 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3632 	}
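
	/*
	 * A worked example of the window logic above (an editorial
	 * sketch, assuming one block per cluster): with
	 * ac_g_ex.fe_logical = 96, ac_o_ex.fe_logical = 100,
	 * ac_o_ex.fe_len = 4 and ac_b_ex.fe_len = 8 we get
	 * winl = 100 - 96 = 4, wins = 8 - 4 = 4, win = min(4, 4) = 4,
	 * and offs = 100 % 8 = 4; offs is not smaller than win, so win
	 * stays 4 and fe_logical becomes 100 - 4 = 96.  The resulting
	 * PA covers logical blocks [96, 104), which contains the
	 * original request [100, 104).
	 */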
3633 
3634 	/* preallocation can change ac_b_ex, thus we store the actually
3635 	 * allocated blocks for history */
3636 	ac->ac_f_ex = ac->ac_b_ex;
3637 
3638 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3639 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3640 	pa->pa_len = ac->ac_b_ex.fe_len;
3641 	pa->pa_free = pa->pa_len;
3642 	atomic_set(&pa->pa_count, 1);
3643 	spin_lock_init(&pa->pa_lock);
3644 	INIT_LIST_HEAD(&pa->pa_inode_list);
3645 	INIT_LIST_HEAD(&pa->pa_group_list);
3646 	pa->pa_deleted = 0;
3647 	pa->pa_type = MB_INODE_PA;
3648 
3649 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3650 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3651 	trace_ext4_mb_new_inode_pa(ac, pa);
3652 
3653 	ext4_mb_use_inode_pa(ac, pa);
3654 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3655 
3656 	ei = EXT4_I(ac->ac_inode);
3657 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3658 
3659 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3660 	pa->pa_inode = ac->ac_inode;
3661 
3662 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3663 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3664 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3665 
3666 	spin_lock(pa->pa_obj_lock);
3667 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3668 	spin_unlock(pa->pa_obj_lock);
3669 
3670 	return 0;
3671 }
3672 
3673 /*
3674  * creates new preallocated space for the locality group the inode belongs to
3675  */
3676 static noinline_for_stack int
3677 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3678 {
3679 	struct super_block *sb = ac->ac_sb;
3680 	struct ext4_locality_group *lg;
3681 	struct ext4_prealloc_space *pa;
3682 	struct ext4_group_info *grp;
3683 
3684 	/* preallocate only when the found space is larger than requested */
3685 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3686 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3687 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3688 
3689 	BUG_ON(ext4_pspace_cachep == NULL);
3690 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3691 	if (pa == NULL)
3692 		return -ENOMEM;
3693 
3694 	/* preallocation can change ac_b_ex, thus we store the actually
3695 	 * allocated blocks for history */
3696 	ac->ac_f_ex = ac->ac_b_ex;
3697 
3698 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3699 	pa->pa_lstart = pa->pa_pstart;
3700 	pa->pa_len = ac->ac_b_ex.fe_len;
3701 	pa->pa_free = pa->pa_len;
3702 	atomic_set(&pa->pa_count, 1);
3703 	spin_lock_init(&pa->pa_lock);
3704 	INIT_LIST_HEAD(&pa->pa_inode_list);
3705 	INIT_LIST_HEAD(&pa->pa_group_list);
3706 	pa->pa_deleted = 0;
3707 	pa->pa_type = MB_GROUP_PA;
3708 
3709 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3710 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3711 	trace_ext4_mb_new_group_pa(ac, pa);
3712 
3713 	ext4_mb_use_group_pa(ac, pa);
3714 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3715 
3716 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3717 	lg = ac->ac_lg;
3718 	BUG_ON(lg == NULL);
3719 
3720 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3721 	pa->pa_inode = NULL;
3722 
3723 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3724 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3725 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3726 
3727 	/*
3728 	 * We will later add the new pa to the right bucket
3729 	 * after updating the pa_free in ext4_mb_release_context
3730 	 */
3731 	return 0;
3732 }
3733 
3734 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3735 {
3736 	int err;
3737 
3738 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3739 		err = ext4_mb_new_group_pa(ac);
3740 	else
3741 		err = ext4_mb_new_inode_pa(ac);
3742 	return err;
3743 }
3744 
3745 /*
3746  * finds all unused blocks in the on-disk bitmap and frees them in
3747  * the in-core bitmap and buddy.
3748  * @pa must be unlinked from inode and group lists, so that
3749  * nobody else can find/use it.
3750  * the caller MUST hold group/inode locks.
3751  * TODO: optimize the case when there are no in-core structures yet
3752  */
3753 static noinline_for_stack int
3754 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3755 			struct ext4_prealloc_space *pa)
3756 {
3757 	struct super_block *sb = e4b->bd_sb;
3758 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3759 	unsigned int end;
3760 	unsigned int next;
3761 	ext4_group_t group;
3762 	ext4_grpblk_t bit;
3763 	unsigned long long grp_blk_start;
3764 	int err = 0;
3765 	int free = 0;
3766 
3767 	BUG_ON(pa->pa_deleted == 0);
3768 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3769 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3770 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3771 	end = bit + pa->pa_len;
3772 
3773 	while (bit < end) {
3774 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3775 		if (bit >= end)
3776 			break;
3777 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3778 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3779 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3780 			 (unsigned) next - bit, (unsigned) group);
3781 		free += next - bit;
3782 
3783 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3784 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3785 						    EXT4_C2B(sbi, bit)),
3786 					       next - bit);
3787 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3788 		bit = next + 1;
3789 	}
3790 	if (free != pa->pa_free) {
3791 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3792 			 "pa %p: logic %lu, phys. %lu, len %lu",
3793 			 pa, (unsigned long) pa->pa_lstart,
3794 			 (unsigned long) pa->pa_pstart,
3795 			 (unsigned long) pa->pa_len);
3796 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3797 					free, pa->pa_free);
3798 		/*
3799 		 * pa is already deleted so we use the value obtained
3800 		 * from the bitmap and continue.
3801 		 */
3802 	}
3803 	atomic_add(free, &sbi->s_mb_discarded);
3804 
3805 	return err;
3806 }
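
/*
 * A worked example of the scan in ext4_mb_release_inode_pa() (an
 * editorial sketch): suppose the PA spans bits [8, 16) and the on-disk
 * bitmap has bits 8-9 and 13 set (in use) with 10-12 and 14-15 clear.
 * The first pass finds the zero run [10, 13) and frees 3 clusters, the
 * second finds [14, 16) and frees 2, so "free" ends up 5 and is checked
 * against pa_free to catch accounting bugs.  Advancing with
 * bit = next + 1 is safe because bit "next" is known to be in use.
 */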
3807 
3808 static noinline_for_stack int
3809 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3810 				struct ext4_prealloc_space *pa)
3811 {
3812 	struct super_block *sb = e4b->bd_sb;
3813 	ext4_group_t group;
3814 	ext4_grpblk_t bit;
3815 
3816 	trace_ext4_mb_release_group_pa(sb, pa);
3817 	BUG_ON(pa->pa_deleted == 0);
3818 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3819 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3820 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3821 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3822 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3823 
3824 	return 0;
3825 }
3826 
3827 /*
3828  * releases all preallocations in the given group
3829  *
3830  * first, we need to decide discard policy:
3831  * - when do we discard
3832  *   1) ENOSPC
3833  * - how many do we discard
3834  *   1) how many requested
3835  */
3836 static noinline_for_stack int
3837 ext4_mb_discard_group_preallocations(struct super_block *sb,
3838 					ext4_group_t group, int needed)
3839 {
3840 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3841 	struct buffer_head *bitmap_bh = NULL;
3842 	struct ext4_prealloc_space *pa, *tmp;
3843 	struct list_head list;
3844 	struct ext4_buddy e4b;
3845 	int err;
3846 	int busy = 0;
3847 	int free = 0;
3848 
3849 	mb_debug(1, "discard preallocation for group %u\n", group);
3850 
3851 	if (list_empty(&grp->bb_prealloc_list))
3852 		return 0;
3853 
3854 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3855 	if (IS_ERR(bitmap_bh)) {
3856 		err = PTR_ERR(bitmap_bh);
3857 		ext4_error(sb, "Error %d reading block bitmap for %u",
3858 			   err, group);
3859 		return 0;
3860 	}
3861 
3862 	err = ext4_mb_load_buddy(sb, group, &e4b);
3863 	if (err) {
3864 		ext4_error(sb, "Error loading buddy information for %u", group);
3865 		put_bh(bitmap_bh);
3866 		return 0;
3867 	}
3868 
3869 	if (needed == 0)
3870 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
3871 
3872 	INIT_LIST_HEAD(&list);
3873 repeat:
3874 	ext4_lock_group(sb, group);
3875 	list_for_each_entry_safe(pa, tmp,
3876 				&grp->bb_prealloc_list, pa_group_list) {
3877 		spin_lock(&pa->pa_lock);
3878 		if (atomic_read(&pa->pa_count)) {
3879 			spin_unlock(&pa->pa_lock);
3880 			busy = 1;
3881 			continue;
3882 		}
3883 		if (pa->pa_deleted) {
3884 			spin_unlock(&pa->pa_lock);
3885 			continue;
3886 		}
3887 
3888 		/* seems this one can be freed ... */
3889 		pa->pa_deleted = 1;
3890 
3891 		/* we can trust pa_free ... */
3892 		free += pa->pa_free;
3893 
3894 		spin_unlock(&pa->pa_lock);
3895 
3896 		list_del(&pa->pa_group_list);
3897 		list_add(&pa->u.pa_tmp_list, &list);
3898 	}
3899 
3900 	/* if we still need more blocks and some PAs were used, try again */
3901 	if (free < needed && busy) {
3902 		busy = 0;
3903 		ext4_unlock_group(sb, group);
3904 		cond_resched();
3905 		goto repeat;
3906 	}
3907 
3908 	/* found anything to free? */
3909 	if (list_empty(&list)) {
3910 		BUG_ON(free != 0);
3911 		goto out;
3912 	}
3913 
3914 	/* now free all selected PAs */
3915 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3916 
3917 		/* remove from object (inode or locality group) */
3918 		spin_lock(pa->pa_obj_lock);
3919 		list_del_rcu(&pa->pa_inode_list);
3920 		spin_unlock(pa->pa_obj_lock);
3921 
3922 		if (pa->pa_type == MB_GROUP_PA)
3923 			ext4_mb_release_group_pa(&e4b, pa);
3924 		else
3925 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3926 
3927 		list_del(&pa->u.pa_tmp_list);
3928 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3929 	}
3930 
3931 out:
3932 	ext4_unlock_group(sb, group);
3933 	ext4_mb_unload_buddy(&e4b);
3934 	put_bh(bitmap_bh);
3935 	return free;
3936 }
3937 
3938 /*
3939  * releases all unused preallocated blocks for the given inode
3940  *
3941  * It's important to discard preallocations under i_data_sem
3942  * We don't want another block to be served from the prealloc
3943  * space when we are discarding the inode prealloc space.
3944  *
3945  * FIXME!! Make sure it is valid at all the call sites
3946  */
3947 void ext4_discard_preallocations(struct inode *inode)
3948 {
3949 	struct ext4_inode_info *ei = EXT4_I(inode);
3950 	struct super_block *sb = inode->i_sb;
3951 	struct buffer_head *bitmap_bh = NULL;
3952 	struct ext4_prealloc_space *pa, *tmp;
3953 	ext4_group_t group = 0;
3954 	struct list_head list;
3955 	struct ext4_buddy e4b;
3956 	int err;
3957 
3958 	if (!S_ISREG(inode->i_mode)) {
3959 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3960 		return;
3961 	}
3962 
3963 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3964 	trace_ext4_discard_preallocations(inode);
3965 
3966 	INIT_LIST_HEAD(&list);
3967 
3968 repeat:
3969 	/* first, collect all pa's in the inode */
3970 	spin_lock(&ei->i_prealloc_lock);
3971 	while (!list_empty(&ei->i_prealloc_list)) {
3972 		pa = list_entry(ei->i_prealloc_list.next,
3973 				struct ext4_prealloc_space, pa_inode_list);
3974 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3975 		spin_lock(&pa->pa_lock);
3976 		if (atomic_read(&pa->pa_count)) {
3977 			/* this shouldn't happen often - nobody should
3978 			 * use preallocation while we're discarding it */
3979 			spin_unlock(&pa->pa_lock);
3980 			spin_unlock(&ei->i_prealloc_lock);
3981 			ext4_msg(sb, KERN_ERR,
3982 				 "uh-oh! used pa while discarding");
3983 			WARN_ON(1);
3984 			schedule_timeout_uninterruptible(HZ);
3985 			goto repeat;
3986 
3987 		}
3988 		if (pa->pa_deleted == 0) {
3989 			pa->pa_deleted = 1;
3990 			spin_unlock(&pa->pa_lock);
3991 			list_del_rcu(&pa->pa_inode_list);
3992 			list_add(&pa->u.pa_tmp_list, &list);
3993 			continue;
3994 		}
3995 
3996 		/* someone is deleting pa right now */
3997 		spin_unlock(&pa->pa_lock);
3998 		spin_unlock(&ei->i_prealloc_lock);
3999 
4000 		/* we have to wait here because pa_deleted
4001 		 * doesn't mean pa is already unlinked from
4002 		 * the list. since we might be called from
4003 		 * ->clear_inode(), the inode will get freed
4004 		 * and a concurrent thread that is unlinking
4005 		 * pa from the inode's list may access already
4006 		 * freed memory, bad-bad-bad */
4007 
4008 		/* XXX: if this happens too often, we can
4009 		 * add a flag to force wait only in case
4010 		 * of ->clear_inode(), but not in case of
4011 		 * regular truncate */
4012 		schedule_timeout_uninterruptible(HZ);
4013 		goto repeat;
4014 	}
4015 	spin_unlock(&ei->i_prealloc_lock);
4016 
4017 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4018 		BUG_ON(pa->pa_type != MB_INODE_PA);
4019 		group = ext4_get_group_number(sb, pa->pa_pstart);
4020 
4021 		err = ext4_mb_load_buddy(sb, group, &e4b);
4022 		if (err) {
4023 			ext4_error(sb, "Error loading buddy information for %u",
4024 					group);
4025 			continue;
4026 		}
4027 
4028 		bitmap_bh = ext4_read_block_bitmap(sb, group);
4029 		if (IS_ERR(bitmap_bh)) {
4030 			err = PTR_ERR(bitmap_bh);
4031 			ext4_error(sb, "Error %d reading block bitmap for %u",
4032 					err, group);
4033 			ext4_mb_unload_buddy(&e4b);
4034 			continue;
4035 		}
4036 
4037 		ext4_lock_group(sb, group);
4038 		list_del(&pa->pa_group_list);
4039 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4040 		ext4_unlock_group(sb, group);
4041 
4042 		ext4_mb_unload_buddy(&e4b);
4043 		put_bh(bitmap_bh);
4044 
4045 		list_del(&pa->u.pa_tmp_list);
4046 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4047 	}
4048 }
4049 
4050 #ifdef CONFIG_EXT4_DEBUG
4051 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4052 {
4053 	struct super_block *sb = ac->ac_sb;
4054 	ext4_group_t ngroups, i;
4055 
4056 	if (!ext4_mballoc_debug ||
4057 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
4058 		return;
4059 
4060 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
4061 			" Allocation context details:");
4062 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
4063 			ac->ac_status, ac->ac_flags);
4064 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
4065 		 	"goal %lu/%lu/%lu@%lu, "
4066 			"best %lu/%lu/%lu@%lu cr %d",
4067 			(unsigned long)ac->ac_o_ex.fe_group,
4068 			(unsigned long)ac->ac_o_ex.fe_start,
4069 			(unsigned long)ac->ac_o_ex.fe_len,
4070 			(unsigned long)ac->ac_o_ex.fe_logical,
4071 			(unsigned long)ac->ac_g_ex.fe_group,
4072 			(unsigned long)ac->ac_g_ex.fe_start,
4073 			(unsigned long)ac->ac_g_ex.fe_len,
4074 			(unsigned long)ac->ac_g_ex.fe_logical,
4075 			(unsigned long)ac->ac_b_ex.fe_group,
4076 			(unsigned long)ac->ac_b_ex.fe_start,
4077 			(unsigned long)ac->ac_b_ex.fe_len,
4078 			(unsigned long)ac->ac_b_ex.fe_logical,
4079 			(int)ac->ac_criteria);
4080 	ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
4081 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
4082 	ngroups = ext4_get_groups_count(sb);
4083 	for (i = 0; i < ngroups; i++) {
4084 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4085 		struct ext4_prealloc_space *pa;
4086 		ext4_grpblk_t start;
4087 		struct list_head *cur;
4088 		ext4_lock_group(sb, i);
4089 		list_for_each(cur, &grp->bb_prealloc_list) {
4090 			pa = list_entry(cur, struct ext4_prealloc_space,
4091 					pa_group_list);
4092 			spin_lock(&pa->pa_lock);
4093 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4094 						     NULL, &start);
4095 			spin_unlock(&pa->pa_lock);
4096 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
4097 			       start, pa->pa_len);
4098 		}
4099 		ext4_unlock_group(sb, i);
4100 
4101 		if (grp->bb_free == 0)
4102 			continue;
4103 		printk(KERN_ERR "%u: %d/%d \n",
4104 		       i, grp->bb_free, grp->bb_fragments);
4105 	}
4106 	printk(KERN_ERR "\n");
4107 }
4108 #else
4109 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4110 {
4111 	return;
4112 }
4113 #endif
4114 
4115 /*
4116  * We use locality group preallocation for small files. The size of the
4117  * file is determined by the current size or the resulting size after
4118  * allocation, whichever is larger
4119  *
4120  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4121  */
4122 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4123 {
4124 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4125 	int bsbits = ac->ac_sb->s_blocksize_bits;
4126 	loff_t size, isize;
4127 
4128 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4129 		return;
4130 
4131 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4132 		return;
4133 
4134 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4135 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4136 		>> bsbits;
4137 
4138 	if ((size == isize) &&
4139 	    !ext4_fs_is_busy(sbi) &&
4140 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4141 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4142 		return;
4143 	}
4144 
4145 	if (sbi->s_mb_group_prealloc <= 0) {
4146 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4147 		return;
4148 	}
4149 
4150 	/* don't use group allocation for large files */
4151 	size = max(size, isize);
4152 	if (size > sbi->s_mb_stream_request) {
4153 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4154 		return;
4155 	}
4156 
4157 	BUG_ON(ac->ac_lg != NULL);
4158 	/*
4159 	 * locality group prealloc space is per-cpu. The reason for having
4160 	 * per-cpu locality groups is to reduce contention between block
4161 	 * requests from multiple CPUs.
4162 	 */
4163 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4164 
4165 	/* we're going to use group allocation */
4166 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4167 
4168 	/* serialize all allocations in the group */
4169 	mutex_lock(&ac->ac_lg->lg_mutex);
4170 }
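
/*
 * For illustration (an editorial sketch, assuming a 4KiB block size and
 * the default s_mb_stream_request of 16 blocks): a write that leaves a
 * file at 12 blocks keeps max(size, isize) under the threshold, so the
 * allocation takes the locality group path above and small files written
 * on the same CPU end up packed near each other on disk.  A 64-block
 * file exceeds the threshold, gets EXT4_MB_STREAM_ALLOC, and uses
 * per-inode preallocation instead.
 */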
4171 
4172 static noinline_for_stack int
4173 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4174 				struct ext4_allocation_request *ar)
4175 {
4176 	struct super_block *sb = ar->inode->i_sb;
4177 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4178 	struct ext4_super_block *es = sbi->s_es;
4179 	ext4_group_t group;
4180 	unsigned int len;
4181 	ext4_fsblk_t goal;
4182 	ext4_grpblk_t block;
4183 
4184 	/* we can't allocate > group size */
4185 	len = ar->len;
4186 
4187 	/* just a dirty hack to filter out too-big requests */
4188 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4189 		len = EXT4_CLUSTERS_PER_GROUP(sb);
4190 
4191 	/* start searching from the goal */
4192 	goal = ar->goal;
4193 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4194 			goal >= ext4_blocks_count(es))
4195 		goal = le32_to_cpu(es->s_first_data_block);
4196 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4197 
4198 	/* set up allocation goals */
4199 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4200 	ac->ac_status = AC_STATUS_CONTINUE;
4201 	ac->ac_sb = sb;
4202 	ac->ac_inode = ar->inode;
4203 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4204 	ac->ac_o_ex.fe_group = group;
4205 	ac->ac_o_ex.fe_start = block;
4206 	ac->ac_o_ex.fe_len = len;
4207 	ac->ac_g_ex = ac->ac_o_ex;
4208 	ac->ac_flags = ar->flags;
4209 
4210 	/* we have to define the context: will we work with a file or
4211 	 * a locality group? this is a policy decision, actually */
4212 	ext4_mb_group_or_file(ac);
4213 
4214 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4215 			"left: %u/%u, right %u/%u to %swritable\n",
4216 			(unsigned) ar->len, (unsigned) ar->logical,
4217 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4218 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4219 			(unsigned) ar->lright, (unsigned) ar->pright,
4220 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4221 	return 0;
4222 
4223 }
4224 
4225 static noinline_for_stack void
4226 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4227 					struct ext4_locality_group *lg,
4228 					int order, int total_entries)
4229 {
4230 	ext4_group_t group = 0;
4231 	struct ext4_buddy e4b;
4232 	struct list_head discard_list;
4233 	struct ext4_prealloc_space *pa, *tmp;
4234 
4235 	mb_debug(1, "discard locality group preallocation\n");
4236 
4237 	INIT_LIST_HEAD(&discard_list);
4238 
4239 	spin_lock(&lg->lg_prealloc_lock);
4240 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4241 						pa_inode_list) {
4242 		spin_lock(&pa->pa_lock);
4243 		if (atomic_read(&pa->pa_count)) {
4244 			/*
4245 			 * This is the pa that we just used
4246 			 * for block allocation. So don't
4247 			 * free that
4248 			 */
4249 			spin_unlock(&pa->pa_lock);
4250 			continue;
4251 		}
4252 		if (pa->pa_deleted) {
4253 			spin_unlock(&pa->pa_lock);
4254 			continue;
4255 		}
4256 		/* only lg prealloc space */
4257 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4258 
4259 		/* seems this one can be freed ... */
4260 		pa->pa_deleted = 1;
4261 		spin_unlock(&pa->pa_lock);
4262 
4263 		list_del_rcu(&pa->pa_inode_list);
4264 		list_add(&pa->u.pa_tmp_list, &discard_list);
4265 
4266 		total_entries--;
4267 		if (total_entries <= 5) {
4268 			/*
4269 			 * we want to keep only 5 entries,
4270 			 * allowing the list to grow to 8. This
4271 			 * makes sure we don't call discard
4272 			 * again soon for this list.
4273 			 */
4274 			break;
4275 		}
4276 	}
4277 	spin_unlock(&lg->lg_prealloc_lock);
4278 
4279 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4280 
4281 		group = ext4_get_group_number(sb, pa->pa_pstart);
4282 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4283 			ext4_error(sb, "Error loading buddy information for %u",
4284 					group);
4285 			continue;
4286 		}
4287 		ext4_lock_group(sb, group);
4288 		list_del(&pa->pa_group_list);
4289 		ext4_mb_release_group_pa(&e4b, pa);
4290 		ext4_unlock_group(sb, group);
4291 
4292 		ext4_mb_unload_buddy(&e4b);
4293 		list_del(&pa->u.pa_tmp_list);
4294 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4295 	}
4296 }
4297 
4298 /*
4299  * We have incremented pa_count. So it cannot be freed at this
4300  * point. Also we hold lg_mutex. So no parallel allocation is
4301  * possible from this lg. That means pa_free cannot be updated.
4302  *
4303  * A parallel ext4_mb_discard_group_preallocations is possible, however,
4304  * which can cause the lg_prealloc_list to be updated.
4305  */
4306 
4307 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4308 {
4309 	int order, added = 0, lg_prealloc_count = 1;
4310 	struct super_block *sb = ac->ac_sb;
4311 	struct ext4_locality_group *lg = ac->ac_lg;
4312 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4313 
4314 	order = fls(pa->pa_free) - 1;
4315 	if (order > PREALLOC_TB_SIZE - 1)
4316 		/* The max size of hash table is PREALLOC_TB_SIZE */
4317 		order = PREALLOC_TB_SIZE - 1;
4318 	/* Add the prealloc space to lg */
4319 	spin_lock(&lg->lg_prealloc_lock);
4320 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4321 						pa_inode_list) {
4322 		spin_lock(&tmp_pa->pa_lock);
4323 		if (tmp_pa->pa_deleted) {
4324 			spin_unlock(&tmp_pa->pa_lock);
4325 			continue;
4326 		}
4327 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4328 			/* Add to the tail of the previous entry */
4329 			list_add_tail_rcu(&pa->pa_inode_list,
4330 						&tmp_pa->pa_inode_list);
4331 			added = 1;
4332 			/*
4333 			 * we want to count the total
4334 			 * number of entries in the list
4335 			 */
4336 		}
4337 		spin_unlock(&tmp_pa->pa_lock);
4338 		lg_prealloc_count++;
4339 	}
4340 	if (!added)
4341 		list_add_tail_rcu(&pa->pa_inode_list,
4342 					&lg->lg_prealloc_list[order]);
4343 	spin_unlock(&lg->lg_prealloc_lock);
4344 
4345 	/* Now trim the list to be not more than 8 elements */
4346 	if (lg_prealloc_count > 8)
4347 		ext4_mb_discard_lg_preallocations(sb, lg,
4348 						  order, lg_prealloc_count);
4352 }
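
/*
 * A sketch of the ordering kept by ext4_mb_add_n_trim() (editorial, with
 * made-up pa_free values): if lg_prealloc_list[order] holds PAs with
 * pa_free 3, 7 and 12, a new PA with pa_free = 9 is linked in before the
 * 12-entry, keeping the list sorted ascending as 3, 7, 9, 12.  Once more
 * than 8 entries accumulate, the discard pass walks from the head and
 * drops the smallest unpinned PAs first, down to about 5 entries.
 */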
4353 
4354 /*
4355  * release all resources we used in the allocation
4356  */
4357 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4358 {
4359 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4360 	struct ext4_prealloc_space *pa = ac->ac_pa;
4361 	if (pa) {
4362 		if (pa->pa_type == MB_GROUP_PA) {
4363 			/* see comment in ext4_mb_use_group_pa() */
4364 			spin_lock(&pa->pa_lock);
4365 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4366 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4367 			pa->pa_free -= ac->ac_b_ex.fe_len;
4368 			pa->pa_len -= ac->ac_b_ex.fe_len;
4369 			spin_unlock(&pa->pa_lock);
4370 		}
4373 		/*
4374 		 * We want to add the pa to the right bucket.
4375 		 * Remove it from the list and while adding
4376 		 * make sure the list to which we are adding
4377 		 * doesn't grow big.
4378 		 */
4379 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4380 			spin_lock(pa->pa_obj_lock);
4381 			list_del_rcu(&pa->pa_inode_list);
4382 			spin_unlock(pa->pa_obj_lock);
4383 			ext4_mb_add_n_trim(ac);
4384 		}
4385 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4386 	}
4387 	if (ac->ac_bitmap_page)
4388 		put_page(ac->ac_bitmap_page);
4389 	if (ac->ac_buddy_page)
4390 		put_page(ac->ac_buddy_page);
4391 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4392 		mutex_unlock(&ac->ac_lg->lg_mutex);
4393 	ext4_mb_collect_stats(ac);
4394 	return 0;
4395 }
4396 
4397 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4398 {
4399 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4400 	int ret;
4401 	int freed = 0;
4402 
4403 	trace_ext4_mb_discard_preallocations(sb, needed);
4404 	for (i = 0; i < ngroups && needed > 0; i++) {
4405 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4406 		freed += ret;
4407 		needed -= ret;
4408 	}
4409 
4410 	return freed;
4411 }
4412 
4413 /*
4414  * Main entry point into mballoc to allocate blocks
4415  * it tries to use preallocation first, then falls back
4416  * to usual allocation
4417  */
4418 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4419 				struct ext4_allocation_request *ar, int *errp)
4420 {
4421 	int freed;
4422 	struct ext4_allocation_context *ac = NULL;
4423 	struct ext4_sb_info *sbi;
4424 	struct super_block *sb;
4425 	ext4_fsblk_t block = 0;
4426 	unsigned int inquota = 0;
4427 	unsigned int reserv_clstrs = 0;
4428 
4429 	might_sleep();
4430 	sb = ar->inode->i_sb;
4431 	sbi = EXT4_SB(sb);
4432 
4433 	trace_ext4_request_blocks(ar);
4434 
4435 	/* Allow use of the superuser reservation for the quota file */
4436 	if (IS_NOQUOTA(ar->inode))
4437 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4438 
4439 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
4440 		/* Without delayed allocation we need to verify
4441 		 * there are enough free blocks to do block allocation
4442 		 * and verify allocation doesn't exceed the quota limits.
4443 		 */
4444 		while (ar->len &&
4445 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4446 
4447 			/* let others free the space */
4448 			cond_resched();
4449 			ar->len = ar->len >> 1;
4450 		}
4451 		if (!ar->len) {
4452 			*errp = -ENOSPC;
4453 			return 0;
4454 		}
4455 		reserv_clstrs = ar->len;
4456 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4457 			dquot_alloc_block_nofail(ar->inode,
4458 						 EXT4_C2B(sbi, ar->len));
4459 		} else {
4460 			while (ar->len &&
4461 				dquot_alloc_block(ar->inode,
4462 						  EXT4_C2B(sbi, ar->len))) {
4463 
4464 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4465 				ar->len--;
4466 			}
4467 		}
4468 		inquota = ar->len;
4469 		if (ar->len == 0) {
4470 			*errp = -EDQUOT;
4471 			goto out;
4472 		}
4473 	}
4474 
4475 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4476 	if (!ac) {
4477 		ar->len = 0;
4478 		*errp = -ENOMEM;
4479 		goto out;
4480 	}
4481 
4482 	*errp = ext4_mb_initialize_context(ac, ar);
4483 	if (*errp) {
4484 		ar->len = 0;
4485 		goto out;
4486 	}
4487 
4488 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4489 	if (!ext4_mb_use_preallocated(ac)) {
4490 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4491 		ext4_mb_normalize_request(ac, ar);
4492 repeat:
4493 		/* allocate space in core */
4494 		*errp = ext4_mb_regular_allocator(ac);
4495 		if (*errp)
4496 			goto discard_and_exit;
4497 
4498 		/* as we've just preallocated more space than the
4499 		 * user requested originally, we store the allocated
4500 		 * space in a special descriptor */
4501 		if (ac->ac_status == AC_STATUS_FOUND &&
4502 		    ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4503 			*errp = ext4_mb_new_preallocation(ac);
4504 		if (*errp) {
4505 		discard_and_exit:
4506 			ext4_discard_allocated_blocks(ac);
4507 			goto errout;
4508 		}
4509 	}
4510 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4511 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4512 		if (*errp == -EAGAIN) {
4513 			/*
4514 			 * drop the reference that we took
4515 			 * in ext4_mb_use_best_found
4516 			 */
4517 			ext4_mb_release_context(ac);
4518 			ac->ac_b_ex.fe_group = 0;
4519 			ac->ac_b_ex.fe_start = 0;
4520 			ac->ac_b_ex.fe_len = 0;
4521 			ac->ac_status = AC_STATUS_CONTINUE;
4522 			goto repeat;
4523 		} else if (*errp) {
4524 			ext4_discard_allocated_blocks(ac);
4525 			goto errout;
4526 		} else {
4527 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4528 			ar->len = ac->ac_b_ex.fe_len;
4529 		}
4530 	} else {
4531 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4532 		if (freed)
4533 			goto repeat;
4534 		*errp = -ENOSPC;
4535 	}
4536 
4537 errout:
4538 	if (*errp) {
4539 		ac->ac_b_ex.fe_len = 0;
4540 		ar->len = 0;
4541 		ext4_mb_show_ac(ac);
4542 	}
4543 	ext4_mb_release_context(ac);
4544 out:
4545 	if (ac)
4546 		kmem_cache_free(ext4_ac_cachep, ac);
4547 	if (inquota && ar->len < inquota)
4548 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4549 	if (!ar->len) {
4550 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
4551 			/* release all the reserved blocks if non delalloc */
4552 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4553 						reserv_clstrs);
4554 	}
4555 
4556 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4557 
4558 	return block;
4559 }
4560 
4561 /*
4562  * We can merge two free data extents only if the physical blocks
4563  * are contiguous, AND the extents were freed by the same transaction,
4564  * AND the blocks are associated with the same group.
4565  */
4566 static int can_merge(struct ext4_free_data *entry1,
4567 			struct ext4_free_data *entry2)
4568 {
4569 	if ((entry1->efd_tid == entry2->efd_tid) &&
4570 	    (entry1->efd_group == entry2->efd_group) &&
4571 	    ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4572 		return 1;
4573 	return 0;
4574 }
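
/*
 * For illustration (an editorial sketch): two entries with efd_tid = 42,
 * efd_group = 3 and (efd_start_cluster, efd_count) of (100, 8) and
 * (108, 4) satisfy all three conditions and merge into a single
 * (100, 12) extent.  If the second extent had been freed in a later
 * transaction the tids would differ and the entries would stay separate,
 * since each may be reused only once its own transaction commits.
 */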
4575 
4576 static noinline_for_stack int
4577 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4578 		      struct ext4_free_data *new_entry)
4579 {
4580 	ext4_group_t group = e4b->bd_group;
4581 	ext4_grpblk_t cluster;
4582 	struct ext4_free_data *entry;
4583 	struct ext4_group_info *db = e4b->bd_info;
4584 	struct super_block *sb = e4b->bd_sb;
4585 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4586 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4587 	struct rb_node *parent = NULL, *new_node;
4588 
4589 	BUG_ON(!ext4_handle_valid(handle));
4590 	BUG_ON(e4b->bd_bitmap_page == NULL);
4591 	BUG_ON(e4b->bd_buddy_page == NULL);
4592 
4593 	new_node = &new_entry->efd_node;
4594 	cluster = new_entry->efd_start_cluster;
4595 
4596 	if (!*n) {
4597 		/* first free block extent. We need to
4598 		 * protect the buddy cache from being freed,
4599 		 * otherwise we'll refresh it from the
4600 		 * on-disk bitmap and lose not-yet-available
4601 		 * blocks */
4602 		get_page(e4b->bd_buddy_page);
4603 		get_page(e4b->bd_bitmap_page);
4604 	}
4605 	while (*n) {
4606 		parent = *n;
4607 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4608 		if (cluster < entry->efd_start_cluster)
4609 			n = &(*n)->rb_left;
4610 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4611 			n = &(*n)->rb_right;
4612 		else {
4613 			ext4_grp_locked_error(sb, group, 0,
4614 				ext4_group_first_block_no(sb, group) +
4615 				EXT4_C2B(sbi, cluster),
4616 				"Block already on to-be-freed list");
4617 			return 0;
4618 		}
4619 	}
4620 
4621 	rb_link_node(new_node, parent, n);
4622 	rb_insert_color(new_node, &db->bb_free_root);
4623 
4624 	/* Now see if the extent can be merged to the left and right */
4625 	node = rb_prev(new_node);
4626 	if (node) {
4627 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4628 		if (can_merge(entry, new_entry) &&
4629 		    ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4630 			new_entry->efd_start_cluster = entry->efd_start_cluster;
4631 			new_entry->efd_count += entry->efd_count;
4632 			rb_erase(node, &(db->bb_free_root));
4633 			kmem_cache_free(ext4_free_data_cachep, entry);
4634 		}
4635 	}
4636 
4637 	node = rb_next(new_node);
4638 	if (node) {
4639 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4640 		if (can_merge(new_entry, entry) &&
4641 		    ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4642 			new_entry->efd_count += entry->efd_count;
4643 			rb_erase(node, &(db->bb_free_root));
4644 			kmem_cache_free(ext4_free_data_cachep, entry);
4645 		}
4646 	}
4647 	/* Add the extent to transaction's private list */
4648 	ext4_journal_callback_add(handle, ext4_free_data_callback,
4649 				  &new_entry->efd_jce);
4650 	return 0;
4651 }
4652 
4653 /**
4654  * ext4_free_blocks() -- Free given blocks and update quota
4655  * @handle:		handle for this transaction
4656  * @inode:		inode
 * @bh:			buffer head of the block to be freed, if any
4657  * @block:		start physical block to free
4658  * @count:		number of blocks to free
4659  * @flags:		flags used by ext4_free_blocks
4660  */
4661 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4662 		      struct buffer_head *bh, ext4_fsblk_t block,
4663 		      unsigned long count, int flags)
4664 {
4665 	struct buffer_head *bitmap_bh = NULL;
4666 	struct super_block *sb = inode->i_sb;
4667 	struct ext4_group_desc *gdp;
4668 	unsigned int overflow;
4669 	ext4_grpblk_t bit;
4670 	struct buffer_head *gd_bh;
4671 	ext4_group_t block_group;
4672 	struct ext4_sb_info *sbi;
4673 	struct ext4_buddy e4b;
4674 	unsigned int count_clusters;
4675 	int err = 0;
4676 	int ret;
4677 
4678 	might_sleep();
4679 	if (bh) {
4680 		if (block)
4681 			BUG_ON(block != bh->b_blocknr);
4682 		else
4683 			block = bh->b_blocknr;
4684 	}
4685 
4686 	sbi = EXT4_SB(sb);
4687 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4688 	    !ext4_data_block_valid(sbi, block, count)) {
4689 		ext4_error(sb, "Freeing blocks not in datazone - "
4690 			   "block = %llu, count = %lu", block, count);
4691 		goto error_return;
4692 	}
4693 
4694 	ext4_debug("freeing block %llu\n", block);
4695 	trace_ext4_free_blocks(inode, block, count, flags);
4696 
4697 	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4698 		BUG_ON(count > 1);
4699 
4700 		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4701 			    inode, bh, block);
4702 	}
4703 
4704 	/*
4705 	 * If the extent to be freed does not begin on a cluster
4706 	 * boundary, we need to deal with partial clusters at the
4707 	 * beginning and end of the extent.  Normally we will free
4708 	 * blocks at the beginning or the end unless we are explicitly
4709 	 * requested to avoid doing so.
4710 	 */
4711 	overflow = EXT4_PBLK_COFF(sbi, block);
4712 	if (overflow) {
4713 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4714 			overflow = sbi->s_cluster_ratio - overflow;
4715 			block += overflow;
4716 			if (count > overflow)
4717 				count -= overflow;
4718 			else
4719 				return;
4720 		} else {
4721 			block -= overflow;
4722 			count += overflow;
4723 		}
4724 	}
4725 	overflow = EXT4_LBLK_COFF(sbi, count);
4726 	if (overflow) {
4727 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4728 			if (count > overflow)
4729 				count -= overflow;
4730 			else
4731 				return;
4732 		} else
4733 			count += sbi->s_cluster_ratio - overflow;
4734 	}
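
	/*
	 * A worked example of the rounding above (an editorial sketch,
	 * assuming a bigalloc file system with s_cluster_ratio = 16):
	 * freeing block = 18, count = 30 starts 2 blocks into a
	 * cluster.  By default the range is widened to block = 16,
	 * count = 32, i.e. exactly two whole clusters.  With
	 * EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER the start is instead
	 * rounded up to block = 32, count = 16, leaving the partial
	 * first cluster alone.
	 */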
4735 
4736 	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4737 		int i;
4738 		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
4739 
4740 		for (i = 0; i < count; i++) {
4741 			cond_resched();
4742 			if (is_metadata)
4743 				bh = sb_find_get_block(inode->i_sb, block + i);
4744 			ext4_forget(handle, is_metadata, inode, bh, block + i);
4745 		}
4746 	}
4747 
4748 do_more:
4749 	overflow = 0;
4750 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4751 
4752 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
4753 			ext4_get_group_info(sb, block_group))))
4754 		return;
4755 
4756 	/*
4757 	 * Check to see if we are freeing blocks across a group
4758 	 * boundary.
4759 	 */
4760 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4761 		overflow = EXT4_C2B(sbi, bit) + count -
4762 			EXT4_BLOCKS_PER_GROUP(sb);
4763 		count -= overflow;
4764 	}
4765 	count_clusters = EXT4_NUM_B2C(sbi, count);
4766 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4767 	if (IS_ERR(bitmap_bh)) {
4768 		err = PTR_ERR(bitmap_bh);
4769 		bitmap_bh = NULL;
4770 		goto error_return;
4771 	}
4772 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4773 	if (!gdp) {
4774 		err = -EIO;
4775 		goto error_return;
4776 	}
4777 
4778 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4779 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4780 	    in_range(block, ext4_inode_table(sb, gdp),
4781 		     EXT4_SB(sb)->s_itb_per_group) ||
4782 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4783 		     EXT4_SB(sb)->s_itb_per_group)) {
4784 
4785 		ext4_error(sb, "Freeing blocks in system zone - "
4786 			   "Block = %llu, count = %lu", block, count);
4787 		/* err = 0. ext4_std_error should be a no op */
4788 		goto error_return;
4789 	}
4790 
4791 	BUFFER_TRACE(bitmap_bh, "getting write access");
4792 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4793 	if (err)
4794 		goto error_return;
4795 
4796 	/*
4797 	 * We are about to modify some metadata.  Call the journal APIs
4798 	 * to unshare ->b_data if a currently-committing transaction is
4799 	 * using it
4800 	 */
4801 	BUFFER_TRACE(gd_bh, "get_write_access");
4802 	err = ext4_journal_get_write_access(handle, gd_bh);
4803 	if (err)
4804 		goto error_return;
4805 #ifdef AGGRESSIVE_CHECK
4806 	{
4807 		int i;
4808 		for (i = 0; i < count_clusters; i++)
4809 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4810 	}
4811 #endif
4812 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4813 
4814 	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
4815 	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
4816 				     GFP_NOFS|__GFP_NOFAIL);
4817 	if (err)
4818 		goto error_return;
4819 
4820 	/*
4821 	 * We need to make sure we don't reuse the freed block until after the
4822 	 * transaction is committed. We make an exception if the inode is to be
4823 	 * written in writeback mode since writeback mode has weak data
4824 	 * consistency guarantees.
4825 	 */
4826 	if (ext4_handle_valid(handle) &&
4827 	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
4828 	     !ext4_should_writeback_data(inode))) {
4829 		struct ext4_free_data *new_entry;
4830 		/*
4831 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
4832 		 * to fail.
4833 		 */
4834 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
4835 				GFP_NOFS|__GFP_NOFAIL);
4836 		new_entry->efd_start_cluster = bit;
4837 		new_entry->efd_group = block_group;
4838 		new_entry->efd_count = count_clusters;
4839 		new_entry->efd_tid = handle->h_transaction->t_tid;
4840 
4841 		ext4_lock_group(sb, block_group);
4842 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4843 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4844 	} else {
4845 		/* need to update group_info->bb_free and the bitmap
4846 		 * with the group lock held. ext4_mb_generate_buddy()
4847 		 * looks at them with the group lock held
4848 		 */
4849 		if (test_opt(sb, DISCARD)) {
4850 			err = ext4_issue_discard(sb, block_group, bit, count);
4851 			if (err && err != -EOPNOTSUPP)
4852 				ext4_msg(sb, KERN_WARNING, "discard request in"
4853 					 " group:%d block:%d count:%lu failed"
4854 					 " with %d", block_group, bit, count,
4855 					 err);
4856 		} else
4857 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
4858 
4859 		ext4_lock_group(sb, block_group);
4860 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4861 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4862 	}
4863 
4864 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4865 	ext4_free_group_clusters_set(sb, gdp, ret);
4866 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4867 	ext4_group_desc_csum_set(sb, block_group, gdp);
4868 	ext4_unlock_group(sb, block_group);
4869 
4870 	if (sbi->s_log_groups_per_flex) {
4871 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4872 		atomic64_add(count_clusters,
4873 			     &sbi->s_flex_groups[flex_group].free_clusters);
4874 	}
4875 
4876 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4877 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4878 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4879 
4880 	ext4_mb_unload_buddy(&e4b);
4881 
4882 	/* We dirtied the bitmap block */
4883 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4884 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4885 
4886 	/* And the group descriptor block */
4887 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4888 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4889 	if (!err)
4890 		err = ret;
4891 
4892 	if (overflow && !err) {
4893 		block += count;
4894 		count = overflow;
4895 		put_bh(bitmap_bh);
4896 		goto do_more;
4897 	}
4898 error_return:
4899 	brelse(bitmap_bh);
4900 	ext4_std_error(sb, err);
4901 	return;
4902 }
4903 
4904 /**
4905  * ext4_group_add_blocks() -- Add given blocks to an existing group
4906  * @handle:			handle to this transaction
4907  * @sb:				super block
4908  * @block:			start physical block to add to the block group
4909  * @count:			number of blocks to add
4910  *
4911  * This marks the blocks as free in the bitmap and buddy.
4912  */
4913 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4914 			 ext4_fsblk_t block, unsigned long count)
4915 {
4916 	struct buffer_head *bitmap_bh = NULL;
4917 	struct buffer_head *gd_bh;
4918 	ext4_group_t block_group;
4919 	ext4_grpblk_t bit;
4920 	unsigned int i;
4921 	struct ext4_group_desc *desc;
4922 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4923 	struct ext4_buddy e4b;
4924 	int err = 0, ret, blk_free_count;
4925 	ext4_grpblk_t blocks_freed;
4926 
4927 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4928 
4929 	if (count == 0)
4930 		return 0;
4931 
4932 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4933 	/*
4934 	 * Check to see if we are freeing blocks across a group
4935 	 * boundary.
4936 	 */
4937 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4938 		ext4_warning(sb, "too many blocks added to group %u\n",
4939 			     block_group);
4940 		err = -EINVAL;
4941 		goto error_return;
4942 	}
4943 
4944 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4945 	if (IS_ERR(bitmap_bh)) {
4946 		err = PTR_ERR(bitmap_bh);
4947 		bitmap_bh = NULL;
4948 		goto error_return;
4949 	}
4950 
4951 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4952 	if (!desc) {
4953 		err = -EIO;
4954 		goto error_return;
4955 	}
4956 
4957 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4958 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4959 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4960 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4961 		     sbi->s_itb_per_group)) {
4962 		ext4_error(sb, "Adding blocks in system zones - "
4963 			   "Block = %llu, count = %lu",
4964 			   block, count);
4965 		err = -EINVAL;
4966 		goto error_return;
4967 	}
4968 
4969 	BUFFER_TRACE(bitmap_bh, "getting write access");
4970 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4971 	if (err)
4972 		goto error_return;
4973 
4974 	/*
4975 	 * We are about to modify some metadata.  Call the journal APIs
4976 	 * to unshare ->b_data if a currently-committing transaction is
4977 	 * using it
4978 	 */
4979 	BUFFER_TRACE(gd_bh, "get_write_access");
4980 	err = ext4_journal_get_write_access(handle, gd_bh);
4981 	if (err)
4982 		goto error_return;
4983 
4984 	for (i = 0, blocks_freed = 0; i < count; i++) {
4985 		BUFFER_TRACE(bitmap_bh, "clear bit");
4986 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4987 			ext4_error(sb, "bit already cleared for block %llu",
4988 				   (ext4_fsblk_t)(block + i));
4989 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4990 		} else {
4991 			blocks_freed++;
4992 		}
4993 	}
4994 
4995 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4996 	if (err)
4997 		goto error_return;
4998 
4999 	/*
5000 	 * need to update group_info->bb_free and the bitmap
5001 	 * with the group lock held. ext4_mb_generate_buddy()
5002 	 * looks at them with the group lock held
5003 	 */
5004 	ext4_lock_group(sb, block_group);
5005 	mb_clear_bits(bitmap_bh->b_data, bit, count);
5006 	mb_free_blocks(NULL, &e4b, bit, count);
5007 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
5008 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
5009 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5010 	ext4_group_desc_csum_set(sb, block_group, desc);
5011 	ext4_unlock_group(sb, block_group);
5012 	percpu_counter_add(&sbi->s_freeclusters_counter,
5013 			   EXT4_NUM_B2C(sbi, blocks_freed));
5014 
5015 	if (sbi->s_log_groups_per_flex) {
5016 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5017 		atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
5018 			     &sbi->s_flex_groups[flex_group].free_clusters);
5019 	}
5020 
5021 	ext4_mb_unload_buddy(&e4b);
5022 
5023 	/* We dirtied the bitmap block */
5024 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5025 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5026 
5027 	/* And the group descriptor block */
5028 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5029 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5030 	if (!err)
5031 		err = ret;
5032 
5033 error_return:
5034 	brelse(bitmap_bh);
5035 	ext4_std_error(sb, err);
5036 	return err;
5037 }
5038 
5039 /**
5040  * ext4_trim_extent -- function to TRIM one single free extent in the group
5041  * @sb:		super block for the file system
5042  * @start:	starting block of the free extent in the alloc. group
5043  * @count:	number of blocks to TRIM
5044  * @group:	alloc. group we are working with
5045  * @e4b:	ext4 buddy for the group
5046  *
5047  * Trim "count" blocks starting at "start" in the "group". To ensure that no
5048  * one will allocate those blocks, mark them as used in the buddy bitmap. This
5049  * must be called under the group lock.
5050  */
5051 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5052 			     ext4_group_t group, struct ext4_buddy *e4b)
5053 __releases(bitlock)
5054 __acquires(bitlock)
5055 {
5056 	struct ext4_free_extent ex;
5057 	int ret = 0;
5058 
5059 	trace_ext4_trim_extent(sb, group, start, count);
5060 
5061 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
5062 
5063 	ex.fe_start = start;
5064 	ex.fe_group = group;
5065 	ex.fe_len = count;
5066 
5067 	/*
5068 	 * Mark blocks used, so no one can reuse them while
5069 	 * being trimmed.
5070 	 */
5071 	mb_mark_used(e4b, &ex);
5072 	ext4_unlock_group(sb, group);
5073 	ret = ext4_issue_discard(sb, group, start, count);
5074 	ext4_lock_group(sb, group);
5075 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
5076 	return ret;
5077 }
5078 
5079 /**
5080  * ext4_trim_all_free -- function to trim all free space in alloc. group
5081  * @sb:			super block for file system
5082  * @group:		group to be trimmed
5083  * @start:		first group block to examine
5084  * @max:		last group block to examine
5085  * @minblocks:		minimum extent block count
5086  *
5087  * ext4_trim_all_free walks through the group's buddy bitmap searching for
5088  * free extents. When a free extent is found, it is marked as used in the
5089  * group buddy bitmap, ext4_trim_extent is called to TRIM the extent, and
5090  * the extent is then freed again in the group buddy bitmap. This is done
5091  * until the whole group is scanned.
5096  */
5097 static ext4_grpblk_t
5098 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5099 		   ext4_grpblk_t start, ext4_grpblk_t max,
5100 		   ext4_grpblk_t minblocks)
5101 {
5102 	void *bitmap;
5103 	ext4_grpblk_t next, count = 0, free_count = 0;
5104 	struct ext4_buddy e4b;
5105 	int ret = 0;
5106 
5107 	trace_ext4_trim_all_free(sb, group, start, max);
5108 
5109 	ret = ext4_mb_load_buddy(sb, group, &e4b);
5110 	if (ret) {
5111 		ext4_error(sb, "Error in loading buddy "
5112 				"information for %u", group);
5113 		return ret;
5114 	}
5115 	bitmap = e4b.bd_bitmap;
5116 
5117 	ext4_lock_group(sb, group);
5118 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5119 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5120 		goto out;
5121 
5122 	start = (e4b.bd_info->bb_first_free > start) ?
5123 		e4b.bd_info->bb_first_free : start;
5124 
5125 	while (start <= max) {
5126 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
5127 		if (start > max)
5128 			break;
5129 		next = mb_find_next_bit(bitmap, max + 1, start);
5130 
5131 		if ((next - start) >= minblocks) {
5132 			ret = ext4_trim_extent(sb, start,
5133 					       next - start, group, &e4b);
5134 			if (ret && ret != -EOPNOTSUPP)
5135 				break;
5136 			ret = 0;
5137 			count += next - start;
5138 		}
5139 		free_count += next - start;
5140 		start = next + 1;
5141 
5142 		if (fatal_signal_pending(current)) {
5143 			count = -ERESTARTSYS;
5144 			break;
5145 		}
5146 
5147 		if (need_resched()) {
5148 			ext4_unlock_group(sb, group);
5149 			cond_resched();
5150 			ext4_lock_group(sb, group);
5151 		}
5152 
5153 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
5154 			break;
5155 	}
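
	/*
	 * For illustration (an editorial sketch): with minblocks = 4 and
	 * free runs of lengths 2, 6, 3 and 5 in the scanned range, only
	 * the 6- and 5-block runs are trimmed, so count ends up 11 while
	 * free_count reaches 16.  The early exit above fires once the
	 * free blocks not yet scanned (bb_free - free_count) drop below
	 * minblocks, since no remaining run can reach the minimum.
	 */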
5156 
5157 	if (!ret) {
5158 		ret = count;
5159 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5160 	}
5161 out:
5162 	ext4_unlock_group(sb, group);
5163 	ext4_mb_unload_buddy(&e4b);
5164 
5165 	ext4_debug("trimmed %d blocks in the group %d\n",
5166 		count, group);
5167 
5168 	return ret;
5169 }
5170 
5171 /**
5172  * ext4_trim_fs() -- trim ioctl handler function
5173  * @sb:			superblock for filesystem
5174  * @range:		fstrim_range structure
5175  *
5176  * start:	first byte to trim
5177  * len:		number of bytes to trim from start
5178  * minlen:	minimum extent length in bytes
5179  * ext4_trim_fs goes through all allocation groups containing bytes from
5180  * start to start+len. For each such group the ext4_trim_all_free function
5181  * is invoked to trim all free space.
5182  */
5183 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5184 {
5185 	struct ext4_group_info *grp;
5186 	ext4_group_t group, first_group, last_group;
5187 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5188 	uint64_t start, end, minlen, trimmed = 0;
5189 	ext4_fsblk_t first_data_blk =
5190 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5191 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5192 	int ret = 0;
5193 
5194 	start = range->start >> sb->s_blocksize_bits;
5195 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
5196 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5197 			      range->minlen >> sb->s_blocksize_bits);
5198 
5199 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5200 	    start >= max_blks ||
5201 	    range->len < sb->s_blocksize)
5202 		return -EINVAL;
5203 	if (end >= max_blks)
5204 		end = max_blks - 1;
5205 	if (end <= first_data_blk)
5206 		goto out;
5207 	if (start < first_data_blk)
5208 		start = first_data_blk;
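
	/*
	 * A worked example of the conversion above (an editorial sketch,
	 * assuming 4KiB blocks and one block per cluster):
	 * range->start = 1MiB gives start = 256, range->len = 8MiB gives
	 * end = 256 + 2048 - 1 = 2303, and range->minlen = 64KiB gives
	 * minlen = 16 clusters.  The groups overlapping blocks 256..2303
	 * are then scanned in the loop below.
	 */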
5209 
5210 	/* Determine first and last group to examine based on start and end */
5211 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5212 				     &first_group, &first_cluster);
5213 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5214 				     &last_group, &last_cluster);
5215 
5216 	/* end now represents the last cluster to discard in this group */
5217 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5218 
5219 	for (group = first_group; group <= last_group; group++) {
5220 		grp = ext4_get_group_info(sb, group);
5221 		/* We only do this if the grp has never been initialized */
5222 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5223 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
5224 			if (ret)
5225 				break;
5226 		}
5227 
5228 		/*
5229 		 * For all the groups except the last one, last cluster will
5230 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5231 		 * change it for the last group, note that last_cluster is
5232 		 * already computed earlier by ext4_get_group_no_and_offset()
5233 		 */
5234 		if (group == last_group)
5235 			end = last_cluster;
5236 
5237 		if (grp->bb_free >= minlen) {
5238 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5239 						end, minlen);
5240 			if (cnt < 0) {
5241 				ret = cnt;
5242 				break;
5243 			}
5244 			trimmed += cnt;
5245 		}
5246 
5247 		/*
5248 		 * For every group except the first one, we are sure
5249 		 * that the first cluster to discard will be cluster #0.
5250 		 */
5251 		first_cluster = 0;
5252 	}
5253 
5254 	if (!ret)
5255 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5256 
5257 out:
5258 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5259 	return ret;
5260 }
5261