/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */
/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy.  The information involves
 * the block bitmap and buddy information, which are stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.  So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks.  So it can have information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
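
/*
 * Illustrative sketch of the buddy cache layout described above (the
 * numbers here are an example, not taken from the code): with 4 KiB
 * blocks and 4 KiB pages, blocks_per_page == 1, so a group's bitmap and
 * buddy blocks land on two consecutive pages of the buddy cache inode.
 * The mapping from a group to its page and in-page offset is just:
 *
 *	block = group * 2;			bitmap block
 *	pnum  = block / blocks_per_page;	page index in the cache
 *	poff  = block % blocks_per_page;	block offset within the page
 *
 * e.g. with 4 KiB blocks on a hypothetical 16 KiB-page machine
 * (blocks_per_page == 4), group 3 keeps its bitmap in page 1 at block
 * offset 2 and its buddy in page 1 at block offset 3.
 */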

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 *  to keep it simple, we don't use block numbers, instead we count numbers of
 *  blocks: how many blocks are marked used/free in the on-disk bitmap, buddy
 *  and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA		on-disk += N; PA -= N
 *  - discard locality group PA		buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual used
 *        bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if buddy has the same bit set or/and a PA
 *     covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA can be referenced, and while it is, no discard is possible
 *  - a PA is referenced until its block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA down to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */
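
/*
 * A small worked example of the accounting rules above (the numbers are
 * illustrative only): suppose the on-disk bitmap shows 100 used blocks
 * and one PA of 8 blocks exists.  Then init buddy marks
 * 100 + 8 = 108 blocks used in the buddy.  Using 3 blocks from the PA
 * moves on-disk to 103 and the PA to 5 while the buddy stays at 108.
 * Discarding the PA frees its 5 still-unused blocks in the buddy (found
 * by consulting the on-disk bitmap, per the note above), bringing the
 * buddy back to 103, in agreement with on-disk.
 */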

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
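
/*
 * For illustration, assuming BITS_PER_LONG == 64: a call with
 * addr == 0x1003 and *bit == 5 yields addr == 0x1000 and
 * *bit == 5 + (3 << 3) == 29, i.e. the same bit re-expressed relative
 * to the preceding 8-byte boundary.
 */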

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
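
/*
 * Layout sketch (assuming a 4 KiB block size, blkbits == 12), matching
 * the s_mb_offsets/s_mb_maxs tables used above: order 0 is the block
 * bitmap itself (32768 bits); within the buddy block, the order-1 map
 * starts at byte offset 0 with 16384 bits, order 2 at byte 2048 with
 * 8192 bits, order 3 at byte 3072 with 4096 bits, and so on, each map
 * half the size of the previous one.
 */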

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the blocks starting from @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered from this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
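
/*
 * Worked example: first == 5, len == 11 (clusters 5..15 free).  The
 * loop emits the chunks [5] (order 0), [6-7] (order 1) and [8-15]
 * (order 3): at each step max = ffs(first | border) - 1 limits the
 * chunk by the alignment of 'first', min = fls(len) - 1 limits it by
 * the remaining length, and the smaller of the two wins.
 */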

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "%u clusters in bitmap, %u in gd",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}
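
/*
 * Example of the scan above (numbers illustrative): a bitmap whose free
 * runs cover clusters 0-1, 4-5 and 7 yields free == 5 and
 * fragments == 3; the two multi-cluster runs are handed to
 * ext4_mb_mark_free_simple() while the single cluster only bumps
 * bb_counters[0].
 */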

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information, which are
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page groups,
 * which is blocks_per_page/2.
 *
 * Locking note:  This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If the page is uptodate then we came here after online
		 * resize which added some new uninitialized group info
		 * structs, so we must skip all initialized uptodate buddies
		 * on the page, which may be currently in use by an
		 * allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
			err = -ENOMEM;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
			err = -EIO;
			goto out;
		}
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;

		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is a block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is a block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure another parallel
 * init_group on the same buddy page doesn't happen while we hold the buddy
 * page lock. Return the locked buddy and bitmap pages in the e4b struct. If
 * buddy and bitmap are on the same page, e4b->bd_buddy_page is NULL and the
 * return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		page_cache_release(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		page_cache_release(e4b->bd_buddy_page);
	}
}

/*
 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * pages which map to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page in the page cache.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group;
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid on the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with the lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * has yet to initialize it. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		page_cache_release(page);
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = e4b->bd_buddy;
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}
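
/*
 * Example (4 KiB blocks): for block 6 the walk tests bit 3 of the
 * order-1 map, then bit 1 of the order-2 map, and so on; the first
 * clear bit found means block 6 lies inside a free buddy chunk of that
 * order, which is the order returned.
 */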

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}
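
/*
 * E.g. ext4_set_bits(bm, 30, 40) sets bits 30 and 31 individually,
 * stores bits 32-63 with a single 32-bit write on the fast path, then
 * sets bits 64-69 individually again.
 */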

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, e4b->bd_bitmap);
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, e4b->bd_bitmap);
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, e4b->bd_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), block);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing already freed block "
					      "(bit %u)", block);
		}
		mb_clear_bit(block, e4b->bd_bitmap);
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}
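
/*
 * Coalescing sketch for the loop above: freeing block 5 while block 4
 * is already free first counts the new block at order 0, then the loop
 * removes both order-0 blocks from the counters, marks chunk 2 of the
 * order-1 buddy map free instead (bb_counters[1]++), and repeats one
 * level up whenever the new chunk's buddy is free as well, so the
 * counters always describe the largest possible free chunks.
 */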

static int mb_find_extent(struct ext4_buddy *e4b, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max, order;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);
	block = block >> order;

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
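
/*
 * Splitting sketch, the mirror image of mb_free_blocks(): allocating
 * blocks 8-9 out of a free order-3 chunk 8-15 marks the order-3 chunk
 * used, re-marks its halves 8-11 and 12-15 free at order 2, splits
 * 8-11 the same way, and finally takes 8-9 whole at order 1, leaving
 * 10-11 free at order 1 and 12-15 free at order 2.
 */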

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache() call for this
	 * group until we update the bitmap. That could mean we
	 * allocate blocks twice. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found a good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is the first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If the newly found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than the previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than the previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, the caller may want to merge even a small
		 * number of blocks into an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from the given order
 * up to the max order and tries to find a big enough chunk to satisfy
 * the request
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}
1755 
1756 /*
1757  * The routine scans the group and measures all found extents.
1758  * In order to optimize scanning, the caller must pass the number of
1759  * free blocks in the group, so the routine knows the upper limit.
1760  */
1761 static noinline_for_stack
1762 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1763 					struct ext4_buddy *e4b)
1764 {
1765 	struct super_block *sb = ac->ac_sb;
1766 	void *bitmap = e4b->bd_bitmap;
1767 	struct ext4_free_extent ex;
1768 	int i;
1769 	int free;
1770 
1771 	free = e4b->bd_info->bb_free;
1772 	BUG_ON(free <= 0);
1773 
1774 	i = e4b->bd_info->bb_first_free;
1775 
1776 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1777 		i = mb_find_next_zero_bit(bitmap,
1778 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1779 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1780 			/*
1781 			 * If we have a corrupt bitmap, we won't find any
1782 			 * free blocks even though group info says we
1783 			 * have free blocks
1784 			 */
1785 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1786 					"%d free clusters as per "
1787 					"group info. But bitmap says 0",
1788 					free);
1789 			break;
1790 		}
1791 
1792 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
1793 		BUG_ON(ex.fe_len <= 0);
1794 		if (free < ex.fe_len) {
1795 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1796 					"%d free clusters as per "
1797 					"group info. But got %d blocks",
1798 					free, ex.fe_len);
1799 			/*
1800 			 * The number of free blocks differs. This mostly
1801 			 * indicates that the bitmap is corrupt. So exit
1802 			 * without claiming the space.
1803 			 */
1804 			break;
1805 		}
1806 
1807 		ext4_mb_measure_extent(ac, &ex, e4b);
1808 
1809 		i += ex.fe_len;
1810 		free -= ex.fe_len;
1811 	}
1812 
1813 	ext4_mb_check_limits(ac, e4b, 1);
1814 }
1815 
1816 /*
1817  * This is a special case for storage like RAID5:
1818  * we try to find stripe-aligned chunks for stripe-size-multiple requests
1819  */
1820 static noinline_for_stack
1821 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1822 				 struct ext4_buddy *e4b)
1823 {
1824 	struct super_block *sb = ac->ac_sb;
1825 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1826 	void *bitmap = e4b->bd_bitmap;
1827 	struct ext4_free_extent ex;
1828 	ext4_fsblk_t first_group_block;
1829 	ext4_fsblk_t a;
1830 	ext4_grpblk_t i;
1831 	int max;
1832 
1833 	BUG_ON(sbi->s_stripe == 0);
1834 
1835 	/* find first stripe-aligned block in group */
1836 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1837 
1838 	a = first_group_block + sbi->s_stripe - 1;
1839 	do_div(a, sbi->s_stripe);
1840 	i = (a * sbi->s_stripe) - first_group_block;
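	/*
	 * Worked example with assumed values: first_group_block = 32769,
	 * s_stripe = 16.  Then a = (32769 + 15) / 16 = 2049 and
	 * i = 2049 * 16 - 32769 = 15, the offset of the first
	 * stripe-aligned block within this group.
	 */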
1841 
1842 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
1843 		if (!mb_test_bit(i, bitmap)) {
1844 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
1845 			if (max >= sbi->s_stripe) {
1846 				ac->ac_found++;
1847 				ac->ac_b_ex = ex;
1848 				ext4_mb_use_best_found(ac, e4b);
1849 				break;
1850 			}
1851 		}
1852 		i += sbi->s_stripe;
1853 	}
1854 }
1855 
1856 /* This is now called BEFORE we load the buddy bitmap. */
1857 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1858 				ext4_group_t group, int cr)
1859 {
1860 	unsigned free, fragments;
1861 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1862 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1863 
1864 	BUG_ON(cr < 0 || cr >= 4);
1865 
1866 	free = grp->bb_free;
1867 	if (free == 0)
1868 		return 0;
1869 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
1870 		return 0;
1871 
1872 	/* We only do this if the grp has never been initialized */
1873 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1874 		int ret = ext4_mb_init_group(ac->ac_sb, group);
1875 		if (ret)
1876 			return 0;
1877 	}
1878 
1879 	fragments = grp->bb_fragments;
1880 	if (fragments == 0)
1881 		return 0;
1882 
1883 	switch (cr) {
1884 	case 0:
1885 		BUG_ON(ac->ac_2order == 0);
1886 
1887 		if (grp->bb_largest_free_order < ac->ac_2order)
1888 			return 0;
1889 
1890 		/* Avoid using the first bg of a flexgroup for data files */
1891 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1892 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1893 		    ((group % flex_size) == 0))
1894 			return 0;
1895 
1896 		return 1;
1897 	case 1:
1898 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1899 			return 1;
1900 		break;
1901 	case 2:
1902 		if (free >= ac->ac_g_ex.fe_len)
1903 			return 1;
1904 		break;
1905 	case 3:
1906 		return 1;
1907 	default:
1908 		BUG();
1909 	}
1910 
1911 	return 0;
1912 }
1913 
1914 static noinline_for_stack int
1915 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1916 {
1917 	ext4_group_t ngroups, group, i;
1918 	int cr;
1919 	int err = 0;
1920 	struct ext4_sb_info *sbi;
1921 	struct super_block *sb;
1922 	struct ext4_buddy e4b;
1923 
1924 	sb = ac->ac_sb;
1925 	sbi = EXT4_SB(sb);
1926 	ngroups = ext4_get_groups_count(sb);
1927 	/* non-extent files are limited to low blocks/groups */
1928 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1929 		ngroups = sbi->s_blockfile_groups;
1930 
1931 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1932 
1933 	/* first, try the goal */
1934 	err = ext4_mb_find_by_goal(ac, &e4b);
1935 	if (err || ac->ac_status == AC_STATUS_FOUND)
1936 		goto out;
1937 
1938 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1939 		goto out;
1940 
1941 	/*
1942 	 * ac->ac_2order is set only if the fe_len is a power of 2;
1943 	 * if ac_2order is set we also set the criteria to 0 so that we
1944 	 * try an exact allocation using the buddy.
1945 	 */
1946 	i = fls(ac->ac_g_ex.fe_len);
1947 	ac->ac_2order = 0;
1948 	/*
1949 	 * We search using buddy data only if the order of the request
1950 	 * is greater than or equal to sbi->s_mb_order2_reqs.
1951 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1952 	 */
1953 	if (i >= sbi->s_mb_order2_reqs) {
1954 		/*
1955 		 * This should tell if fe_len is exactly a power of 2
1956 		 */
1957 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1958 			ac->ac_2order = i - 1;
1959 	}
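	/*
	 * Illustrative sketch (compiled out, not part of mballoc): how the
	 * power-of-2 test above classifies a few request sizes.  demo_fls()
	 * stands in for the kernel's fls(), and the sbi->s_mb_order2_reqs
	 * gate is ignored; everything here is assumed demo scaffolding.
	 */
#if 0
#include <stdio.h>

static int demo_fls(unsigned int x)
{
	int r = 0;

	while (x) {		/* index of the highest set bit, 1-based */
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int lens[] = { 8, 16, 24 };
	int n;

	for (n = 0; n < 3; n++) {
		int i = demo_fls(lens[n]);
		int order = ((lens[n] & (~(1 << (i - 1)))) == 0) ? i - 1 : 0;

		/* prints 8 -> 3, 16 -> 4, 24 -> 0 (24 is not a power of 2) */
		printf("fe_len=%u -> ac_2order=%d\n", lens[n], order);
	}
	return 0;
}
#endif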
1960 
1961 	/* if stream allocation is enabled, use global goal */
1962 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1963 		/* TBD: may be a hot spot */
1964 		spin_lock(&sbi->s_md_lock);
1965 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1966 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1967 		spin_unlock(&sbi->s_md_lock);
1968 	}
1969 
1970 	/* Let's just scan groups to find more or less suitable blocks */
1971 	cr = ac->ac_2order ? 0 : 1;
1972 	/*
1973 	 * cr == 0 try to get exact allocation,
1974 	 * cr == 3  try to get anything
1975 	 */
1976 repeat:
1977 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1978 		ac->ac_criteria = cr;
1979 		/*
1980 		 * start searching for the right group
1981 		 * from the specified goal value
1982 		 */
1983 		group = ac->ac_g_ex.fe_group;
1984 
1985 		for (i = 0; i < ngroups; group++, i++) {
1986 			if (group == ngroups)
1987 				group = 0;
1988 
1989 			/* This now checks without needing the buddy page */
1990 			if (!ext4_mb_good_group(ac, group, cr))
1991 				continue;
1992 
1993 			err = ext4_mb_load_buddy(sb, group, &e4b);
1994 			if (err)
1995 				goto out;
1996 
1997 			ext4_lock_group(sb, group);
1998 
1999 			/*
2000 			 * We need to check again after locking the
2001 			 * block group
2002 			 */
2003 			if (!ext4_mb_good_group(ac, group, cr)) {
2004 				ext4_unlock_group(sb, group);
2005 				ext4_mb_unload_buddy(&e4b);
2006 				continue;
2007 			}
2008 
2009 			ac->ac_groups_scanned++;
2010 			if (cr == 0)
2011 				ext4_mb_simple_scan_group(ac, &e4b);
2012 			else if (cr == 1 && sbi->s_stripe &&
2013 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2014 				ext4_mb_scan_aligned(ac, &e4b);
2015 			else
2016 				ext4_mb_complex_scan_group(ac, &e4b);
2017 
2018 			ext4_unlock_group(sb, group);
2019 			ext4_mb_unload_buddy(&e4b);
2020 
2021 			if (ac->ac_status != AC_STATUS_CONTINUE)
2022 				break;
2023 		}
2024 	}
2025 
2026 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2027 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2028 		/*
2029 		 * We've been searching too long. Let's try to allocate
2030 		 * the best chunk we've found so far
2031 		 */
2032 
2033 		ext4_mb_try_best_found(ac, &e4b);
2034 		if (ac->ac_status != AC_STATUS_FOUND) {
2035 			/*
2036 			 * Someone luckier has already allocated it.
2037 			 * The only thing we can do is just take the first
2038 			 * found block(s)
2039 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2040 			 */
2041 			ac->ac_b_ex.fe_group = 0;
2042 			ac->ac_b_ex.fe_start = 0;
2043 			ac->ac_b_ex.fe_len = 0;
2044 			ac->ac_status = AC_STATUS_CONTINUE;
2045 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2046 			cr = 3;
2047 			atomic_inc(&sbi->s_mb_lost_chunks);
2048 			goto repeat;
2049 		}
2050 	}
2051 out:
2052 	return err;
2053 }
2054 
2055 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2056 {
2057 	struct super_block *sb = seq->private;
2058 	ext4_group_t group;
2059 
2060 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2061 		return NULL;
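	/*
	 * Store group + 1 so that group 0 does not look like a NULL
	 * (end-of-sequence) return value; ext4_mb_seq_groups_show()
	 * undoes this with group--.
	 */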
2062 	group = *pos + 1;
2063 	return (void *) ((unsigned long) group);
2064 }
2065 
2066 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2067 {
2068 	struct super_block *sb = seq->private;
2069 	ext4_group_t group;
2070 
2071 	++*pos;
2072 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2073 		return NULL;
2074 	group = *pos + 1;
2075 	return (void *) ((unsigned long) group);
2076 }
2077 
2078 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2079 {
2080 	struct super_block *sb = seq->private;
2081 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2082 	int i;
2083 	int err, buddy_loaded = 0;
2084 	struct ext4_buddy e4b;
2085 	struct ext4_group_info *grinfo;
2086 	struct sg {
2087 		struct ext4_group_info info;
2088 		ext4_grpblk_t counters[16];
2089 	} sg;
2090 
2091 	group--;
2092 	if (group == 0)
2093 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2094 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2095 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2096 			   "group", "free", "frags", "first",
2097 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2098 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2099 
2100 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2101 		sizeof(struct ext4_group_info);
2102 	grinfo = ext4_get_group_info(sb, group);
2103 	/* Load the group info in memory only if not already loaded. */
2104 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2105 		err = ext4_mb_load_buddy(sb, group, &e4b);
2106 		if (err) {
2107 			seq_printf(seq, "#%-5u: I/O error\n", group);
2108 			return 0;
2109 		}
2110 		buddy_loaded = 1;
2111 	}
2112 
2113 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2114 
2115 	if (buddy_loaded)
2116 		ext4_mb_unload_buddy(&e4b);
2117 
2118 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2119 			sg.info.bb_fragments, sg.info.bb_first_free);
2120 	for (i = 0; i <= 13; i++)
2121 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2122 				sg.info.bb_counters[i] : 0);
2123 	seq_printf(seq, " ]\n");
2124 
2125 	return 0;
2126 }
2127 
2128 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2129 {
2130 }
2131 
2132 static const struct seq_operations ext4_mb_seq_groups_ops = {
2133 	.start  = ext4_mb_seq_groups_start,
2134 	.next   = ext4_mb_seq_groups_next,
2135 	.stop   = ext4_mb_seq_groups_stop,
2136 	.show   = ext4_mb_seq_groups_show,
2137 };
2138 
2139 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2140 {
2141 	struct super_block *sb = PDE(inode)->data;
2142 	int rc;
2143 
2144 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2145 	if (rc == 0) {
2146 		struct seq_file *m = file->private_data;
2147 		m->private = sb;
2148 	}
2149 	return rc;
2150 
2151 }
2152 
2153 static const struct file_operations ext4_mb_seq_groups_fops = {
2154 	.owner		= THIS_MODULE,
2155 	.open		= ext4_mb_seq_groups_open,
2156 	.read		= seq_read,
2157 	.llseek		= seq_lseek,
2158 	.release	= seq_release,
2159 };
2160 
2161 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2162 {
2163 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2164 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2165 
2166 	BUG_ON(!cachep);
2167 	return cachep;
2168 }
2169 
2170 /*
2171  * Allocate the top-level s_group_info array for the specified number
2172  * of groups
2173  */
2174 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2175 {
2176 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2177 	unsigned size;
2178 	struct ext4_group_info ***new_groupinfo;
2179 
2180 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2181 		EXT4_DESC_PER_BLOCK_BITS(sb);
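	/* e.g. (assumed geometry) with 4k blocks and 32-byte descriptors
	 * there are 128 descriptors per block (EXT4_DESC_PER_BLOCK_BITS
	 * == 7), so ngroups == 8193 needs (8193 + 127) >> 7 == 65
	 * top-level pointers */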
2182 	if (size <= sbi->s_group_info_size)
2183 		return 0;
2184 
2185 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2186 	new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
2187 	if (!new_groupinfo) {
2188 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2189 		return -ENOMEM;
2190 	}
2191 	if (sbi->s_group_info) {
2192 		memcpy(new_groupinfo, sbi->s_group_info,
2193 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2194 		ext4_kvfree(sbi->s_group_info);
2195 	}
2196 	sbi->s_group_info = new_groupinfo;
2197 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2198 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2199 		   sbi->s_group_info_size);
2200 	return 0;
2201 }
2202 
2203 /* Create and initialize ext4_group_info data for the given group. */
2204 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2205 			  struct ext4_group_desc *desc)
2206 {
2207 	int i;
2208 	int metalen = 0;
2209 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2210 	struct ext4_group_info **meta_group_info;
2211 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2212 
2213 	/*
2214 	 * First check if this group is the first of a reserved block.
2215 	 * If so, we have to allocate a new table of pointers
2216 	 * to ext4_group_info structures
2217 	 */
2218 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2219 		metalen = sizeof(*meta_group_info) <<
2220 			EXT4_DESC_PER_BLOCK_BITS(sb);
2221 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2222 		if (meta_group_info == NULL) {
2223 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2224 				 "for a buddy group");
2225 			goto exit_meta_group_info;
2226 		}
2227 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2228 			meta_group_info;
2229 	}
2230 
2231 	meta_group_info =
2232 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2233 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2234 
2235 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
2236 	if (meta_group_info[i] == NULL) {
2237 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2238 		goto exit_group_info;
2239 	}
2240 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2241 		&(meta_group_info[i]->bb_state));
2242 
2243 	/*
2244 	 * initialize bb_free to be able to skip
2245 	 * empty groups without initialization
2246 	 */
2247 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2248 		meta_group_info[i]->bb_free =
2249 			ext4_free_clusters_after_init(sb, group, desc);
2250 	} else {
2251 		meta_group_info[i]->bb_free =
2252 			ext4_free_group_clusters(sb, desc);
2253 	}
2254 
2255 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2256 	init_rwsem(&meta_group_info[i]->alloc_sem);
2257 	meta_group_info[i]->bb_free_root = RB_ROOT;
2258 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2259 
2260 #ifdef DOUBLE_CHECK
2261 	{
2262 		struct buffer_head *bh;
2263 		meta_group_info[i]->bb_bitmap =
2264 			kmalloc(sb->s_blocksize, GFP_KERNEL);
2265 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2266 		bh = ext4_read_block_bitmap(sb, group);
2267 		BUG_ON(bh == NULL);
2268 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2269 			sb->s_blocksize);
2270 		put_bh(bh);
2271 	}
2272 #endif
2273 
2274 	return 0;
2275 
2276 exit_group_info:
2277 	/* If a meta_group_info table has been allocated, release it now */
2278 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2279 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2280 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2281 	}
2282 exit_meta_group_info:
2283 	return -ENOMEM;
2284 } /* ext4_mb_add_groupinfo */
2285 
2286 static int ext4_mb_init_backend(struct super_block *sb)
2287 {
2288 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2289 	ext4_group_t i;
2290 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2291 	int err;
2292 	struct ext4_group_desc *desc;
2293 	struct kmem_cache *cachep;
2294 
2295 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2296 	if (err)
2297 		return err;
2298 
2299 	sbi->s_buddy_cache = new_inode(sb);
2300 	if (sbi->s_buddy_cache == NULL) {
2301 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2302 		goto err_freesgi;
2303 	}
2304 	/* To avoid potentially colliding with a valid on-disk inode number,
2305 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2306 	 * not in the inode hash, so it should never be found by iget(), but
2307 	 * this will avoid confusion if it ever shows up during debugging. */
2308 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2309 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2310 	for (i = 0; i < ngroups; i++) {
2311 		desc = ext4_get_group_desc(sb, i, NULL);
2312 		if (desc == NULL) {
2313 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2314 			goto err_freebuddy;
2315 		}
2316 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2317 			goto err_freebuddy;
2318 	}
2319 
2320 	return 0;
2321 
2322 err_freebuddy:
2323 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2324 	while (i-- > 0)
2325 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2326 	i = sbi->s_group_info_size;
2327 	while (i-- > 0)
2328 		kfree(sbi->s_group_info[i]);
2329 	iput(sbi->s_buddy_cache);
2330 err_freesgi:
2331 	ext4_kvfree(sbi->s_group_info);
2332 	return -ENOMEM;
2333 }
2334 
2335 static void ext4_groupinfo_destroy_slabs(void)
2336 {
2337 	int i;
2338 
2339 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2340 		if (ext4_groupinfo_caches[i])
2341 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2342 		ext4_groupinfo_caches[i] = NULL;
2343 	}
2344 }
2345 
2346 static int ext4_groupinfo_create_slab(size_t size)
2347 {
2348 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2349 	int slab_size;
2350 	int blocksize_bits = order_base_2(size);
2351 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2352 	struct kmem_cache *cachep;
2353 
2354 	if (cache_index >= NR_GRPINFO_CACHES)
2355 		return -EINVAL;
2356 
2357 	if (unlikely(cache_index < 0))
2358 		cache_index = 0;
2359 
2360 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2361 	if (ext4_groupinfo_caches[cache_index]) {
2362 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2363 		return 0;	/* Already created */
2364 	}
2365 
2366 	slab_size = offsetof(struct ext4_group_info,
2367 				bb_counters[blocksize_bits + 2]);
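	/* e.g. for 4k blocks (blocksize_bits == 12) the slab covers
	 * bb_counters[0..13], one counter per buddy order */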
2368 
2369 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2370 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2371 					NULL);
2372 
2373 	ext4_groupinfo_caches[cache_index] = cachep;
2374 
2375 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2376 	if (!cachep) {
2377 		printk(KERN_EMERG
2378 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2379 		return -ENOMEM;
2380 	}
2381 
2382 	return 0;
2383 }
2384 
2385 int ext4_mb_init(struct super_block *sb)
2386 {
2387 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2388 	unsigned i, j;
2389 	unsigned offset;
2390 	unsigned max;
2391 	int ret;
2392 
2393 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2394 
2395 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2396 	if (sbi->s_mb_offsets == NULL) {
2397 		ret = -ENOMEM;
2398 		goto out;
2399 	}
2400 
2401 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2402 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2403 	if (sbi->s_mb_maxs == NULL) {
2404 		ret = -ENOMEM;
2405 		goto out;
2406 	}
2407 
2408 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2409 	if (ret < 0)
2410 		goto out;
2411 
2412 	/* order 0 is regular bitmap */
2413 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2414 	sbi->s_mb_offsets[0] = 0;
2415 
2416 	i = 1;
2417 	offset = 0;
2418 	max = sb->s_blocksize << 2;
2419 	do {
2420 		sbi->s_mb_offsets[i] = offset;
2421 		sbi->s_mb_maxs[i] = max;
2422 		offset += 1 << (sb->s_blocksize_bits - i);
2423 		max = max >> 1;
2424 		i++;
2425 	} while (i <= sb->s_blocksize_bits + 1);
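	/*
	 * For a 4k block size (s_blocksize_bits == 12) the loop above
	 * builds:
	 *
	 *	order  0: offset    -	max 32768 bits	(plain bitmap)
	 *	order  1: offset    0	max 16384 bits
	 *	order  2: offset 2048	max  8192 bits
	 *	order  3: offset 3072	max  4096 bits
	 *	...
	 *	order 13: offset 4095	max     4 bits
	 *
	 * (offsets are in bytes into the buddy block)
	 */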
2426 
2427 	spin_lock_init(&sbi->s_md_lock);
2428 	spin_lock_init(&sbi->s_bal_lock);
2429 
2430 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2431 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2432 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2433 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2434 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2435 	/*
2436 	 * The default group preallocation is 512, which for 4k block
2437 	 * sizes translates to 2 megabytes.  However for bigalloc file
2438 	 * systems, this is probably too big (i.e., if the cluster size
2439 	 * is 1 megabyte, then group preallocation size becomes half a
2440 	 * gigabyte!).  As a default, we will keep a two megabyte
2441 	 * group prealloc size for cluster sizes up to 64k, and after
2442 	 * that, we will force a minimum group preallocation size of
2443 	 * 32 clusters.  This translates to 8 megs when the cluster
2444 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2445 	 * which seems reasonable as a default.
2446 	 */
2447 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2448 				       sbi->s_cluster_bits, 32);
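	/* e.g. (assumed geometry) with 4k blocks and 1M clusters,
	 * s_cluster_bits == 8: 512 >> 8 == 2, clamped up to 32 clusters
	 * (32M); with 64k clusters (s_cluster_bits == 4) it stays at
	 * 512 >> 4 == 32 clusters == 2M */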
2449 	/*
2450 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2451 	 * to the lowest multiple of s_stripe which is bigger than
2452 	 * the s_mb_group_prealloc as determined above. We want
2453 	 * the preallocation size to be an exact multiple of the
2454 	 * RAID stripe size so that preallocations don't fragment
2455 	 * the stripes.
2456 	 */
2457 	if (sbi->s_stripe > 1) {
2458 		sbi->s_mb_group_prealloc = roundup(
2459 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2460 	}
2461 
2462 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2463 	if (sbi->s_locality_groups == NULL) {
2464 		ret = -ENOMEM;
2465 		goto out_free_groupinfo_slab;
2466 	}
2467 	for_each_possible_cpu(i) {
2468 		struct ext4_locality_group *lg;
2469 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2470 		mutex_init(&lg->lg_mutex);
2471 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2472 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2473 		spin_lock_init(&lg->lg_prealloc_lock);
2474 	}
2475 
2476 	/* init file for buddy data */
2477 	ret = ext4_mb_init_backend(sb);
2478 	if (ret != 0)
2479 		goto out_free_locality_groups;
2480 
2481 	if (sbi->s_proc)
2482 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2483 				 &ext4_mb_seq_groups_fops, sb);
2484 
2485 	return 0;
2486 
2487 out_free_locality_groups:
2488 	free_percpu(sbi->s_locality_groups);
2489 	sbi->s_locality_groups = NULL;
2490 out_free_groupinfo_slab:
2491 	ext4_groupinfo_destroy_slabs();
2492 out:
2493 	kfree(sbi->s_mb_offsets);
2494 	sbi->s_mb_offsets = NULL;
2495 	kfree(sbi->s_mb_maxs);
2496 	sbi->s_mb_maxs = NULL;
2497 	return ret;
2498 }
2499 
2500 /* needs to be called with the ext4 group lock held */
2501 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2502 {
2503 	struct ext4_prealloc_space *pa;
2504 	struct list_head *cur, *tmp;
2505 	int count = 0;
2506 
2507 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2508 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2509 		list_del(&pa->pa_group_list);
2510 		count++;
2511 		kmem_cache_free(ext4_pspace_cachep, pa);
2512 	}
2513 	if (count)
2514 		mb_debug(1, "mballoc: %u PAs left\n", count);
2515 
2516 }
2517 
2518 int ext4_mb_release(struct super_block *sb)
2519 {
2520 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2521 	ext4_group_t i;
2522 	int num_meta_group_infos;
2523 	struct ext4_group_info *grinfo;
2524 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2525 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2526 
2527 	if (sbi->s_proc)
2528 		remove_proc_entry("mb_groups", sbi->s_proc);
2529 
2530 	if (sbi->s_group_info) {
2531 		for (i = 0; i < ngroups; i++) {
2532 			grinfo = ext4_get_group_info(sb, i);
2533 #ifdef DOUBLE_CHECK
2534 			kfree(grinfo->bb_bitmap);
2535 #endif
2536 			ext4_lock_group(sb, i);
2537 			ext4_mb_cleanup_pa(grinfo);
2538 			ext4_unlock_group(sb, i);
2539 			kmem_cache_free(cachep, grinfo);
2540 		}
2541 		num_meta_group_infos = (ngroups +
2542 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2543 			EXT4_DESC_PER_BLOCK_BITS(sb);
2544 		for (i = 0; i < num_meta_group_infos; i++)
2545 			kfree(sbi->s_group_info[i]);
2546 		ext4_kvfree(sbi->s_group_info);
2547 	}
2548 	kfree(sbi->s_mb_offsets);
2549 	kfree(sbi->s_mb_maxs);
2550 	if (sbi->s_buddy_cache)
2551 		iput(sbi->s_buddy_cache);
2552 	if (sbi->s_mb_stats) {
2553 		ext4_msg(sb, KERN_INFO,
2554 		       "mballoc: %u blocks %u reqs (%u success)",
2555 				atomic_read(&sbi->s_bal_allocated),
2556 				atomic_read(&sbi->s_bal_reqs),
2557 				atomic_read(&sbi->s_bal_success));
2558 		ext4_msg(sb, KERN_INFO,
2559 		      "mballoc: %u extents scanned, %u goal hits, "
2560 				"%u 2^N hits, %u breaks, %u lost",
2561 				atomic_read(&sbi->s_bal_ex_scanned),
2562 				atomic_read(&sbi->s_bal_goals),
2563 				atomic_read(&sbi->s_bal_2orders),
2564 				atomic_read(&sbi->s_bal_breaks),
2565 				atomic_read(&sbi->s_mb_lost_chunks));
2566 		ext4_msg(sb, KERN_INFO,
2567 		       "mballoc: %lu generated and it took %Lu",
2568 				sbi->s_mb_buddies_generated,
2569 				sbi->s_mb_generation_time);
2570 		ext4_msg(sb, KERN_INFO,
2571 		       "mballoc: %u preallocated, %u discarded",
2572 				atomic_read(&sbi->s_mb_preallocated),
2573 				atomic_read(&sbi->s_mb_discarded));
2574 	}
2575 
2576 	free_percpu(sbi->s_locality_groups);
2577 
2578 	return 0;
2579 }
2580 
2581 static inline int ext4_issue_discard(struct super_block *sb,
2582 		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
2583 {
2584 	ext4_fsblk_t discard_block;
2585 
2586 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2587 			 ext4_group_first_block_no(sb, block_group));
2588 	count = EXT4_C2B(EXT4_SB(sb), count);
2589 	trace_ext4_discard_blocks(sb,
2590 			(unsigned long long) discard_block, count);
2591 	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2592 }
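/*
 * Worked example with assumed values: on a bigalloc file system with
 * 16 blocks per cluster, cluster 5 of a group whose first block is
 * 98304 maps to discard_block = 98304 + (5 << 4) = 98384, and a count
 * of 2 clusters becomes 32 blocks.
 */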
2593 
2594 /*
2595  * This function is called by the jbd2 layer once the commit has finished,
2596  * so we know we can free the blocks that were released with that commit.
2597  */
2598 static void ext4_free_data_callback(struct super_block *sb,
2599 				    struct ext4_journal_cb_entry *jce,
2600 				    int rc)
2601 {
2602 	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
2603 	struct ext4_buddy e4b;
2604 	struct ext4_group_info *db;
2605 	int err, count = 0, count2 = 0;
2606 
2607 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2608 		 entry->efd_count, entry->efd_group, entry);
2609 
2610 	if (test_opt(sb, DISCARD)) {
2611 		err = ext4_issue_discard(sb, entry->efd_group,
2612 					 entry->efd_start_cluster,
2613 					 entry->efd_count);
2614 		if (err && err != -EOPNOTSUPP)
2615 			ext4_msg(sb, KERN_WARNING, "discard request in"
2616 				 " group:%d block:%d count:%d failed"
2617 				 " with %d", entry->efd_group,
2618 				 entry->efd_start_cluster,
2619 				 entry->efd_count, err);
2620 	}
2621 
2622 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2623 	/* we expect to find an existing buddy because it's pinned */
2624 	BUG_ON(err != 0);
2625 
2626 
2627 	db = e4b.bd_info;
2628 	/* there are blocks to put in buddy to make them really free */
2629 	count += entry->efd_count;
2630 	count2++;
2631 	ext4_lock_group(sb, entry->efd_group);
2632 	/* Take it out of per group rb tree */
2633 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2634 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2635 
2636 	/*
2637 	 * Clear the trimmed flag for the group so that the next
2638 	 * ext4_trim_fs can trim it.
2639 	 * If the volume is mounted with -o discard, online discard
2640 	 * is supported and the free blocks will be trimmed online.
2641 	 */
2642 	if (!test_opt(sb, DISCARD))
2643 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2644 
2645 	if (!db->bb_free_root.rb_node) {
2646 		/* No more items in the per-group rb tree;
2647 		 * balance refcounts from ext4_mb_free_metadata()
2648 		 */
2649 		page_cache_release(e4b.bd_buddy_page);
2650 		page_cache_release(e4b.bd_bitmap_page);
2651 	}
2652 	ext4_unlock_group(sb, entry->efd_group);
2653 	kmem_cache_free(ext4_free_data_cachep, entry);
2654 	ext4_mb_unload_buddy(&e4b);
2655 
2656 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2657 }
2658 
2659 #ifdef CONFIG_EXT4_DEBUG
2660 u8 mb_enable_debug __read_mostly;
2661 
2662 static struct dentry *debugfs_dir;
2663 static struct dentry *debugfs_debug;
2664 
2665 static void __init ext4_create_debugfs_entry(void)
2666 {
2667 	debugfs_dir = debugfs_create_dir("ext4", NULL);
2668 	if (debugfs_dir)
2669 		debugfs_debug = debugfs_create_u8("mballoc-debug",
2670 						  S_IRUGO | S_IWUSR,
2671 						  debugfs_dir,
2672 						  &mb_enable_debug);
2673 }
2674 
2675 static void ext4_remove_debugfs_entry(void)
2676 {
2677 	debugfs_remove(debugfs_debug);
2678 	debugfs_remove(debugfs_dir);
2679 }
2680 
2681 #else
2682 
2683 static void __init ext4_create_debugfs_entry(void)
2684 {
2685 }
2686 
2687 static void ext4_remove_debugfs_entry(void)
2688 {
2689 }
2690 
2691 #endif
2692 
2693 int __init ext4_init_mballoc(void)
2694 {
2695 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2696 					SLAB_RECLAIM_ACCOUNT);
2697 	if (ext4_pspace_cachep == NULL)
2698 		return -ENOMEM;
2699 
2700 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2701 				    SLAB_RECLAIM_ACCOUNT);
2702 	if (ext4_ac_cachep == NULL) {
2703 		kmem_cache_destroy(ext4_pspace_cachep);
2704 		return -ENOMEM;
2705 	}
2706 
2707 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2708 					   SLAB_RECLAIM_ACCOUNT);
2709 	if (ext4_free_data_cachep == NULL) {
2710 		kmem_cache_destroy(ext4_pspace_cachep);
2711 		kmem_cache_destroy(ext4_ac_cachep);
2712 		return -ENOMEM;
2713 	}
2714 	ext4_create_debugfs_entry();
2715 	return 0;
2716 }
2717 
2718 void ext4_exit_mballoc(void)
2719 {
2720 	/*
2721 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2722 	 * before destroying the slab cache.
2723 	 */
2724 	rcu_barrier();
2725 	kmem_cache_destroy(ext4_pspace_cachep);
2726 	kmem_cache_destroy(ext4_ac_cachep);
2727 	kmem_cache_destroy(ext4_free_data_cachep);
2728 	ext4_groupinfo_destroy_slabs();
2729 	ext4_remove_debugfs_entry();
2730 }
2731 
2732 
2733 /*
2734  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
2735  * Returns 0 on success or an error code
2736  */
2737 static noinline_for_stack int
2738 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2739 				handle_t *handle, unsigned int reserv_clstrs)
2740 {
2741 	struct buffer_head *bitmap_bh = NULL;
2742 	struct ext4_group_desc *gdp;
2743 	struct buffer_head *gdp_bh;
2744 	struct ext4_sb_info *sbi;
2745 	struct super_block *sb;
2746 	ext4_fsblk_t block;
2747 	int err, len;
2748 
2749 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2750 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2751 
2752 	sb = ac->ac_sb;
2753 	sbi = EXT4_SB(sb);
2754 
2755 	err = -EIO;
2756 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2757 	if (!bitmap_bh)
2758 		goto out_err;
2759 
2760 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2761 	if (err)
2762 		goto out_err;
2763 
2764 	err = -EIO;
2765 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2766 	if (!gdp)
2767 		goto out_err;
2768 
2769 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2770 			ext4_free_group_clusters(sb, gdp));
2771 
2772 	err = ext4_journal_get_write_access(handle, gdp_bh);
2773 	if (err)
2774 		goto out_err;
2775 
2776 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2777 
2778 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2779 	if (!ext4_data_block_valid(sbi, block, len)) {
2780 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2781 			   "fs metadata", block, block+len);
2782 		/* The file system is mounted not to panic on error,
2783 		 * so fix the bitmap and repeat the block allocation.
2784 		 * We leak some of the blocks here.
2785 		 */
2786 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2787 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2788 			      ac->ac_b_ex.fe_len);
2789 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2790 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2791 		if (!err)
2792 			err = -EAGAIN;
2793 		goto out_err;
2794 	}
2795 
2796 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2797 #ifdef AGGRESSIVE_CHECK
2798 	{
2799 		int i;
2800 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2801 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2802 						bitmap_bh->b_data));
2803 		}
2804 	}
2805 #endif
2806 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2807 		      ac->ac_b_ex.fe_len);
2808 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2809 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2810 		ext4_free_group_clusters_set(sb, gdp,
2811 					     ext4_free_clusters_after_init(sb,
2812 						ac->ac_b_ex.fe_group, gdp));
2813 	}
2814 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2815 	ext4_free_group_clusters_set(sb, gdp, len);
2816 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
2817 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2818 
2819 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2820 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
2821 	/*
2822 	 * Now reduce the dirty block count also. Should not go negative
2823 	 */
2824 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2825 		/* release all the reserved blocks if non delalloc */
2826 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2827 				   reserv_clstrs);
2828 
2829 	if (sbi->s_log_groups_per_flex) {
2830 		ext4_group_t flex_group = ext4_flex_group(sbi,
2831 							  ac->ac_b_ex.fe_group);
2832 		atomic_sub(ac->ac_b_ex.fe_len,
2833 			   &sbi->s_flex_groups[flex_group].free_clusters);
2834 	}
2835 
2836 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2837 	if (err)
2838 		goto out_err;
2839 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2840 
2841 out_err:
2842 	brelse(bitmap_bh);
2843 	return err;
2844 }
2845 
2846 /*
2847  * here we normalize the request for the locality group.
2848  * Group requests are normalized to s_mb_group_prealloc, which is rounded
2849  * up to a multiple of s_stripe if a stripe size was set via mount option.
2850  * s_mb_group_prealloc can be configured via
2851  * /sys/fs/ext4/<partition>/mb_group_prealloc
2852  *
2853  * XXX: should we try to preallocate more than the group has now?
2854  */
2855 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2856 {
2857 	struct super_block *sb = ac->ac_sb;
2858 	struct ext4_locality_group *lg = ac->ac_lg;
2859 
2860 	BUG_ON(lg == NULL);
2861 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2862 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
2863 		current->pid, ac->ac_g_ex.fe_len);
2864 }
2865 
2866 /*
2867  * Normalization means making the request better in terms of
2868  * size and alignment
2869  */
2870 static noinline_for_stack void
2871 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2872 				struct ext4_allocation_request *ar)
2873 {
2874 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2875 	int bsbits, max;
2876 	ext4_lblk_t end;
2877 	loff_t size, start_off;
2878 	loff_t orig_size __maybe_unused;
2879 	ext4_lblk_t start;
2880 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2881 	struct ext4_prealloc_space *pa;
2882 
2883 	/* only normalize data requests; metadata requests
2884 	   do not need preallocation */
2885 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2886 		return;
2887 
2888 	/* sometimes the caller may want exact blocks */
2889 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2890 		return;
2891 
2892 	/* caller may indicate that preallocation isn't
2893 	 * required (it's a tail, for example) */
2894 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2895 		return;
2896 
2897 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2898 		ext4_mb_normalize_group_request(ac);
2899 		return;
2900 	}
2901 
2902 	bsbits = ac->ac_sb->s_blocksize_bits;
2903 
2904 	/* first, let's learn the actual file size
2905 	 * assuming the current request is allocated */
2906 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
2907 	size = size << bsbits;
2908 	if (size < i_size_read(ac->ac_inode))
2909 		size = i_size_read(ac->ac_inode);
2910 	orig_size = size;
2911 
2912 	/* max size of free chunks */
2913 	max = 2 << bsbits;
2914 
2915 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
2916 		(req <= (size) || max <= (chunk_size))
2917 
2918 	/* first, try to predict filesize */
2919 	/* XXX: should this table be tunable? */
2920 	start_off = 0;
2921 	if (size <= 16 * 1024) {
2922 		size = 16 * 1024;
2923 	} else if (size <= 32 * 1024) {
2924 		size = 32 * 1024;
2925 	} else if (size <= 64 * 1024) {
2926 		size = 64 * 1024;
2927 	} else if (size <= 128 * 1024) {
2928 		size = 128 * 1024;
2929 	} else if (size <= 256 * 1024) {
2930 		size = 256 * 1024;
2931 	} else if (size <= 512 * 1024) {
2932 		size = 512 * 1024;
2933 	} else if (size <= 1024 * 1024) {
2934 		size = 1024 * 1024;
2935 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2936 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2937 						(21 - bsbits)) << 21;
2938 		size = 2 * 1024 * 1024;
2939 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2940 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2941 							(22 - bsbits)) << 22;
2942 		size = 4 * 1024 * 1024;
2943 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2944 					(8<<20)>>bsbits, max, 8 * 1024)) {
2945 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2946 							(23 - bsbits)) << 23;
2947 		size = 8 * 1024 * 1024;
2948 	} else {
2949 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2950 		size	  = ac->ac_o_ex.fe_len << bsbits;
2951 	}
2952 	size = size >> bsbits;
2953 	start = start_off >> bsbits;
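	/* e.g. a predicted size of 100k lands in the 128k bucket above,
	 * while a ~3M request falls into the 4M case and gets a
	 * 2M-aligned start_off with size = 2M (assumed values) */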
2954 
2955 	/* don't cover already allocated blocks in selected range */
2956 	if (ar->pleft && start <= ar->lleft) {
2957 		size -= ar->lleft + 1 - start;
2958 		start = ar->lleft + 1;
2959 	}
2960 	if (ar->pright && start + size - 1 >= ar->lright)
2961 		size -= start + size - ar->lright;
2962 
2963 	end = start + size;
2964 
2965 	/* check we don't cross already preallocated blocks */
2966 	rcu_read_lock();
2967 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2968 		ext4_lblk_t pa_end;
2969 
2970 		if (pa->pa_deleted)
2971 			continue;
2972 		spin_lock(&pa->pa_lock);
2973 		if (pa->pa_deleted) {
2974 			spin_unlock(&pa->pa_lock);
2975 			continue;
2976 		}
2977 
2978 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
2979 						  pa->pa_len);
2980 
2981 		/* PA must not overlap original request */
2982 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2983 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
2984 
2985 		/* skip PAs this normalized request doesn't overlap with */
2986 		if (pa->pa_lstart >= end || pa_end <= start) {
2987 			spin_unlock(&pa->pa_lock);
2988 			continue;
2989 		}
2990 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2991 
2992 		/* adjust start or end to be adjacent to this pa */
2993 		if (pa_end <= ac->ac_o_ex.fe_logical) {
2994 			BUG_ON(pa_end < start);
2995 			start = pa_end;
2996 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
2997 			BUG_ON(pa->pa_lstart > end);
2998 			end = pa->pa_lstart;
2999 		}
3000 		spin_unlock(&pa->pa_lock);
3001 	}
3002 	rcu_read_unlock();
3003 	size = end - start;
3004 
3005 	/* XXX: extra loop to check we really don't overlap preallocations */
3006 	rcu_read_lock();
3007 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3008 		ext4_lblk_t pa_end;
3009 
3010 		spin_lock(&pa->pa_lock);
3011 		if (pa->pa_deleted == 0) {
3012 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3013 							  pa->pa_len);
3014 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3015 		}
3016 		spin_unlock(&pa->pa_lock);
3017 	}
3018 	rcu_read_unlock();
3019 
3020 	if (start + size <= ac->ac_o_ex.fe_logical &&
3021 			start > ac->ac_o_ex.fe_logical) {
3022 		ext4_msg(ac->ac_sb, KERN_ERR,
3023 			 "start %lu, size %lu, fe_logical %lu",
3024 			 (unsigned long) start, (unsigned long) size,
3025 			 (unsigned long) ac->ac_o_ex.fe_logical);
3026 	}
3027 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3028 			start > ac->ac_o_ex.fe_logical);
3029 	BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
3030 
3031 	/* now prepare goal request */
3032 
3033 	/* XXX: is it better to align blocks with respect to logical
3034 	 * placement or to satisfy a big request as is */
3035 	ac->ac_g_ex.fe_logical = start;
3036 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3037 
3038 	/* define goal start in order to merge */
3039 	if (ar->pright && (ar->lright == (start + size))) {
3040 		/* merge to the right */
3041 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3042 						&ac->ac_f_ex.fe_group,
3043 						&ac->ac_f_ex.fe_start);
3044 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3045 	}
3046 	if (ar->pleft && (ar->lleft + 1 == start)) {
3047 		/* merge to the left */
3048 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3049 						&ac->ac_f_ex.fe_group,
3050 						&ac->ac_f_ex.fe_start);
3051 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3052 	}
3053 
3054 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3055 		(unsigned) orig_size, (unsigned) start);
3056 }
3057 
3058 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3059 {
3060 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3061 
3062 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3063 		atomic_inc(&sbi->s_bal_reqs);
3064 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3065 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3066 			atomic_inc(&sbi->s_bal_success);
3067 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3068 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3069 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3070 			atomic_inc(&sbi->s_bal_goals);
3071 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3072 			atomic_inc(&sbi->s_bal_breaks);
3073 	}
3074 
3075 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3076 		trace_ext4_mballoc_alloc(ac);
3077 	else
3078 		trace_ext4_mballoc_prealloc(ac);
3079 }
3080 
3081 /*
3082  * Called on failure; free up any blocks from the inode PA for this
3083  * context.  We don't need this for MB_GROUP_PA because we only change
3084  * pa_free in ext4_mb_release_context(), but on failure, we've already
3085  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3086  */
3087 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3088 {
3089 	struct ext4_prealloc_space *pa = ac->ac_pa;
3090 
3091 	if (pa && pa->pa_type == MB_INODE_PA)
3092 		pa->pa_free += ac->ac_b_ex.fe_len;
3093 }
3094 
3095 /*
3096  * use blocks preallocated to inode
3097  */
3098 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3099 				struct ext4_prealloc_space *pa)
3100 {
3101 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3102 	ext4_fsblk_t start;
3103 	ext4_fsblk_t end;
3104 	int len;
3105 
3106 	/* found preallocated blocks, use them */
3107 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3108 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3109 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3110 	len = EXT4_NUM_B2C(sbi, end - start);
3111 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3112 					&ac->ac_b_ex.fe_start);
3113 	ac->ac_b_ex.fe_len = len;
3114 	ac->ac_status = AC_STATUS_FOUND;
3115 	ac->ac_pa = pa;
3116 
3117 	BUG_ON(start < pa->pa_pstart);
3118 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3119 	BUG_ON(pa->pa_free < len);
3120 	pa->pa_free -= len;
3121 
3122 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3123 }
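/*
 * Worked example for ext4_mb_use_inode_pa() with assumed values and a
 * 1:1 block/cluster ratio: pa_lstart = 200, pa_pstart = 1000,
 * pa_len = 16, request at logical 204 for 8 blocks.  Then
 * start = 1000 + (204 - 200) = 1004, end = min(1016, 1012) = 1012,
 * len = 8, so the whole request is served from the pa.  A request at
 * logical 210 for 10 blocks would be clipped to len = 1016 - 1010 = 6.
 */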
3124 
3125 /*
3126  * use blocks preallocated to locality group
3127  */
3128 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3129 				struct ext4_prealloc_space *pa)
3130 {
3131 	unsigned int len = ac->ac_o_ex.fe_len;
3132 
3133 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3134 					&ac->ac_b_ex.fe_group,
3135 					&ac->ac_b_ex.fe_start);
3136 	ac->ac_b_ex.fe_len = len;
3137 	ac->ac_status = AC_STATUS_FOUND;
3138 	ac->ac_pa = pa;
3139 
3140 	/* we don't correct pa_pstart or pa_len here to avoid a
3141 	 * possible race when the group is being loaded concurrently;
3142 	 * instead we correct the pa later, after blocks are marked
3143 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3144 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3145 	 */
3146 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3147 }
3148 
3149 /*
3150  * Return the prealloc space that has the minimal distance
3151  * from the goal block. @cpa is the prealloc
3152  * space with the currently known minimal distance
3153  * from the goal block.
3154  */
3155 static struct ext4_prealloc_space *
3156 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3157 			struct ext4_prealloc_space *pa,
3158 			struct ext4_prealloc_space *cpa)
3159 {
3160 	ext4_fsblk_t cur_distance, new_distance;
3161 
3162 	if (cpa == NULL) {
3163 		atomic_inc(&pa->pa_count);
3164 		return pa;
3165 	}
3166 	cur_distance = abs(goal_block - cpa->pa_pstart);
3167 	new_distance = abs(goal_block - pa->pa_pstart);
3168 
3169 	if (cur_distance <= new_distance)
3170 		return cpa;
3171 
3172 	/* drop the previous reference */
3173 	atomic_dec(&cpa->pa_count);
3174 	atomic_inc(&pa->pa_count);
3175 	return pa;
3176 }
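/*
 * e.g. with an assumed goal_block of 5000: a current best @cpa at
 * pa_pstart 5600 (distance 600) is replaced by a @pa at pa_pstart
 * 4700 (distance 300), and the reference counts move accordingly.
 */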
3177 
3178 /*
3179  * search goal blocks in preallocated space
3180  */
3181 static noinline_for_stack int
3182 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3183 {
3184 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3185 	int order, i;
3186 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3187 	struct ext4_locality_group *lg;
3188 	struct ext4_prealloc_space *pa, *cpa = NULL;
3189 	ext4_fsblk_t goal_block;
3190 
3191 	/* only data can be preallocated */
3192 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3193 		return 0;
3194 
3195 	/* first, try per-file preallocation */
3196 	rcu_read_lock();
3197 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3198 
3199 		/* all fields in this condition don't change,
3200 		 * so we can skip locking for them */
3201 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3202 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3203 					       EXT4_C2B(sbi, pa->pa_len)))
3204 			continue;
3205 
3206 		/* non-extent files can't have physical blocks past 2^32 */
3207 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3208 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3209 		     EXT4_MAX_BLOCK_FILE_PHYS))
3210 			continue;
3211 
3212 		/* found preallocated blocks, use them */
3213 		spin_lock(&pa->pa_lock);
3214 		if (pa->pa_deleted == 0 && pa->pa_free) {
3215 			atomic_inc(&pa->pa_count);
3216 			ext4_mb_use_inode_pa(ac, pa);
3217 			spin_unlock(&pa->pa_lock);
3218 			ac->ac_criteria = 10;
3219 			rcu_read_unlock();
3220 			return 1;
3221 		}
3222 		spin_unlock(&pa->pa_lock);
3223 	}
3224 	rcu_read_unlock();
3225 
3226 	/* can we use group allocation? */
3227 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3228 		return 0;
3229 
3230 	/* inode may have no locality group for some reason */
3231 	lg = ac->ac_lg;
3232 	if (lg == NULL)
3233 		return 0;
3234 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3235 	if (order > PREALLOC_TB_SIZE - 1)
3236 		/* The max size of hash table is PREALLOC_TB_SIZE */
3237 		order = PREALLOC_TB_SIZE - 1;
3238 
3239 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3240 	/*
3241 	 * search for the prealloc space that has the
3242 	 * minimal distance from the goal block.
3243 	 */
3244 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3245 		rcu_read_lock();
3246 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3247 					pa_inode_list) {
3248 			spin_lock(&pa->pa_lock);
3249 			if (pa->pa_deleted == 0 &&
3250 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3251 
3252 				cpa = ext4_mb_check_group_pa(goal_block,
3253 								pa, cpa);
3254 			}
3255 			spin_unlock(&pa->pa_lock);
3256 		}
3257 		rcu_read_unlock();
3258 	}
3259 	if (cpa) {
3260 		ext4_mb_use_group_pa(ac, cpa);
3261 		ac->ac_criteria = 20;
3262 		return 1;
3263 	}
3264 	return 0;
3265 }
3266 
3267 /*
3268  * the function goes through all blocks freed in the group
3269  * but not yet committed and marks them used in the in-core bitmap.
3270  * The buddy must be generated from this bitmap.
3271  * Needs to be called with the ext4 group lock held
3272  */
3273 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3274 						ext4_group_t group)
3275 {
3276 	struct rb_node *n;
3277 	struct ext4_group_info *grp;
3278 	struct ext4_free_data *entry;
3279 
3280 	grp = ext4_get_group_info(sb, group);
3281 	n = rb_first(&(grp->bb_free_root));
3282 
3283 	while (n) {
3284 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3285 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3286 		n = rb_next(n);
3287 	}
3288 	return;
3289 }
3290 
3291 /*
3292  * the function goes through all preallocations in this group and marks them
3293  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3294  * Needs to be called with the ext4 group lock held
3295  */
3296 static noinline_for_stack
3297 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3298 					ext4_group_t group)
3299 {
3300 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3301 	struct ext4_prealloc_space *pa;
3302 	struct list_head *cur;
3303 	ext4_group_t groupnr;
3304 	ext4_grpblk_t start;
3305 	int preallocated = 0;
3306 	int len;
3307 
3308 	/* every form of preallocation discard loads the group first,
3309 	 * so the only competing code is preallocation use;
3310 	 * we don't need any locking here.
3311 	 * Notice we do NOT ignore preallocations with pa_deleted set,
3312 	 * otherwise we could leave used blocks available for
3313 	 * allocation in the buddy while a concurrent ext4_mb_put_pa()
3314 	 * is dropping the preallocation
3315 	 */
3316 	list_for_each(cur, &grp->bb_prealloc_list) {
3317 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3318 		spin_lock(&pa->pa_lock);
3319 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3320 					     &groupnr, &start);
3321 		len = pa->pa_len;
3322 		spin_unlock(&pa->pa_lock);
3323 		if (unlikely(len == 0))
3324 			continue;
3325 		BUG_ON(groupnr != group);
3326 		ext4_set_bits(bitmap, start, len);
3327 		preallocated += len;
3328 	}
3329 	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3330 }
3331 
3332 static void ext4_mb_pa_callback(struct rcu_head *head)
3333 {
3334 	struct ext4_prealloc_space *pa;
3335 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3336 	kmem_cache_free(ext4_pspace_cachep, pa);
3337 }
3338 
3339 /*
3340  * drops a reference to the preallocated space descriptor and frees
3341  * it if this was the last reference and the space is consumed
3342  */
3343 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3344 			struct super_block *sb, struct ext4_prealloc_space *pa)
3345 {
3346 	ext4_group_t grp;
3347 	ext4_fsblk_t grp_blk;
3348 
3349 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3350 		return;
3351 
3352 	/* in this short window concurrent discard can set pa_deleted */
3353 	spin_lock(&pa->pa_lock);
3354 	if (pa->pa_deleted == 1) {
3355 		spin_unlock(&pa->pa_lock);
3356 		return;
3357 	}
3358 
3359 	pa->pa_deleted = 1;
3360 	spin_unlock(&pa->pa_lock);
3361 
3362 	grp_blk = pa->pa_pstart;
3363 	/*
3364 	 * If doing group-based preallocation, pa_pstart may be in the
3365 	 * next group when pa is used up
3366 	 */
3367 	if (pa->pa_type == MB_GROUP_PA)
3368 		grp_blk--;
3369 
3370 	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3371 
3372 	/*
3373 	 * possible race:
3374 	 *
3375 	 *  P1 (buddy init)			P2 (regular allocation)
3376 	 *					find block B in PA
3377 	 *  copy on-disk bitmap to buddy
3378 	 *  					mark B in on-disk bitmap
3379 	 *					drop PA from group
3380 	 *  mark all PAs in buddy
3381 	 *
3382 	 * thus, P1 initializes buddy with B available. to prevent this
3383 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3384 	 * against that pair
3385 	 */
3386 	ext4_lock_group(sb, grp);
3387 	list_del(&pa->pa_group_list);
3388 	ext4_unlock_group(sb, grp);
3389 
3390 	spin_lock(pa->pa_obj_lock);
3391 	list_del_rcu(&pa->pa_inode_list);
3392 	spin_unlock(pa->pa_obj_lock);
3393 
3394 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3395 }
3396 
3397 /*
3398  * creates a new preallocated space for the given inode
3399  */
3400 static noinline_for_stack int
3401 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3402 {
3403 	struct super_block *sb = ac->ac_sb;
3404 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3405 	struct ext4_prealloc_space *pa;
3406 	struct ext4_group_info *grp;
3407 	struct ext4_inode_info *ei;
3408 
3409 	/* preallocate only when found space is larger than requested */
3410 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3411 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3412 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3413 
3414 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3415 	if (pa == NULL)
3416 		return -ENOMEM;
3417 
3418 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3419 		int winl;
3420 		int wins;
3421 		int win;
3422 		int offs;
3423 
3424 		/* we can't allocate as much as the normalizer wants,
3425 		 * so the found space must get a proper lstart
3426 		 * to cover the original request */
3427 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3428 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3429 
3430 		/* we're limited by the original request in that
3431 		 * its logical block must be covered anyway;
3432 		 * winl is the window we can move our chunk within */
3433 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3434 
3435 		/* also, we should cover the whole original request */
3436 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3437 
3438 		/* the smallest one defines the real window */
3439 		win = min(winl, wins);
3440 
3441 		offs = ac->ac_o_ex.fe_logical %
3442 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3443 		if (offs && offs < win)
3444 			win = offs;
3445 
3446 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3447 			EXT4_B2C(sbi, win);
3448 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3449 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3450 	}
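	/*
	 * Worked example with assumed values (1:1 block/cluster ratio):
	 * goal was logical 64 for 64 blocks, the original request logical
	 * 100 for 8 blocks, but only 32 blocks were found.  Then
	 * winl = 100 - 64 = 36, wins = 32 - 8 = 24 and
	 * offs = 100 % 32 = 4, so win = 4 and fe_logical becomes
	 * 100 - 4 = 96: [96, 128) still covers [100, 108).
	 */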
3451 
3452 	/* preallocation can change ac_b_ex, thus we store the actually
3453 	 * allocated blocks for history */
3454 	ac->ac_f_ex = ac->ac_b_ex;
3455 
3456 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3457 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3458 	pa->pa_len = ac->ac_b_ex.fe_len;
3459 	pa->pa_free = pa->pa_len;
3460 	atomic_set(&pa->pa_count, 1);
3461 	spin_lock_init(&pa->pa_lock);
3462 	INIT_LIST_HEAD(&pa->pa_inode_list);
3463 	INIT_LIST_HEAD(&pa->pa_group_list);
3464 	pa->pa_deleted = 0;
3465 	pa->pa_type = MB_INODE_PA;
3466 
3467 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3468 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3469 	trace_ext4_mb_new_inode_pa(ac, pa);
3470 
3471 	ext4_mb_use_inode_pa(ac, pa);
3472 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3473 
3474 	ei = EXT4_I(ac->ac_inode);
3475 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3476 
3477 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3478 	pa->pa_inode = ac->ac_inode;
3479 
3480 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3481 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3482 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3483 
3484 	spin_lock(pa->pa_obj_lock);
3485 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3486 	spin_unlock(pa->pa_obj_lock);
3487 
3488 	return 0;
3489 }
3490 
3491 /*
3492  * creates new preallocated space for the locality group the inode belongs to
3493  */
3494 static noinline_for_stack int
3495 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3496 {
3497 	struct super_block *sb = ac->ac_sb;
3498 	struct ext4_locality_group *lg;
3499 	struct ext4_prealloc_space *pa;
3500 	struct ext4_group_info *grp;
3501 
3502 	/* preallocate only when found space is larger than requested */
3503 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3504 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3505 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3506 
3507 	BUG_ON(ext4_pspace_cachep == NULL);
3508 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3509 	if (pa == NULL)
3510 		return -ENOMEM;
3511 
3512 	/* preallocation can change ac_b_ex, thus we store the actually
3513 	 * allocated blocks for history */
3514 	ac->ac_f_ex = ac->ac_b_ex;
3515 
3516 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3517 	pa->pa_lstart = pa->pa_pstart;
3518 	pa->pa_len = ac->ac_b_ex.fe_len;
3519 	pa->pa_free = pa->pa_len;
3520 	atomic_set(&pa->pa_count, 1);
3521 	spin_lock_init(&pa->pa_lock);
3522 	INIT_LIST_HEAD(&pa->pa_inode_list);
3523 	INIT_LIST_HEAD(&pa->pa_group_list);
3524 	pa->pa_deleted = 0;
3525 	pa->pa_type = MB_GROUP_PA;
3526 
3527 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3528 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3529 	trace_ext4_mb_new_group_pa(ac, pa);
3530 
3531 	ext4_mb_use_group_pa(ac, pa);
3532 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3533 
3534 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3535 	lg = ac->ac_lg;
3536 	BUG_ON(lg == NULL);
3537 
3538 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3539 	pa->pa_inode = NULL;
3540 
3541 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3542 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3543 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3544 
3545 	/*
3546 	 * We will later add the new pa to the right bucket
3547 	 * after updating the pa_free in ext4_mb_release_context
3548 	 */
3549 	return 0;
3550 }
3551 
3552 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3553 {
3554 	int err;
3555 
3556 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3557 		err = ext4_mb_new_group_pa(ac);
3558 	else
3559 		err = ext4_mb_new_inode_pa(ac);
3560 	return err;
3561 }
3562 
3563 /*
3564  * finds all unused blocks in the on-disk bitmap and frees them
3565  * in the in-core bitmap and buddy.
3566  * @pa must be unlinked from the inode and group lists, so that
3567  * nobody else can find/use it.
3568  * The caller MUST hold the group/inode locks.
3569  * TODO: optimize the case when there are no in-core structures yet
3570  */
3571 static noinline_for_stack int
3572 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3573 			struct ext4_prealloc_space *pa)
3574 {
3575 	struct super_block *sb = e4b->bd_sb;
3576 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3577 	unsigned int end;
3578 	unsigned int next;
3579 	ext4_group_t group;
3580 	ext4_grpblk_t bit;
3581 	unsigned long long grp_blk_start;
3582 	int err = 0;
3583 	int free = 0;
3584 
3585 	BUG_ON(pa->pa_deleted == 0);
3586 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3587 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3588 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3589 	end = bit + pa->pa_len;
3590 
3591 	while (bit < end) {
3592 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3593 		if (bit >= end)
3594 			break;
3595 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3596 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3597 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3598 			 (unsigned) next - bit, (unsigned) group);
3599 		free += next - bit;
3600 
3601 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3602 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3603 						    EXT4_C2B(sbi, bit)),
3604 					       next - bit);
3605 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3606 		bit = next + 1;
3607 	}
3608 	if (free != pa->pa_free) {
3609 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3610 			 "pa %p: logic %lu, phys. %lu, len %lu",
3611 			 pa, (unsigned long) pa->pa_lstart,
3612 			 (unsigned long) pa->pa_pstart,
3613 			 (unsigned long) pa->pa_len);
3614 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3615 					free, pa->pa_free);
3616 		/*
3617 		 * pa is already deleted so we use the value obtained
3618 		 * from the bitmap and continue.
3619 		 */
3620 	}
3621 	atomic_add(free, &sbi->s_mb_discarded);
3622 
3623 	return err;
3624 }
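/*
 * Illustration of the bitmap walk above (hypothetical bits): for a PA
 * covering bits [10, 18) where bits 10-11 and 15 are still set (in use),
 * mb_find_next_zero_bit() finds 12 and mb_find_next_bit() finds 15, so
 * blocks 12-14 are freed; the scan then resumes past the set bit, frees
 * 16-17, and stops at end with free == 5.
 */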
3625 
3626 static noinline_for_stack int
3627 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3628 				struct ext4_prealloc_space *pa)
3629 {
3630 	struct super_block *sb = e4b->bd_sb;
3631 	ext4_group_t group;
3632 	ext4_grpblk_t bit;
3633 
3634 	trace_ext4_mb_release_group_pa(sb, pa);
3635 	BUG_ON(pa->pa_deleted == 0);
3636 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3637 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3638 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3639 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3640 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3641 
3642 	return 0;
3643 }
3644 
3645 /*
3646  * releases all preallocations in given group
3647  *
3648  * first, we need to decide discard policy:
3649  * - when do we discard
3650  *   1) ENOSPC
3651  * - how many do we discard
3652  *   1) how many requested
3653  */
3654 static noinline_for_stack int
3655 ext4_mb_discard_group_preallocations(struct super_block *sb,
3656 					ext4_group_t group, int needed)
3657 {
3658 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3659 	struct buffer_head *bitmap_bh = NULL;
3660 	struct ext4_prealloc_space *pa, *tmp;
3661 	struct list_head list;
3662 	struct ext4_buddy e4b;
3663 	int err;
3664 	int busy = 0;
3665 	int free = 0;
3666 
3667 	mb_debug(1, "discard preallocation for group %u\n", group);
3668 
3669 	if (list_empty(&grp->bb_prealloc_list))
3670 		return 0;
3671 
3672 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3673 	if (bitmap_bh == NULL) {
3674 		ext4_error(sb, "Error reading block bitmap for %u", group);
3675 		return 0;
3676 	}
3677 
3678 	err = ext4_mb_load_buddy(sb, group, &e4b);
3679 	if (err) {
3680 		ext4_error(sb, "Error loading buddy information for %u", group);
3681 		put_bh(bitmap_bh);
3682 		return 0;
3683 	}
3684 
3685 	if (needed == 0)
3686 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
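	/*
	 * needed == 0 is taken to mean "discard everything": a group can
	 * never hold more than EXT4_CLUSTERS_PER_GROUP(sb) free clusters,
	 * so this target cannot be met early and every PA gets scanned.
	 */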
3687 
3688 	INIT_LIST_HEAD(&list);
3689 repeat:
3690 	ext4_lock_group(sb, group);
3691 	list_for_each_entry_safe(pa, tmp,
3692 				&grp->bb_prealloc_list, pa_group_list) {
3693 		spin_lock(&pa->pa_lock);
3694 		if (atomic_read(&pa->pa_count)) {
3695 			spin_unlock(&pa->pa_lock);
3696 			busy = 1;
3697 			continue;
3698 		}
3699 		if (pa->pa_deleted) {
3700 			spin_unlock(&pa->pa_lock);
3701 			continue;
3702 		}
3703 
3704 		/* seems this one can be freed ... */
3705 		pa->pa_deleted = 1;
3706 
3707 		/* we can trust pa_free ... */
3708 		free += pa->pa_free;
3709 
3710 		spin_unlock(&pa->pa_lock);
3711 
3712 		list_del(&pa->pa_group_list);
3713 		list_add(&pa->u.pa_tmp_list, &list);
3714 	}
3715 
3716 	/* if we still need more blocks and some PAs were used, try again */
3717 	if (free < needed && busy) {
3718 		busy = 0;
3719 		ext4_unlock_group(sb, group);
3720 		/*
3721 		 * Yield the CPU here so that we don't get soft lockup
3722 		 * in the non-preempt case.
3723 		 */
3724 		yield();
3725 		goto repeat;
3726 	}
3727 
3728 	/* found anything to free? */
3729 	if (list_empty(&list)) {
3730 		BUG_ON(free != 0);
3731 		goto out;
3732 	}
3733 
3734 	/* now free all selected PAs */
3735 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3736 
3737 		/* remove from object (inode or locality group) */
3738 		spin_lock(pa->pa_obj_lock);
3739 		list_del_rcu(&pa->pa_inode_list);
3740 		spin_unlock(pa->pa_obj_lock);
3741 
3742 		if (pa->pa_type == MB_GROUP_PA)
3743 			ext4_mb_release_group_pa(&e4b, pa);
3744 		else
3745 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3746 
3747 		list_del(&pa->u.pa_tmp_list);
3748 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3749 	}
3750 
3751 out:
3752 	ext4_unlock_group(sb, group);
3753 	ext4_mb_unload_buddy(&e4b);
3754 	put_bh(bitmap_bh);
3755 	return free;
3756 }
3757 
3758 /*
3759  * releases all unused preallocated blocks for the given inode
3760  *
3761  * It's important to discard preallocations under i_data_sem.
3762  * We don't want another block to be served from the prealloc
3763  * space while we are discarding the inode prealloc space.
3764  *
3765  * FIXME!! Make sure it is valid at all the call sites
3766  */
3767 void ext4_discard_preallocations(struct inode *inode)
3768 {
3769 	struct ext4_inode_info *ei = EXT4_I(inode);
3770 	struct super_block *sb = inode->i_sb;
3771 	struct buffer_head *bitmap_bh = NULL;
3772 	struct ext4_prealloc_space *pa, *tmp;
3773 	ext4_group_t group = 0;
3774 	struct list_head list;
3775 	struct ext4_buddy e4b;
3776 	int err;
3777 
3778 	if (!S_ISREG(inode->i_mode)) {
3779 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3780 		return;
3781 	}
3782 
3783 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3784 	trace_ext4_discard_preallocations(inode);
3785 
3786 	INIT_LIST_HEAD(&list);
3787 
3788 repeat:
3789 	/* first, collect all pa's in the inode */
3790 	spin_lock(&ei->i_prealloc_lock);
3791 	while (!list_empty(&ei->i_prealloc_list)) {
3792 		pa = list_entry(ei->i_prealloc_list.next,
3793 				struct ext4_prealloc_space, pa_inode_list);
3794 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3795 		spin_lock(&pa->pa_lock);
3796 		if (atomic_read(&pa->pa_count)) {
3797 			/* this shouldn't happen often - nobody should
3798 			 * use preallocation while we're discarding it */
3799 			spin_unlock(&pa->pa_lock);
3800 			spin_unlock(&ei->i_prealloc_lock);
3801 			ext4_msg(sb, KERN_ERR,
3802 				 "uh-oh! used pa while discarding");
3803 			WARN_ON(1);
3804 			schedule_timeout_uninterruptible(HZ);
3805 			goto repeat;
3806 
3807 		}
3808 		if (pa->pa_deleted == 0) {
3809 			pa->pa_deleted = 1;
3810 			spin_unlock(&pa->pa_lock);
3811 			list_del_rcu(&pa->pa_inode_list);
3812 			list_add(&pa->u.pa_tmp_list, &list);
3813 			continue;
3814 		}
3815 
3816 		/* someone is deleting pa right now */
3817 		spin_unlock(&pa->pa_lock);
3818 		spin_unlock(&ei->i_prealloc_lock);
3819 
3820 		/* we have to wait here because pa_deleted
3821 		 * doesn't mean pa is already unlinked from
3822 		 * the list. As we might be called from
3823 		 * ->clear_inode(), the inode would get freed
3824 		 * and a concurrent thread unlinking pa from
3825 		 * the inode's list could then access already
3826 		 * freed memory, which would be bad */
3827 
3828 		/* XXX: if this happens too often, we can
3829 		 * add a flag to force wait only in case
3830 		 * of ->clear_inode(), but not in case of
3831 		 * regular truncate */
3832 		schedule_timeout_uninterruptible(HZ);
3833 		goto repeat;
3834 	}
3835 	spin_unlock(&ei->i_prealloc_lock);
3836 
3837 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3838 		BUG_ON(pa->pa_type != MB_INODE_PA);
3839 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3840 
3841 		err = ext4_mb_load_buddy(sb, group, &e4b);
3842 		if (err) {
3843 			ext4_error(sb, "Error loading buddy information for %u",
3844 					group);
3845 			continue;
3846 		}
3847 
3848 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3849 		if (bitmap_bh == NULL) {
3850 			ext4_error(sb, "Error reading block bitmap for %u",
3851 					group);
3852 			ext4_mb_unload_buddy(&e4b);
3853 			continue;
3854 		}
3855 
3856 		ext4_lock_group(sb, group);
3857 		list_del(&pa->pa_group_list);
3858 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3859 		ext4_unlock_group(sb, group);
3860 
3861 		ext4_mb_unload_buddy(&e4b);
3862 		put_bh(bitmap_bh);
3863 
3864 		list_del(&pa->u.pa_tmp_list);
3865 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3866 	}
3867 }
3868 
3869 #ifdef CONFIG_EXT4_DEBUG
3870 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3871 {
3872 	struct super_block *sb = ac->ac_sb;
3873 	ext4_group_t ngroups, i;
3874 
3875 	if (!mb_enable_debug ||
3876 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
3877 		return;
3878 
3879 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
3880 			" Allocation context details:");
3881 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
3882 			ac->ac_status, ac->ac_flags);
3883 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
3884 		 	"goal %lu/%lu/%lu@%lu, "
3885 			"best %lu/%lu/%lu@%lu cr %d",
3886 			(unsigned long)ac->ac_o_ex.fe_group,
3887 			(unsigned long)ac->ac_o_ex.fe_start,
3888 			(unsigned long)ac->ac_o_ex.fe_len,
3889 			(unsigned long)ac->ac_o_ex.fe_logical,
3890 			(unsigned long)ac->ac_g_ex.fe_group,
3891 			(unsigned long)ac->ac_g_ex.fe_start,
3892 			(unsigned long)ac->ac_g_ex.fe_len,
3893 			(unsigned long)ac->ac_g_ex.fe_logical,
3894 			(unsigned long)ac->ac_b_ex.fe_group,
3895 			(unsigned long)ac->ac_b_ex.fe_start,
3896 			(unsigned long)ac->ac_b_ex.fe_len,
3897 			(unsigned long)ac->ac_b_ex.fe_logical,
3898 			(int)ac->ac_criteria);
3899 	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
3900 		 ac->ac_ex_scanned, ac->ac_found);
3901 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
3902 	ngroups = ext4_get_groups_count(sb);
3903 	for (i = 0; i < ngroups; i++) {
3904 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3905 		struct ext4_prealloc_space *pa;
3906 		ext4_grpblk_t start;
3907 		struct list_head *cur;
3908 		ext4_lock_group(sb, i);
3909 		list_for_each(cur, &grp->bb_prealloc_list) {
3910 			pa = list_entry(cur, struct ext4_prealloc_space,
3911 					pa_group_list);
3912 			spin_lock(&pa->pa_lock);
3913 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3914 						     NULL, &start);
3915 			spin_unlock(&pa->pa_lock);
3916 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
3917 			       start, pa->pa_len);
3918 		}
3919 		ext4_unlock_group(sb, i);
3920 
3921 		if (grp->bb_free == 0)
3922 			continue;
3923 		printk(KERN_ERR "%u: %d/%d \n",
3924 		       i, grp->bb_free, grp->bb_fragments);
3925 	}
3926 	printk(KERN_ERR "\n");
3927 }
3928 #else
3929 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3930 {
3931 	return;
3932 }
3933 #endif
3934 
3935 /*
3936  * We use locality group preallocation for small files. The size of the
3937  * file is determined by the current size or the resulting size after
3938  * allocation, whichever is larger.
3939  *
3940  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
3941  */
3942 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3943 {
3944 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3945 	int bsbits = ac->ac_sb->s_blocksize_bits;
3946 	loff_t size, isize;
3947 
3948 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3949 		return;
3950 
3951 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3952 		return;
3953 
3954 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3955 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3956 		>> bsbits;
3957 
3958 	if ((size == isize) &&
3959 	    !ext4_fs_is_busy(sbi) &&
3960 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3961 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3962 		return;
3963 	}
3964 
3965 	if (sbi->s_mb_group_prealloc <= 0) {
3966 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3967 		return;
3968 	}
3969 
3970 	/* don't use group allocation for large files */
3971 	size = max(size, isize);
3972 	if (size > sbi->s_mb_stream_request) {
3973 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3974 		return;
3975 	}
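	/*
	 * Worked example (hypothetical, 4KiB blocks): with the default
	 * mb_stream_req of 16 blocks, an allocation that leaves the file
	 * at 12 blocks (48KiB) falls through to locality group
	 * preallocation, while one that leaves it at 20 blocks (80KiB)
	 * takes the stream (per-inode) path above.
	 */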
3976 
3977 	BUG_ON(ac->ac_lg != NULL);
3978 	/*
3979 	 * locality group prealloc space is per-CPU. The reason for having
3980 	 * a per-CPU locality group is to reduce contention between block
3981 	 * requests from multiple CPUs.
3982 	 */
3983 	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3984 
3985 	/* we're going to use group allocation */
3986 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3987 
3988 	/* serialize all allocations in the group */
3989 	mutex_lock(&ac->ac_lg->lg_mutex);
3990 }
3991 
3992 static noinline_for_stack int
3993 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
3994 				struct ext4_allocation_request *ar)
3995 {
3996 	struct super_block *sb = ar->inode->i_sb;
3997 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3998 	struct ext4_super_block *es = sbi->s_es;
3999 	ext4_group_t group;
4000 	unsigned int len;
4001 	ext4_fsblk_t goal;
4002 	ext4_grpblk_t block;
4003 
4004 	/* we can't allocate > group size */
4005 	len = ar->len;
4006 
4007 	/* just a dirty hack to filter out too-big requests */
4008 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
4009 		len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
4010 
4011 	/* start searching from the goal */
4012 	goal = ar->goal;
4013 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4014 			goal >= ext4_blocks_count(es))
4015 		goal = le32_to_cpu(es->s_first_data_block);
4016 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4017 
4018 	/* set up allocation goals */
4019 	ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
4020 	ac->ac_status = AC_STATUS_CONTINUE;
4021 	ac->ac_sb = sb;
4022 	ac->ac_inode = ar->inode;
4023 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4024 	ac->ac_o_ex.fe_group = group;
4025 	ac->ac_o_ex.fe_start = block;
4026 	ac->ac_o_ex.fe_len = len;
4027 	ac->ac_g_ex = ac->ac_o_ex;
4028 	ac->ac_flags = ar->flags;
4029 
4030 	/* we have to define the context: will we work with a file or
4031 	 * a locality group? This is a policy decision, actually */
4032 	ext4_mb_group_or_file(ac);
4033 
4034 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4035 			"left: %u/%u, right %u/%u to %swritable\n",
4036 			(unsigned) ar->len, (unsigned) ar->logical,
4037 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4038 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4039 			(unsigned) ar->lright, (unsigned) ar->pright,
4040 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4041 	return 0;
4042 
4043 }
4044 
4045 static noinline_for_stack void
4046 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4047 					struct ext4_locality_group *lg,
4048 					int order, int total_entries)
4049 {
4050 	ext4_group_t group = 0;
4051 	struct ext4_buddy e4b;
4052 	struct list_head discard_list;
4053 	struct ext4_prealloc_space *pa, *tmp;
4054 
4055 	mb_debug(1, "discard locality group preallocation\n");
4056 
4057 	INIT_LIST_HEAD(&discard_list);
4058 
4059 	spin_lock(&lg->lg_prealloc_lock);
4060 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4061 						pa_inode_list) {
4062 		spin_lock(&pa->pa_lock);
4063 		if (atomic_read(&pa->pa_count)) {
4064 			/*
4065 			 * This is the pa that we just used
4066 			 * for block allocation. So don't
4067 			 * free that
4068 			 */
4069 			spin_unlock(&pa->pa_lock);
4070 			continue;
4071 		}
4072 		if (pa->pa_deleted) {
4073 			spin_unlock(&pa->pa_lock);
4074 			continue;
4075 		}
4076 		/* only lg prealloc space */
4077 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4078 
4079 		/* seems this one can be freed ... */
4080 		pa->pa_deleted = 1;
4081 		spin_unlock(&pa->pa_lock);
4082 
4083 		list_del_rcu(&pa->pa_inode_list);
4084 		list_add(&pa->u.pa_tmp_list, &discard_list);
4085 
4086 		total_entries--;
4087 		if (total_entries <= 5) {
4088 			/*
4089 			 * we want to keep only 5 entries,
4090 			 * allowing the list to grow to 8. This
4091 			 * makes sure we don't call discard
4092 			 * again soon for this list.
4093 			 */
4094 			break;
4095 		}
4096 	}
4097 	spin_unlock(&lg->lg_prealloc_lock);
4098 
4099 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4100 
4101 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4102 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4103 			ext4_error(sb, "Error loading buddy information for %u",
4104 					group);
4105 			continue;
4106 		}
4107 		ext4_lock_group(sb, group);
4108 		list_del(&pa->pa_group_list);
4109 		ext4_mb_release_group_pa(&e4b, pa);
4110 		ext4_unlock_group(sb, group);
4111 
4112 		ext4_mb_unload_buddy(&e4b);
4113 		list_del(&pa->u.pa_tmp_list);
4114 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4115 	}
4116 }
4117 
4118 /*
4119  * We have incremented pa_count. So it cannot be freed at this
4120  * point. Also we hold lg_mutex. So no parallel allocation is
4121  * possible from this lg. That means pa_free cannot be updated.
4122  *
4123  * A parallel ext4_mb_discard_group_preallocations is possible,
4124  * which can cause the lg_prealloc_list to be updated.
4125  */
4126 
4127 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4128 {
4129 	int order, added = 0, lg_prealloc_count = 1;
4130 	struct super_block *sb = ac->ac_sb;
4131 	struct ext4_locality_group *lg = ac->ac_lg;
4132 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4133 
4134 	order = fls(pa->pa_free) - 1;
4135 	if (order > PREALLOC_TB_SIZE - 1)
4136 		/* The max size of hash table is PREALLOC_TB_SIZE */
4137 		order = PREALLOC_TB_SIZE - 1;
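	/*
	 * Example: pa_free == 300 gives fls(300) - 1 == 8, so this PA is
	 * bucketed under order 8; any pa_free too large for the table is
	 * clamped to the last bucket, PREALLOC_TB_SIZE - 1.
	 */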
4138 	/* Add the prealloc space to lg */
4139 	rcu_read_lock();
4140 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4141 						pa_inode_list) {
4142 		spin_lock(&tmp_pa->pa_lock);
4143 		if (tmp_pa->pa_deleted) {
4144 			spin_unlock(&tmp_pa->pa_lock);
4145 			continue;
4146 		}
4147 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4148 			/* Add to the tail of the previous entry */
4149 			list_add_tail_rcu(&pa->pa_inode_list,
4150 						&tmp_pa->pa_inode_list);
4151 			added = 1;
4152 			/*
4153 			 * we want to count the total
4154 			 * number of entries in the list
4155 			 */
4156 		}
4157 		spin_unlock(&tmp_pa->pa_lock);
4158 		lg_prealloc_count++;
4159 	}
4160 	if (!added)
4161 		list_add_tail_rcu(&pa->pa_inode_list,
4162 					&lg->lg_prealloc_list[order]);
4163 	rcu_read_unlock();
4164 
4165 	/* Now trim the list to no more than 8 elements */
4166 	if (lg_prealloc_count > 8) {
4167 		ext4_mb_discard_lg_preallocations(sb, lg,
4168 						order, lg_prealloc_count);
4169 		return;
4170 	}
4171 	return;
4172 }
4173 
4174 /*
4175  * release all the resources used in the allocation
4176  */
4177 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4178 {
4179 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4180 	struct ext4_prealloc_space *pa = ac->ac_pa;
4181 	if (pa) {
4182 		if (pa->pa_type == MB_GROUP_PA) {
4183 			/* see comment in ext4_mb_use_group_pa() */
4184 			spin_lock(&pa->pa_lock);
4185 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4186 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4187 			pa->pa_free -= ac->ac_b_ex.fe_len;
4188 			pa->pa_len -= ac->ac_b_ex.fe_len;
4189 			spin_unlock(&pa->pa_lock);
4190 		}
4191 	}
4192 	if (pa) {
4193 		/*
4194 		 * We want to add the pa to the right bucket.
4195 		 * Remove it from the list and while adding
4196 		 * make sure the list to which we are adding
4197 		 * doesn't grow big.
4198 		 */
4199 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4200 			spin_lock(pa->pa_obj_lock);
4201 			list_del_rcu(&pa->pa_inode_list);
4202 			spin_unlock(pa->pa_obj_lock);
4203 			ext4_mb_add_n_trim(ac);
4204 		}
4205 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4206 	}
4207 	if (ac->ac_bitmap_page)
4208 		page_cache_release(ac->ac_bitmap_page);
4209 	if (ac->ac_buddy_page)
4210 		page_cache_release(ac->ac_buddy_page);
4211 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4212 		mutex_unlock(&ac->ac_lg->lg_mutex);
4213 	ext4_mb_collect_stats(ac);
4214 	return 0;
4215 }
4216 
4217 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4218 {
4219 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4220 	int ret;
4221 	int freed = 0;
4222 
4223 	trace_ext4_mb_discard_preallocations(sb, needed);
4224 	for (i = 0; i < ngroups && needed > 0; i++) {
4225 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4226 		freed += ret;
4227 		needed -= ret;
4228 	}
4229 
4230 	return freed;
4231 }
4232 
4233 /*
4234  * Main entry point into mballoc to allocate blocks
4235  * it tries to use preallocation first, then falls back
4236  * to usual allocation
4237  */
4238 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4239 				struct ext4_allocation_request *ar, int *errp)
4240 {
4241 	int freed;
4242 	struct ext4_allocation_context *ac = NULL;
4243 	struct ext4_sb_info *sbi;
4244 	struct super_block *sb;
4245 	ext4_fsblk_t block = 0;
4246 	unsigned int inquota = 0;
4247 	unsigned int reserv_clstrs = 0;
4248 
4249 	sb = ar->inode->i_sb;
4250 	sbi = EXT4_SB(sb);
4251 
4252 	trace_ext4_request_blocks(ar);
4253 
4254 	/* Allow to use superuser reservation for quota file */
4255 	if (IS_NOQUOTA(ar->inode))
4256 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4257 
4258 	/*
4259 	 * For delayed allocation, we could skip the ENOSPC and
4260 	 * EDQUOT check, as blocks and quotas have already been
4261 	 * reserved when the data was copied into the pagecache.
4262 	 */
4263 	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4264 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4265 	else {
4266 		/* Without delayed allocation we need to verify
4267 		 * there are enough free blocks to do the block allocation
4268 		 * and that the allocation doesn't exceed the quota limits.
4269 		 */
4270 		while (ar->len &&
4271 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4272 
4273 			/* let others free the space */
4274 			yield();
4275 			ar->len = ar->len >> 1;
4276 		}
4277 		if (!ar->len) {
4278 			*errp = -ENOSPC;
4279 			return 0;
4280 		}
4281 		reserv_clstrs = ar->len;
4282 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4283 			dquot_alloc_block_nofail(ar->inode,
4284 						 EXT4_C2B(sbi, ar->len));
4285 		} else {
4286 			while (ar->len &&
4287 				dquot_alloc_block(ar->inode,
4288 						  EXT4_C2B(sbi, ar->len))) {
4289 
4290 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4291 				ar->len--;
4292 			}
4293 		}
4294 		inquota = ar->len;
4295 		if (ar->len == 0) {
4296 			*errp = -EDQUOT;
4297 			goto out;
4298 		}
4299 	}
4300 
4301 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4302 	if (!ac) {
4303 		ar->len = 0;
4304 		*errp = -ENOMEM;
4305 		goto out;
4306 	}
4307 
4308 	*errp = ext4_mb_initialize_context(ac, ar);
4309 	if (*errp) {
4310 		ar->len = 0;
4311 		goto out;
4312 	}
4313 
4314 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4315 	if (!ext4_mb_use_preallocated(ac)) {
4316 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4317 		ext4_mb_normalize_request(ac, ar);
4318 repeat:
4319 		/* allocate space in core */
4320 		*errp = ext4_mb_regular_allocator(ac);
4321 		if (*errp) {
4322 			ext4_discard_allocated_blocks(ac);
4323 			goto errout;
4324 		}
4325 
4326 		/* as we've just preallocated more space than the
4327 		 * user originally requested, we store the allocated
4328 		 * space in a special descriptor */
4329 		if (ac->ac_status == AC_STATUS_FOUND &&
4330 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4331 			ext4_mb_new_preallocation(ac);
4332 	}
4333 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4334 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4335 		if (*errp == -EAGAIN) {
4336 			/*
4337 			 * drop the reference that we took
4338 			 * in ext4_mb_use_best_found
4339 			 */
4340 			ext4_mb_release_context(ac);
4341 			ac->ac_b_ex.fe_group = 0;
4342 			ac->ac_b_ex.fe_start = 0;
4343 			ac->ac_b_ex.fe_len = 0;
4344 			ac->ac_status = AC_STATUS_CONTINUE;
4345 			goto repeat;
4346 		} else if (*errp) {
4347 			ext4_discard_allocated_blocks(ac);
4348 			goto errout;
4349 		} else {
4350 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4351 			ar->len = ac->ac_b_ex.fe_len;
4352 		}
4353 	} else {
4354 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4355 		if (freed)
4356 			goto repeat;
4357 		*errp = -ENOSPC;
4358 	}
4359 
4360 errout:
4361 	if (*errp) {
4362 		ac->ac_b_ex.fe_len = 0;
4363 		ar->len = 0;
4364 		ext4_mb_show_ac(ac);
4365 	}
4366 	ext4_mb_release_context(ac);
4367 out:
4368 	if (ac)
4369 		kmem_cache_free(ext4_ac_cachep, ac);
4370 	if (inquota && ar->len < inquota)
4371 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4372 	if (!ar->len) {
4373 		if (!ext4_test_inode_state(ar->inode,
4374 					   EXT4_STATE_DELALLOC_RESERVED))
4375 			/* release all the reserved blocks if non delalloc */
4376 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4377 						reserv_clstrs);
4378 	}
4379 
4380 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4381 
4382 	return block;
4383 }
4384 
4385 /*
4386  * We can merge two free data extents only if the physical blocks
4387  * are contiguous, AND the extents were freed by the same transaction,
4388  * AND the blocks are associated with the same group.
4389  */
4390 static int can_merge(struct ext4_free_data *entry1,
4391 			struct ext4_free_data *entry2)
4392 {
4393 	if ((entry1->efd_tid == entry2->efd_tid) &&
4394 	    (entry1->efd_group == entry2->efd_group) &&
4395 	    ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4396 		return 1;
4397 	return 0;
4398 }
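/*
 * Example (hypothetical entries): {tid=42, group=7, start_cluster=100,
 * count=20} and {tid=42, group=7, start_cluster=120, count=5} merge into
 * a single 25-cluster extent, since 100 + 20 == 120.  Extents freed by
 * different transactions (different efd_tid) must stay separate so that
 * each becomes reusable only after its own transaction commits.
 */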
4399 
4400 static noinline_for_stack int
4401 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4402 		      struct ext4_free_data *new_entry)
4403 {
4404 	ext4_group_t group = e4b->bd_group;
4405 	ext4_grpblk_t cluster;
4406 	struct ext4_free_data *entry;
4407 	struct ext4_group_info *db = e4b->bd_info;
4408 	struct super_block *sb = e4b->bd_sb;
4409 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4410 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4411 	struct rb_node *parent = NULL, *new_node;
4412 
4413 	BUG_ON(!ext4_handle_valid(handle));
4414 	BUG_ON(e4b->bd_bitmap_page == NULL);
4415 	BUG_ON(e4b->bd_buddy_page == NULL);
4416 
4417 	new_node = &new_entry->efd_node;
4418 	cluster = new_entry->efd_start_cluster;
4419 
4420 	if (!*n) {
4421 		/* first free block extent. We need to
4422 		 * protect the buddy cache from being freed,
4423 		 * otherwise we'll refresh it from the
4424 		 * on-disk bitmap and lose not-yet-available
4425 		 * blocks */
4426 		page_cache_get(e4b->bd_buddy_page);
4427 		page_cache_get(e4b->bd_bitmap_page);
4428 	}
4429 	while (*n) {
4430 		parent = *n;
4431 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4432 		if (cluster < entry->efd_start_cluster)
4433 			n = &(*n)->rb_left;
4434 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4435 			n = &(*n)->rb_right;
4436 		else {
4437 			ext4_grp_locked_error(sb, group, 0,
4438 				ext4_group_first_block_no(sb, group) +
4439 				EXT4_C2B(sbi, cluster),
4440 				"Block already on to-be-freed list");
4441 			return 0;
4442 		}
4443 	}
4444 
4445 	rb_link_node(new_node, parent, n);
4446 	rb_insert_color(new_node, &db->bb_free_root);
4447 
4448 	/* Now see whether the extent can be merged to the left and right */
4449 	node = rb_prev(new_node);
4450 	if (node) {
4451 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4452 		if (can_merge(entry, new_entry)) {
4453 			new_entry->efd_start_cluster = entry->efd_start_cluster;
4454 			new_entry->efd_count += entry->efd_count;
4455 			rb_erase(node, &(db->bb_free_root));
4456 			ext4_journal_callback_del(handle, &entry->efd_jce);
4457 			kmem_cache_free(ext4_free_data_cachep, entry);
4458 		}
4459 	}
4460 
4461 	node = rb_next(new_node);
4462 	if (node) {
4463 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4464 		if (can_merge(new_entry, entry)) {
4465 			new_entry->efd_count += entry->efd_count;
4466 			rb_erase(node, &(db->bb_free_root));
4467 			ext4_journal_callback_del(handle, &entry->efd_jce);
4468 			kmem_cache_free(ext4_free_data_cachep, entry);
4469 		}
4470 	}
4471 	/* Add the extent to transaction's private list */
4472 	ext4_journal_callback_add(handle, ext4_free_data_callback,
4473 				  &new_entry->efd_jce);
4474 	return 0;
4475 }
4476 
4477 /**
4478  * ext4_free_blocks() -- Free given blocks and update quota
4479  * @handle:		handle for this transaction
4480  * @inode:		inode
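 * @bh:		buffer head of the block being freed, if any (may be NULL)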
4481  * @block:		start physical block to free
4482  * @count:		number of blocks to free
4483  * @flags:		flags used by ext4_free_blocks
4484  */
4485 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4486 		      struct buffer_head *bh, ext4_fsblk_t block,
4487 		      unsigned long count, int flags)
4488 {
4489 	struct buffer_head *bitmap_bh = NULL;
4490 	struct super_block *sb = inode->i_sb;
4491 	struct ext4_group_desc *gdp;
4492 	unsigned long freed = 0;
4493 	unsigned int overflow;
4494 	ext4_grpblk_t bit;
4495 	struct buffer_head *gd_bh;
4496 	ext4_group_t block_group;
4497 	struct ext4_sb_info *sbi;
4498 	struct ext4_buddy e4b;
4499 	unsigned int count_clusters;
4500 	int err = 0;
4501 	int ret;
4502 
4503 	if (bh) {
4504 		if (block)
4505 			BUG_ON(block != bh->b_blocknr);
4506 		else
4507 			block = bh->b_blocknr;
4508 	}
4509 
4510 	sbi = EXT4_SB(sb);
4511 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4512 	    !ext4_data_block_valid(sbi, block, count)) {
4513 		ext4_error(sb, "Freeing blocks not in datazone - "
4514 			   "block = %llu, count = %lu", block, count);
4515 		goto error_return;
4516 	}
4517 
4518 	ext4_debug("freeing block %llu\n", block);
4519 	trace_ext4_free_blocks(inode, block, count, flags);
4520 
4521 	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4522 		struct buffer_head *tbh = bh;
4523 		int i;
4524 
4525 		BUG_ON(bh && (count > 1));
4526 
4527 		for (i = 0; i < count; i++) {
4528 			if (!bh)
4529 				tbh = sb_find_get_block(inode->i_sb,
4530 							block + i);
4531 			if (unlikely(!tbh))
4532 				continue;
4533 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4534 				    inode, tbh, block + i);
4535 		}
4536 	}
4537 
4538 	/*
4539 	 * We need to make sure we don't reuse the freed block until
4540 	 * after the transaction is committed, which we can do by
4541 	 * treating the block as metadata, below.  We make an
4542 	 * exception if the inode is to be written in writeback mode
4543 	 * since writeback mode has weak data consistency guarantees.
4544 	 */
4545 	if (!ext4_should_writeback_data(inode))
4546 		flags |= EXT4_FREE_BLOCKS_METADATA;
4547 
4548 	/*
4549 	 * If the extent to be freed does not begin on a cluster
4550 	 * boundary, we need to deal with partial clusters at the
4551 	 * beginning and end of the extent.  Normally we will free
4552 	 * blocks at the beginning or the end unless we are explicitly
4553 	 * requested to avoid doing so.
4554 	 */
4555 	overflow = block & (sbi->s_cluster_ratio - 1);
4556 	if (overflow) {
4557 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4558 			overflow = sbi->s_cluster_ratio - overflow;
4559 			block += overflow;
4560 			if (count > overflow)
4561 				count -= overflow;
4562 			else
4563 				return;
4564 		} else {
4565 			block -= overflow;
4566 			count += overflow;
4567 		}
4568 	}
4569 	overflow = count & (sbi->s_cluster_ratio - 1);
4570 	if (overflow) {
4571 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4572 			if (count > overflow)
4573 				count -= overflow;
4574 			else
4575 				return;
4576 		} else
4577 			count += sbi->s_cluster_ratio - overflow;
4578 	}
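	/*
	 * Worked example (hypothetical bigalloc setup, s_cluster_ratio = 16):
	 * freeing block 70 with count 100.  70 & 15 == 6, so the range is
	 * widened to start at block 64 with count 106; then 106 & 15 == 10,
	 * so count grows by 6 to 112, i.e. exactly 7 whole clusters covering
	 * blocks [64, 176).
	 */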
4579 
4580 do_more:
4581 	overflow = 0;
4582 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4583 
4584 	/*
4585 	 * Check to see if we are freeing blocks across a group
4586 	 * boundary.
4587 	 */
4588 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4589 		overflow = EXT4_C2B(sbi, bit) + count -
4590 			EXT4_BLOCKS_PER_GROUP(sb);
4591 		count -= overflow;
4592 	}
4593 	count_clusters = EXT4_B2C(sbi, count);
4594 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4595 	if (!bitmap_bh) {
4596 		err = -EIO;
4597 		goto error_return;
4598 	}
4599 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4600 	if (!gdp) {
4601 		err = -EIO;
4602 		goto error_return;
4603 	}
4604 
4605 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4606 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4607 	    in_range(block, ext4_inode_table(sb, gdp),
4608 		     EXT4_SB(sb)->s_itb_per_group) ||
4609 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4610 		     EXT4_SB(sb)->s_itb_per_group)) {
4611 
4612 		ext4_error(sb, "Freeing blocks in system zone - "
4613 			   "Block = %llu, count = %lu", block, count);
4614 		/* err = 0. ext4_std_error should be a no-op */
4615 		goto error_return;
4616 	}
4617 
4618 	BUFFER_TRACE(bitmap_bh, "getting write access");
4619 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4620 	if (err)
4621 		goto error_return;
4622 
4623 	/*
4624 	 * We are about to modify some metadata.  Call the journal APIs
4625 	 * to unshare ->b_data if a currently-committing transaction is
4626 	 * using it
4627 	 */
4628 	BUFFER_TRACE(gd_bh, "get_write_access");
4629 	err = ext4_journal_get_write_access(handle, gd_bh);
4630 	if (err)
4631 		goto error_return;
4632 #ifdef AGGRESSIVE_CHECK
4633 	{
4634 		int i;
4635 		for (i = 0; i < count_clusters; i++)
4636 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4637 	}
4638 #endif
4639 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4640 
4641 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4642 	if (err)
4643 		goto error_return;
4644 
4645 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4646 		struct ext4_free_data *new_entry;
4647 		/*
4648 		 * The blocks being freed are metadata; they shouldn't
4649 		 * be reused until this transaction is committed
4650 		 */
4651 		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
4652 		if (!new_entry) {
4653 			ext4_mb_unload_buddy(&e4b);
4654 			err = -ENOMEM;
4655 			goto error_return;
4656 		}
4657 		new_entry->efd_start_cluster = bit;
4658 		new_entry->efd_group = block_group;
4659 		new_entry->efd_count = count_clusters;
4660 		new_entry->efd_tid = handle->h_transaction->t_tid;
4661 
4662 		ext4_lock_group(sb, block_group);
4663 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4664 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4665 	} else {
4666 		/* need to update group_info->bb_free and the bitmap
4667 		 * with the group lock held; generate_buddy looks at
4668 		 * them with the group lock held
4669 		 */
4670 		if (test_opt(sb, DISCARD)) {
4671 			err = ext4_issue_discard(sb, block_group, bit, count);
4672 			if (err && err != -EOPNOTSUPP)
4673 				ext4_msg(sb, KERN_WARNING, "discard request in"
4674 					 " group:%d block:%d count:%lu failed"
4675 					 " with %d", block_group, bit, count,
4676 					 err);
4677 		}
4678 
4679 
4680 		ext4_lock_group(sb, block_group);
4681 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4682 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4683 	}
4684 
4685 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4686 	ext4_free_group_clusters_set(sb, gdp, ret);
4687 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4688 	ext4_group_desc_csum_set(sb, block_group, gdp);
4689 	ext4_unlock_group(sb, block_group);
4690 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4691 
4692 	if (sbi->s_log_groups_per_flex) {
4693 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4694 		atomic_add(count_clusters,
4695 			   &sbi->s_flex_groups[flex_group].free_clusters);
4696 	}
4697 
4698 	ext4_mb_unload_buddy(&e4b);
4699 
4700 	freed += count;
4701 
4702 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4703 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4704 
4705 	/* We dirtied the bitmap block */
4706 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4707 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4708 
4709 	/* And the group descriptor block */
4710 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4711 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4712 	if (!err)
4713 		err = ret;
4714 
4715 	if (overflow && !err) {
4716 		block += count;
4717 		count = overflow;
4718 		put_bh(bitmap_bh);
4719 		goto do_more;
4720 	}
4721 error_return:
4722 	brelse(bitmap_bh);
4723 	ext4_std_error(sb, err);
4724 	return;
4725 }
4726 
4727 /**
4728  * ext4_group_add_blocks() -- Add given blocks to an existing group
4729  * @handle:			handle to this transaction
4730  * @sb:				super block
4731  * @block:			start physical block to add to the block group
4732  * @count:			number of blocks to free
4733  *
4734  * This marks the blocks as free in the bitmap and buddy.
4735  */
4736 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4737 			 ext4_fsblk_t block, unsigned long count)
4738 {
4739 	struct buffer_head *bitmap_bh = NULL;
4740 	struct buffer_head *gd_bh;
4741 	ext4_group_t block_group;
4742 	ext4_grpblk_t bit;
4743 	unsigned int i;
4744 	struct ext4_group_desc *desc;
4745 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4746 	struct ext4_buddy e4b;
4747 	int err = 0, ret, blk_free_count;
4748 	ext4_grpblk_t blocks_freed;
4749 
4750 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4751 
4752 	if (count == 0)
4753 		return 0;
4754 
4755 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4756 	/*
4757 	 * Check to see if we are freeing blocks across a group
4758 	 * boundary.
4759 	 */
4760 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4761 		ext4_warning(sb, "too much blocks added to group %u\n",
4762 			     block_group);
4763 		err = -EINVAL;
4764 		goto error_return;
4765 	}
4766 
4767 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4768 	if (!bitmap_bh) {
4769 		err = -EIO;
4770 		goto error_return;
4771 	}
4772 
4773 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4774 	if (!desc) {
4775 		err = -EIO;
4776 		goto error_return;
4777 	}
4778 
4779 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4780 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4781 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4782 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4783 		     sbi->s_itb_per_group)) {
4784 		ext4_error(sb, "Adding blocks in system zones - "
4785 			   "Block = %llu, count = %lu",
4786 			   block, count);
4787 		err = -EINVAL;
4788 		goto error_return;
4789 	}
4790 
4791 	BUFFER_TRACE(bitmap_bh, "getting write access");
4792 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4793 	if (err)
4794 		goto error_return;
4795 
4796 	/*
4797 	 * We are about to modify some metadata.  Call the journal APIs
4798 	 * to unshare ->b_data if a currently-committing transaction is
4799 	 * using it
4800 	 */
4801 	BUFFER_TRACE(gd_bh, "get_write_access");
4802 	err = ext4_journal_get_write_access(handle, gd_bh);
4803 	if (err)
4804 		goto error_return;
4805 
4806 	for (i = 0, blocks_freed = 0; i < count; i++) {
4807 		BUFFER_TRACE(bitmap_bh, "clear bit");
4808 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4809 			ext4_error(sb, "bit already cleared for block %llu",
4810 				   (ext4_fsblk_t)(block + i));
4811 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4812 		} else {
4813 			blocks_freed++;
4814 		}
4815 	}
4816 
4817 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4818 	if (err)
4819 		goto error_return;
4820 
4821 	/*
4822 	 * need to update group_info->bb_free and the bitmap
4823 	 * with the group lock held; generate_buddy looks at
4824 	 * them with the group lock held
4825 	 */
4826 	ext4_lock_group(sb, block_group);
4827 	mb_clear_bits(bitmap_bh->b_data, bit, count);
4828 	mb_free_blocks(NULL, &e4b, bit, count);
4829 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4830 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
4831 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
4832 	ext4_group_desc_csum_set(sb, block_group, desc);
4833 	ext4_unlock_group(sb, block_group);
4834 	percpu_counter_add(&sbi->s_freeclusters_counter,
4835 			   EXT4_B2C(sbi, blocks_freed));
4836 
4837 	if (sbi->s_log_groups_per_flex) {
4838 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4839 		atomic_add(EXT4_B2C(sbi, blocks_freed),
4840 			   &sbi->s_flex_groups[flex_group].free_clusters);
4841 	}
4842 
4843 	ext4_mb_unload_buddy(&e4b);
4844 
4845 	/* We dirtied the bitmap block */
4846 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4847 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4848 
4849 	/* And the group descriptor block */
4850 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4851 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4852 	if (!err)
4853 		err = ret;
4854 
4855 error_return:
4856 	brelse(bitmap_bh);
4857 	ext4_std_error(sb, err);
4858 	return err;
4859 }
4860 
4861 /**
4862  * ext4_trim_extent -- function to TRIM one single free extent in the group
4863  * @sb:		super block for the file system
4864  * @start:	starting block of the free extent in the alloc. group
4865  * @count:	number of blocks to TRIM
4866  * @group:	alloc. group we are working with
4867  * @e4b:	ext4 buddy for the group
4868  *
4869  * Trim "count" blocks starting at "start" in the "group". To assure that no
4870  * one will allocate those blocks, mark it as used in buddy bitmap. This must
4871  * be called with under the group lock.
4872  */
4873 static int ext4_trim_extent(struct super_block *sb, int start, int count,
4874 			     ext4_group_t group, struct ext4_buddy *e4b)
4875 {
4876 	struct ext4_free_extent ex;
4877 	int ret = 0;
4878 
4879 	trace_ext4_trim_extent(sb, group, start, count);
4880 
4881 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
4882 
4883 	ex.fe_start = start;
4884 	ex.fe_group = group;
4885 	ex.fe_len = count;
4886 
4887 	/*
4888 	 * Mark blocks used, so no one can reuse them while
4889 	 * being trimmed.
4890 	 */
4891 	mb_mark_used(e4b, &ex);
4892 	ext4_unlock_group(sb, group);
4893 	ret = ext4_issue_discard(sb, group, start, count);
4894 	ext4_lock_group(sb, group);
4895 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
4896 	return ret;
4897 }
4898 
4899 /**
4900  * ext4_trim_all_free -- function to trim all free space in alloc. group
4901  * @sb:			super block for file system
4902  * @group:		group to be trimmed
4903  * @start:		first group block to examine
4904  * @max:		last group block to examine
4905  * @minblocks:		minimum extent block count
4906  *
4907  * ext4_trim_all_free walks through the group's buddy bitmap searching for
4908  * free extents. When a free extent is found, it is marked as used in the
4909  * group buddy bitmap, a TRIM command is issued on it via ext4_trim_extent,
4910  * and the extent is then freed back in the buddy bitmap. This is done until
4911  * the whole group has been scanned.
4916  */
4917 static ext4_grpblk_t
4918 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4919 		   ext4_grpblk_t start, ext4_grpblk_t max,
4920 		   ext4_grpblk_t minblocks)
4921 {
4922 	void *bitmap;
4923 	ext4_grpblk_t next, count = 0, free_count = 0;
4924 	struct ext4_buddy e4b;
4925 	int ret = 0;
4926 
4927 	trace_ext4_trim_all_free(sb, group, start, max);
4928 
4929 	ret = ext4_mb_load_buddy(sb, group, &e4b);
4930 	if (ret) {
4931 		ext4_error(sb, "Error in loading buddy "
4932 				"information for %u", group);
4933 		return ret;
4934 	}
4935 	bitmap = e4b.bd_bitmap;
4936 
4937 	ext4_lock_group(sb, group);
4938 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4939 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4940 		goto out;
4941 
4942 	start = (e4b.bd_info->bb_first_free > start) ?
4943 		e4b.bd_info->bb_first_free : start;
4944 
4945 	while (start <= max) {
4946 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
4947 		if (start > max)
4948 			break;
4949 		next = mb_find_next_bit(bitmap, max + 1, start);
4950 
4951 		if ((next - start) >= minblocks) {
4952 			ret = ext4_trim_extent(sb, start,
4953 					       next - start, group, &e4b);
4954 			if (ret && ret != -EOPNOTSUPP)
4955 				break;
4956 			ret = 0;
4957 			count += next - start;
4958 		}
4959 		free_count += next - start;
4960 		start = next + 1;
4961 
4962 		if (fatal_signal_pending(current)) {
4963 			count = -ERESTARTSYS;
4964 			break;
4965 		}
4966 
4967 		if (need_resched()) {
4968 			ext4_unlock_group(sb, group);
4969 			cond_resched();
4970 			ext4_lock_group(sb, group);
4971 		}
4972 
4973 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
4974 			break;
4975 	}
4976 
4977 	if (!ret) {
4978 		ret = count;
4979 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4980 	}
4981 out:
4982 	ext4_unlock_group(sb, group);
4983 	ext4_mb_unload_buddy(&e4b);
4984 
4985 	ext4_debug("trimmed %d blocks in the group %d\n",
4986 		count, group);
4987 
4988 	return ret;
4989 }
4990 
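/*
 * Note on minblocks (illustrative): with minblocks == 4, a 3-block free
 * extent is skipped by ext4_trim_all_free but still counts toward
 * free_count, so the scan can stop early once the group's remaining free
 * space (bb_free - free_count) drops below minblocks.
 */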
4991 /**
4992  * ext4_trim_fs() -- trim ioctl handle function
4993  * @sb:			superblock for filesystem
4994  * @range:		fstrim_range structure
4995  *
4996  * start:	first byte to trim
4997  * len:		number of bytes to trim from start
4998  * minlen:	minimum extent length in bytes
4999  * ext4_trim_fs goes through all the allocation groups containing bytes from
5000  * start to start+len. For each such group the ext4_trim_all_free function
5001  * is invoked to trim all the free space.
5002  */
5003 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5004 {
5005 	struct ext4_group_info *grp;
5006 	ext4_group_t group, first_group, last_group;
5007 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5008 	uint64_t start, end, minlen, trimmed = 0;
5009 	ext4_fsblk_t first_data_blk =
5010 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5011 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5012 	int ret = 0;
5013 
5014 	start = range->start >> sb->s_blocksize_bits;
5015 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
5016 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5017 			      range->minlen >> sb->s_blocksize_bits);
5018 
5019 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5020 	    start >= max_blks ||
5021 	    range->len < sb->s_blocksize)
5022 		return -EINVAL;
5023 	if (end >= max_blks)
5024 		end = max_blks - 1;
5025 	if (end <= first_data_blk)
5026 		goto out;
5027 	if (start < first_data_blk)
5028 		start = first_data_blk;
5029 
5030 	/* Determine first and last group to examine based on start and end */
5031 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5032 				     &first_group, &first_cluster);
5033 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5034 				     &last_group, &last_cluster);
5035 
5036 	/* end now represents the last cluster to discard in this group */
5037 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5038 
5039 	for (group = first_group; group <= last_group; group++) {
5040 		grp = ext4_get_group_info(sb, group);
5041 		/* We only do this if the grp has never been initialized */
5042 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5043 			ret = ext4_mb_init_group(sb, group);
5044 			if (ret)
5045 				break;
5046 		}
5047 
5048 		/*
5049 		 * For all the groups except the last one, the last cluster will
5050 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5051 		 * change it for the last group; note that last_cluster was
5052 		 * already computed earlier by ext4_get_group_no_and_offset()
5053 		 */
5054 		if (group == last_group)
5055 			end = last_cluster;
5056 
5057 		if (grp->bb_free >= minlen) {
5058 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5059 						end, minlen);
5060 			if (cnt < 0) {
5061 				ret = cnt;
5062 				break;
5063 			}
5064 			trimmed += cnt;
5065 		}
5066 
5067 		/*
5068 		 * For every group except the first one, we are sure
5069 		 * that the first cluster to discard will be cluster #0.
5070 		 */
5071 		first_cluster = 0;
5072 	}
5073 
5074 	if (!ret)
5075 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5076 
5077 out:
5078 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
5079 	return ret;
5080 }
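/*
 * Hypothetical usage sketch (not part of this file): a FITRIM ioctl
 * handler would typically drive ext4_trim_fs() like this:
 *
 *	struct fstrim_range range;
 *	int ret;
 *
 *	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
 *			   sizeof(range)))
 *		return -EFAULT;
 *	ret = ext4_trim_fs(sb, &range);
 *	if (ret < 0)
 *		return ret;
 *	if (copy_to_user((struct fstrim_range __user *)arg, &range,
 *			 sizeof(range)))
 *		return -EFAULT;
 *
 * On return, range.len has been rewritten above to report the number of
 * bytes actually trimmed.
 */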
5081