xref: /openbmc/linux/fs/ext4/mballoc.c (revision 5d4a2e29)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "mballoc.h"
25 #include <linux/debugfs.h>
26 #include <linux/slab.h>
27 #include <trace/events/ext4.h>
28 
29 /*
30  * MUSTDO:
31  *   - test ext4_ext_search_left() and ext4_ext_search_right()
32  *   - search for metadata in few groups
33  *
34  * TODO v4:
35  *   - normalization should take into account whether file is still open
36  *   - discard preallocations if no free space left (policy?)
37  *   - don't normalize tails
38  *   - quota
39  *   - reservation for superuser
40  *
41  * TODO v3:
42  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
43  *   - track min/max extents in each group for better group selection
44  *   - mb_mark_used() may allocate chunk right after splitting buddy
45  *   - tree of groups sorted by number of free blocks
46  *   - error handling
47  */
48 
49 /*
50  * The allocation request involves a request for multiple blocks
51  * near to the specified goal (block) value.
52  *
53  * During initialization phase of the allocator we decide to use the
54  * group preallocation or inode preallocation depending on the size of
55  * the file. The size of the file could be the resulting file size we
56  * would have after allocation, or the current file size, whichever
57  * is larger. If the size is less than sbi->s_mb_stream_request we
58  * select to use the group preallocation. The default value of
59  * s_mb_stream_request is 16 blocks. This can also be tuned via
60  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
61  * terms of number of blocks.
62  *
63  * The main motivation for having small files use group preallocation is to
64  * ensure that we have small files close together on the disk.
65  *
66  * In the first stage the allocator looks at the inode prealloc list,
67  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
68  * spaces for this particular inode. The inode prealloc space is
69  * represented as:
70  *
71  * pa_lstart -> the logical start block for this prealloc space
72  * pa_pstart -> the physical start block for this prealloc space
73  * pa_len    -> length for this prealloc space
74  * pa_free   ->  free space available in this prealloc space
75  *
76  * The inode preallocation space is used by looking at the _logical_ start
77  * block. Only if the logical file block falls within the range of a prealloc
78  * space do we consume that particular prealloc space. This makes sure
79  * that we have contiguous physical blocks representing the file blocks
80  *
81  * The important thing to be noted in case of inode prealloc space is that
82  * we don't modify the values associated with the inode prealloc space except
83  * pa_free.
84  *
85  * If we are not able to find blocks in the inode prealloc space and if we
86  * have the group allocation flag set then we look at the locality group
87  * prealloc space. These are per-CPU prealloc lists represented as
88  *
89  * ext4_sb_info.s_locality_groups[smp_processor_id()]
90  *
91  * The reason for having a per cpu locality group is to reduce the contention
92  * between CPUs. It is possible to get scheduled at this point.
93  *
94  * The locality group prealloc space is used by looking at whether we have
95  * enough free space (pa_free) within the prealloc space.
96  *
97  * If we can't allocate blocks via inode prealloc and/or locality group
98  * prealloc then we look at the buddy cache. The buddy cache is represented
99  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
100  * mapped to the buddy and bitmap information regarding different
101  * groups. The buddy information is attached to buddy cache inode so that
102  * we can access them through the page cache. The information regarding
103  * each group is loaded via ext4_mb_load_buddy.  The information involves
104  * the block bitmap and buddy information, which are stored in the
105  * inode as:
106  *
107  *  {                        page                        }
108  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
109  *
110  *
111  * one block each for bitmap and buddy information.  So for each group we
112  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
113  * blocksize) blocks.  So it can have information regarding groups_per_page
114  * which is blocks_per_page/2
115  *
116  * The buddy cache inode is not stored on disk. The inode is thrown
117  * away when the filesystem is unmounted.
118  *
119  * We look for count number of blocks in the buddy cache. If we were able
120  * to locate that many free blocks we return with additional information
121  * regarding the rest of the contiguous physical blocks available
122  *
123  * Before allocating blocks via buddy cache we normalize the request
124  * blocks. This ensures we ask for more blocks than we need. The extra
125  * blocks that we get after allocation are added to the respective prealloc
126  * list. In case of inode preallocation we follow a list of heuristics
127  * based on file size. This can be found in ext4_mb_normalize_request. If
128  * we are doing a group prealloc we try to normalize the request to
129  * sbi->s_mb_group_prealloc. Default value of s_mb_group_prealloc is
130  * 512 blocks. This can be tuned via
131  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
132  * terms of number of blocks. If we have mounted the file system with -o
133  * stripe=<value> option the group prealloc request is normalized to the
134  * stripe value (sbi->s_stripe)
135  *
136  * The regular allocator (using the buddy cache) supports a few tunables.
137  *
138  * /sys/fs/ext4/<partition>/mb_min_to_scan
139  * /sys/fs/ext4/<partition>/mb_max_to_scan
140  * /sys/fs/ext4/<partition>/mb_order2_req
141  *
142  * The regular allocator uses buddy scan only if the request len is a power of
143  * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
144  * value of s_mb_order2_reqs can be tuned via
145  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
146  * stripe size (sbi->s_stripe), we try to search for contiguous blocks of
147  * stripe size. This should result in better allocation on RAID setups. If
148  * not, we search in the specific group using bitmap for best extents. The
149  * tunables min_to_scan and max_to_scan control the behaviour here.
150  * min_to_scan indicates how long the mballoc __must__ look for a best
151  * extent and max_to_scan indicates how long the mballoc __can__ look for a
152  * best extent in the found extents. Searching for the blocks starts with
153  * the group specified as the goal value in allocation context via
154  * ac_g_ex. Each group is first checked based on the criteria whether it
155  * can be used for allocation. ext4_mb_good_group explains how the groups are
156  * checked.
157  *
158  * Both prealloc spaces are populated as above. So for the first
159  * request we will hit the buddy cache which will result in this prealloc
160  * space getting filled. The prealloc space is then later used for the
161  * subsequent request.
162  */
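/*
 * Illustrative sketch (guarded out of the build): how a group number maps
 * to a page and an offset inside the buddy cache inode described above.
 * The function name and the constants are assumptions for the example
 * only, not values read from a real superblock.
 */
#if 0
static void mb_example_group_to_page(void)
{
	int blocksize = 4096;			/* assumed sb->s_blocksize */
	int page_size = 4096;			/* assumed PAGE_CACHE_SIZE */
	int blocks_per_page = page_size / blocksize;	/* 1 */
	int group = 7;
	int block = group * 2;			/* bitmap block: 14 */
	int pnum = block / blocks_per_page;	/* page index: 14 */
	int poff = block % blocks_per_page;	/* offset in page: 0 */
	/* the buddy block is block + 1, i.e. page 15 with this geometry */
}
#endif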
163 
164 /*
165  * mballoc operates on the following data:
166  *  - on-disk bitmap
167  *  - in-core buddy (actually includes buddy and bitmap)
168  *  - preallocation descriptors (PAs)
169  *
170  * there are two types of preallocations:
171  *  - inode
172  *    assigned to a specific inode and can be used for this inode only.
173  *    it describes part of the inode's space preallocated to specific
174  *    physical blocks. any block from that preallocation can be used
175  *    independently. the descriptor just tracks the number of blocks left
176  *    unused. so, before taking some block from the descriptor, one must
177  *    make sure the corresponding logical block isn't allocated yet. this
178  *    also means that freeing any block within descriptor's range
179  *    must discard all preallocated blocks.
180  *  - locality group
181  *    assigned to specific locality group which does not translate to
182  *    permanent set of inodes: inode can join and leave group. space
183  *    from this type of preallocation can be used for any inode. thus
184  *    it's consumed from the beginning to the end.
185  *
186  * relation between them can be expressed as:
187  *    in-core buddy = on-disk bitmap + preallocation descriptors
188  *
189  * this means the blocks mballoc considers used are:
190  *  - allocated blocks (persistent)
191  *  - preallocated blocks (non-persistent)
192  *
193  * consistency in mballoc world means that at any time a block is either
194  * free or used in ALL structures. notice: "any time" should not be read
195  * literally -- time is discrete and delimited by locks.
196  *
197  *  to keep it simple, we don't use block numbers, instead we count number of
198  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
199  *
200  * all operations can be expressed as:
201  *  - init buddy:			buddy = on-disk + PAs
202  *  - new PA:				buddy += N; PA = N
203  *  - use inode PA:			on-disk += N; PA -= N
204  *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
205  *  - use locality group PA:		on-disk += N; PA -= N
206  *  - discard locality group PA:	buddy -= PA; PA = 0
207  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
208  *        is used in real operation because we can't know actual used
209  *        bits from PA, only from on-disk bitmap
210  *
211  * if we follow this strict logic, then all operations above should be atomic.
212  * given some of them can block, we'd have to use something like semaphores
213  * killing performance on high-end SMP hardware. let's try to relax it using
214  * the following knowledge:
215  *  1) if buddy is referenced, it's already initialized
216  *  2) while block is used in buddy and the buddy is referenced,
217  *     nobody can re-allocate that block
218  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
219  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
220  * on-disk bitmap if buddy has same bit set and/or PA covers the corresponding
221  *     block
222  *
223  * so, now we're building a concurrency table:
224  *  - init buddy vs.
225  *    - new PA
226  *      blocks for PA are allocated in the buddy, buddy must be referenced
227  *      until PA is linked to allocation group to avoid concurrent buddy init
228  *    - use inode PA
229  *      we need to make sure that either on-disk bitmap or PA has uptodate data
230  *      given (3) we care that PA-=N operation doesn't interfere with init
231  *    - discard inode PA
232  *      the simplest way would be to have buddy initialized by the discard
233  *    - use locality group PA
234  *      again PA-=N must be serialized with init
235  *    - discard locality group PA
236  *      the simplest way would be to have buddy initialized by the discard
237  *  - new PA vs.
238  *    - use inode PA
239  *      i_data_sem serializes them
240  *    - discard inode PA
241  *      discard process must wait until PA isn't used by another process
242  *    - use locality group PA
243  *      some mutex should serialize them
244  *    - discard locality group PA
245  *      discard process must wait until PA isn't used by another process
246  *  - use inode PA
247  *    - use inode PA
248  *      i_data_sem or another mutex should serialize them
249  *    - discard inode PA
250  *      discard process must wait until PA isn't used by another process
251  *    - use locality group PA
252  *      nothing wrong here -- they're different PAs covering different blocks
253  *    - discard locality group PA
254  *      discard process must wait until PA isn't used by another process
255  *
256  * now we're ready to draw a few conclusions:
257  *  - while a PA is referenced, no discard is possible
258  *  - a PA is referenced until its blocks are marked in the on-disk bitmap
259  *  - PA changes only after on-disk bitmap
260  *  - discard must not compete with init. either init is done before
261  *    any discard or they're serialized somehow
262  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
263  *
264  * a special case arises when we've used a PA to emptiness: no need to
265  * modify the buddy in this case, but we should still care about concurrent init
266  *
267  */
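/*
 * Minimal sketch (not built) of the accounting rules above, with a single
 * PA and plain counters of used blocks. All names here are hypothetical;
 * the point is only that "buddy = on-disk + PA" holds after every step.
 */
#if 0
static void mb_example_invariant(void)
{
	int ondisk = 100, pa = 0, buddy, n = 8, k = 3;

	buddy = ondisk + pa;		/* init buddy */
	buddy += n; pa = n;		/* new PA */
	ondisk += k; pa -= k;		/* use PA: buddy untouched */
	buddy -= pa; pa = 0;		/* discard PA */
	/* after each line above: buddy == ondisk + pa */
}
#endif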
268 
269 /*
270  * Logic in few words:
271  *
272  *  - allocation:
273  *    load group
274  *    find blocks
275  *    mark bits in on-disk bitmap
276  *    release group
277  *
278  *  - use preallocation:
279  *    find proper PA (per-inode or group)
280  *    load group
281  *    mark bits in on-disk bitmap
282  *    release group
283  *    release PA
284  *
285  *  - free:
286  *    load group
287  *    mark bits in on-disk bitmap
288  *    release group
289  *
290  *  - discard preallocations in group:
291  *    mark PAs deleted
292  *    move them onto local list
293  *    load on-disk bitmap
294  *    load group
295  *    remove PA from object (inode or locality group)
296  *    mark free blocks in-core
297  *
298  *  - discard inode's preallocations:
299  */
300 
301 /*
302  * Locking rules
303  *
304  * Locks:
305  *  - bitlock on a group	(group)
306  *  - object (inode/locality)	(object)
307  *  - per-pa lock		(pa)
308  *
309  * Paths:
310  *  - new pa
311  *    object
312  *    group
313  *
314  *  - find and use pa:
315  *    pa
316  *
317  *  - release consumed pa:
318  *    pa
319  *    group
320  *    object
321  *
322  *  - generate in-core bitmap:
323  *    group
324  *        pa
325  *
326  *  - discard all for given object (inode, locality group):
327  *    object
328  *        pa
329  *    group
330  *
331  *  - discard all for given group:
332  *    group
333  *        pa
334  *    group
335  *        object
336  *
337  */
338 static struct kmem_cache *ext4_pspace_cachep;
339 static struct kmem_cache *ext4_ac_cachep;
340 static struct kmem_cache *ext4_free_ext_cachep;
341 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
342 					ext4_group_t group);
343 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
344 						ext4_group_t group);
345 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
346 
347 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
348 {
349 #if BITS_PER_LONG == 64
350 	*bit += ((unsigned long) addr & 7UL) << 3;
351 	addr = (void *) ((unsigned long) addr & ~7UL);
352 #elif BITS_PER_LONG == 32
353 	*bit += ((unsigned long) addr & 3UL) << 3;
354 	addr = (void *) ((unsigned long) addr & ~3UL);
355 #else
356 #error "how many bits you are?!"
357 #endif
358 	return addr;
359 }
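/*
 * Worked example for the alignment fix-up above, on a 64-bit machine:
 * for addr = ...0x13 and bit = 2, the low address bits (0x13 & 7 = 3)
 * are folded into the bit index, bit += 3 << 3 = 24, giving bit 26 at
 * the rounded-down address ...0x10, which is now unsigned-long aligned
 * as ext4_test_bit() on e.g. powerpc requires.
 */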
360 
361 static inline int mb_test_bit(int bit, void *addr)
362 {
363 	/*
364 	 * ext4_test_bit on architecture like powerpc
365 	 * needs unsigned long aligned address
366 	 */
367 	addr = mb_correct_addr_and_bit(&bit, addr);
368 	return ext4_test_bit(bit, addr);
369 }
370 
371 static inline void mb_set_bit(int bit, void *addr)
372 {
373 	addr = mb_correct_addr_and_bit(&bit, addr);
374 	ext4_set_bit(bit, addr);
375 }
376 
377 static inline void mb_clear_bit(int bit, void *addr)
378 {
379 	addr = mb_correct_addr_and_bit(&bit, addr);
380 	ext4_clear_bit(bit, addr);
381 }
382 
383 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
384 {
385 	int fix = 0, ret, tmpmax;
386 	addr = mb_correct_addr_and_bit(&fix, addr);
387 	tmpmax = max + fix;
388 	start += fix;
389 
390 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
391 	if (ret > max)
392 		return max;
393 	return ret;
394 }
395 
396 static inline int mb_find_next_bit(void *addr, int max, int start)
397 {
398 	int fix = 0, ret, tmpmax;
399 	addr = mb_correct_addr_and_bit(&fix, addr);
400 	tmpmax = max + fix;
401 	start += fix;
402 
403 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
404 	if (ret > max)
405 		return max;
406 	return ret;
407 }
408 
409 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
410 {
411 	char *bb;
412 
413 	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
414 	BUG_ON(max == NULL);
415 
416 	if (order > e4b->bd_blkbits + 1) {
417 		*max = 0;
418 		return NULL;
419 	}
420 
421 	/* at order 0 we see each particular block */
422 	*max = 1 << (e4b->bd_blkbits + 3);
423 	if (order == 0)
424 		return EXT4_MB_BITMAP(e4b);
425 
426 	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
427 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
428 
429 	return bb;
430 }
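/*
 * Example (assuming 4KiB blocks, i.e. bd_blkbits = 12): at order 0 the
 * "buddy" is the bitmap itself with *max = 1 << 15 = 32768 bits, one per
 * block in the group. At order k > 0 the map starts s_mb_offsets[k]
 * bytes into the buddy block and holds s_mb_maxs[k] = 32768 >> k bits,
 * one per order-k chunk.
 */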
431 
432 #ifdef DOUBLE_CHECK
433 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
434 			   int first, int count)
435 {
436 	int i;
437 	struct super_block *sb = e4b->bd_sb;
438 
439 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
440 		return;
441 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
442 	for (i = 0; i < count; i++) {
443 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
444 			ext4_fsblk_t blocknr;
445 
446 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
447 			blocknr += first + i;
448 			ext4_grp_locked_error(sb, e4b->bd_group,
449 				   __func__, "double-free of inode"
450 				   " %lu's block %llu(bit %u in group %u)",
451 				   inode ? inode->i_ino : 0, blocknr,
452 				   first + i, e4b->bd_group);
453 		}
454 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
455 	}
456 }
457 
458 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
459 {
460 	int i;
461 
462 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
463 		return;
464 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
465 	for (i = 0; i < count; i++) {
466 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
467 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
468 	}
469 }
470 
471 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
472 {
473 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
474 		unsigned char *b1, *b2;
475 		int i;
476 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
477 		b2 = (unsigned char *) bitmap;
478 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
479 			if (b1[i] != b2[i]) {
480 				printk(KERN_ERR "corruption in group %u "
481 				       "at byte %u(%u): %x in copy != %x "
482 				       "on disk/prealloc\n",
483 				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
484 				BUG();
485 			}
486 		}
487 	}
488 }
489 
490 #else
491 static inline void mb_free_blocks_double(struct inode *inode,
492 				struct ext4_buddy *e4b, int first, int count)
493 {
494 	return;
495 }
496 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
497 						int first, int count)
498 {
499 	return;
500 }
501 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
502 {
503 	return;
504 }
505 #endif
506 
507 #ifdef AGGRESSIVE_CHECK
508 
509 #define MB_CHECK_ASSERT(assert)						\
510 do {									\
511 	if (!(assert)) {						\
512 		printk(KERN_EMERG					\
513 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
514 			function, file, line, # assert);		\
515 		BUG();							\
516 	}								\
517 } while (0)
518 
519 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
520 				const char *function, int line)
521 {
522 	struct super_block *sb = e4b->bd_sb;
523 	int order = e4b->bd_blkbits + 1;
524 	int max;
525 	int max2;
526 	int i;
527 	int j;
528 	int k;
529 	int count;
530 	struct ext4_group_info *grp;
531 	int fragments = 0;
532 	int fstart;
533 	struct list_head *cur;
534 	void *buddy;
535 	void *buddy2;
536 
537 	{
538 		static int mb_check_counter;
539 		if (mb_check_counter++ % 100 != 0)
540 			return 0;
541 	}
542 
543 	while (order > 1) {
544 		buddy = mb_find_buddy(e4b, order, &max);
545 		MB_CHECK_ASSERT(buddy);
546 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
547 		MB_CHECK_ASSERT(buddy2);
548 		MB_CHECK_ASSERT(buddy != buddy2);
549 		MB_CHECK_ASSERT(max * 2 == max2);
550 
551 		count = 0;
552 		for (i = 0; i < max; i++) {
553 
554 			if (mb_test_bit(i, buddy)) {
555 				/* only single bit in buddy2 may be 1 */
556 				if (!mb_test_bit(i << 1, buddy2)) {
557 					MB_CHECK_ASSERT(
558 						mb_test_bit((i<<1)+1, buddy2));
559 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
560 					MB_CHECK_ASSERT(
561 						mb_test_bit(i << 1, buddy2));
562 				}
563 				continue;
564 			}
565 
566 			/* both bits in buddy2 must be 0 */
567 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
568 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
569 
570 			for (j = 0; j < (1 << order); j++) {
571 				k = (i * (1 << order)) + j;
572 				MB_CHECK_ASSERT(
573 					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
574 			}
575 			count++;
576 		}
577 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
578 		order--;
579 	}
580 
581 	fstart = -1;
582 	buddy = mb_find_buddy(e4b, 0, &max);
583 	for (i = 0; i < max; i++) {
584 		if (!mb_test_bit(i, buddy)) {
585 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
586 			if (fstart == -1) {
587 				fragments++;
588 				fstart = i;
589 			}
590 			continue;
591 		}
592 		fstart = -1;
593 		/* check used bits only */
594 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
595 			buddy2 = mb_find_buddy(e4b, j, &max2);
596 			k = i >> j;
597 			MB_CHECK_ASSERT(k < max2);
598 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
599 		}
600 	}
601 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
602 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
603 
604 	grp = ext4_get_group_info(sb, e4b->bd_group);
605 	buddy = mb_find_buddy(e4b, 0, &max);
606 	list_for_each(cur, &grp->bb_prealloc_list) {
607 		ext4_group_t groupnr;
608 		struct ext4_prealloc_space *pa;
609 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
610 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
611 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
612 		for (i = 0; i < pa->pa_len; i++)
613 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
614 	}
615 	return 0;
616 }
617 #undef MB_CHECK_ASSERT
618 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
619 					__FILE__, __func__, __LINE__)
620 #else
621 #define mb_check_buddy(e4b)
622 #endif
623 
624 /* FIXME!! need more doc */
625 static void ext4_mb_mark_free_simple(struct super_block *sb,
626 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
627 					struct ext4_group_info *grp)
628 {
629 	struct ext4_sb_info *sbi = EXT4_SB(sb);
630 	ext4_grpblk_t min;
631 	ext4_grpblk_t max;
632 	ext4_grpblk_t chunk;
633 	unsigned short border;
634 
635 	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
636 
637 	border = 2 << sb->s_blocksize_bits;
638 
639 	while (len > 0) {
640 		/* find how many blocks can be covered from this position */
641 		max = ffs(first | border) - 1;
642 
643 		/* find how many blocks of power 2 we need to mark */
644 		min = fls(len) - 1;
645 
646 		if (max < min)
647 			min = max;
648 		chunk = 1 << min;
649 
650 		/* mark multiblock chunks only */
651 		grp->bb_counters[min]++;
652 		if (min > 0)
653 			mb_clear_bit(first >> min,
654 				     buddy + sbi->s_mb_offsets[min]);
655 
656 		len -= chunk;
657 		first += chunk;
658 	}
659 }
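/*
 * Worked example of the decomposition above (border taken large enough
 * not to matter): for first = 6, len = 10, the first pass gets
 * max = ffs(6) - 1 = 1 and min = fls(10) - 1 = 3, so a chunk of
 * 1 << 1 = 2 blocks is recorded (bb_counters[1]++, clear bit 6 >> 1 = 3
 * in the order-1 map). The second pass, first = 8, len = 8, gets
 * max = min = 3 and records one order-3 chunk (clear bit 1). The free
 * extent [6..15] thus becomes one order-1 and one order-3 buddy chunk.
 */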
660 
661 /*
662  * Cache the order of the largest free extent we have available in this block
663  * group.
664  */
665 static void
666 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
667 {
668 	int i;
669 	int bits;
670 
671 	grp->bb_largest_free_order = -1; /* uninit */
672 
673 	bits = sb->s_blocksize_bits + 1;
674 	for (i = bits; i >= 0; i--) {
675 		if (grp->bb_counters[i] > 0) {
676 			grp->bb_largest_free_order = i;
677 			break;
678 		}
679 	}
680 }
681 
682 static noinline_for_stack
683 void ext4_mb_generate_buddy(struct super_block *sb,
684 				void *buddy, void *bitmap, ext4_group_t group)
685 {
686 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
687 	ext4_grpblk_t max = EXT4_BLOCKS_PER_GROUP(sb);
688 	ext4_grpblk_t i = 0;
689 	ext4_grpblk_t first;
690 	ext4_grpblk_t len;
691 	unsigned free = 0;
692 	unsigned fragments = 0;
693 	unsigned long long period = get_cycles();
694 
695 	/* initialize buddy from the bitmap which is an aggregation
696 	 * of the on-disk bitmap and preallocations */
697 	i = mb_find_next_zero_bit(bitmap, max, 0);
698 	grp->bb_first_free = i;
699 	while (i < max) {
700 		fragments++;
701 		first = i;
702 		i = mb_find_next_bit(bitmap, max, i);
703 		len = i - first;
704 		free += len;
705 		if (len > 1)
706 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
707 		else
708 			grp->bb_counters[0]++;
709 		if (i < max)
710 			i = mb_find_next_zero_bit(bitmap, max, i);
711 	}
712 	grp->bb_fragments = fragments;
713 
714 	if (free != grp->bb_free) {
715 		ext4_grp_locked_error(sb, group,  __func__,
716 			"EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
717 			group, free, grp->bb_free);
718 		/*
719 		 * If we intend to continue, we consider the group descriptor
720 		 * corrupt and update bb_free using the bitmap value
721 		 */
722 		grp->bb_free = free;
723 	}
724 	mb_set_largest_free_order(sb, grp);
725 
726 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
727 
728 	period = get_cycles() - period;
729 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
730 	EXT4_SB(sb)->s_mb_buddies_generated++;
731 	EXT4_SB(sb)->s_mb_generation_time += period;
732 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
733 }
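/*
 * Worked example on an 8-bit bitmap 11001000 (bit 0 first): the scan
 * finds bb_first_free = 2, then free runs [2..3] and [5..7], so
 * bb_fragments = 2 and free = 5. ext4_mb_mark_free_simple() turns the
 * runs into buddy chunks: [2..3] is one order-1 chunk, [5..7] splits
 * into block 5 at order 0 plus [6..7] at order 1, leaving
 * bb_counters[0] = 1 and bb_counters[1] = 2.
 */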
734 
735 /* The buddy information is attached to the buddy cache inode
736  * for convenience. The information regarding each group
737  * is loaded via ext4_mb_load_buddy. The information involves
738  * the block bitmap and buddy information, which are
739  * stored in the inode as
740  *
741  * {                        page                        }
742  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
743  *
744  *
745  * one block each for bitmap and buddy information.
746  * So for each group we take up 2 blocks. A page can
747  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
748  * So it can have information regarding groups_per_page which
749  * is blocks_per_page/2
750  *
751  * Locking note:  This routine takes the block group lock of all groups
752  * for this page; do not hold this lock when calling this routine!
753  */
754 
755 static int ext4_mb_init_cache(struct page *page, char *incore)
756 {
757 	ext4_group_t ngroups;
758 	int blocksize;
759 	int blocks_per_page;
760 	int groups_per_page;
761 	int err = 0;
762 	int i;
763 	ext4_group_t first_group;
764 	int first_block;
765 	struct super_block *sb;
766 	struct buffer_head *bhs;
767 	struct buffer_head **bh;
768 	struct inode *inode;
769 	char *data;
770 	char *bitmap;
771 
772 	mb_debug(1, "init page %lu\n", page->index);
773 
774 	inode = page->mapping->host;
775 	sb = inode->i_sb;
776 	ngroups = ext4_get_groups_count(sb);
777 	blocksize = 1 << inode->i_blkbits;
778 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
779 
780 	groups_per_page = blocks_per_page >> 1;
781 	if (groups_per_page == 0)
782 		groups_per_page = 1;
783 
784 	/* allocate buffer_heads to read bitmaps */
785 	if (groups_per_page > 1) {
786 		err = -ENOMEM;
787 		i = sizeof(struct buffer_head *) * groups_per_page;
788 		bh = kzalloc(i, GFP_NOFS);
789 		if (bh == NULL)
790 			goto out;
791 	} else
792 		bh = &bhs;
793 
794 	first_group = page->index * blocks_per_page / 2;
795 
796 	/* read all groups the page covers into the cache */
797 	for (i = 0; i < groups_per_page; i++) {
798 		struct ext4_group_desc *desc;
799 
800 		if (first_group + i >= ngroups)
801 			break;
802 
803 		err = -EIO;
804 		desc = ext4_get_group_desc(sb, first_group + i, NULL);
805 		if (desc == NULL)
806 			goto out;
807 
808 		err = -ENOMEM;
809 		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
810 		if (bh[i] == NULL)
811 			goto out;
812 
813 		if (bitmap_uptodate(bh[i]))
814 			continue;
815 
816 		lock_buffer(bh[i]);
817 		if (bitmap_uptodate(bh[i])) {
818 			unlock_buffer(bh[i]);
819 			continue;
820 		}
821 		ext4_lock_group(sb, first_group + i);
822 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
823 			ext4_init_block_bitmap(sb, bh[i],
824 						first_group + i, desc);
825 			set_bitmap_uptodate(bh[i]);
826 			set_buffer_uptodate(bh[i]);
827 			ext4_unlock_group(sb, first_group + i);
828 			unlock_buffer(bh[i]);
829 			continue;
830 		}
831 		ext4_unlock_group(sb, first_group + i);
832 		if (buffer_uptodate(bh[i])) {
833 			/*
834 			 * if the group is not uninit and bh is uptodate,
835 			 * the bitmap is also uptodate
836 			 */
837 			set_bitmap_uptodate(bh[i]);
838 			unlock_buffer(bh[i]);
839 			continue;
840 		}
841 		get_bh(bh[i]);
842 		/*
843 		 * submit the buffer_head for read. We can
844 		 * safely mark the bitmap as uptodate now.
845 		 * We do it here so the bitmap uptodate bit
846 		 * gets set with the buffer lock held.
847 		 */
848 		set_bitmap_uptodate(bh[i]);
849 		bh[i]->b_end_io = end_buffer_read_sync;
850 		submit_bh(READ, bh[i]);
851 		mb_debug(1, "read bitmap for group %u\n", first_group + i);
852 	}
853 
854 	/* wait for I/O completion */
855 	for (i = 0; i < groups_per_page && bh[i]; i++)
856 		wait_on_buffer(bh[i]);
857 
858 	err = -EIO;
859 	for (i = 0; i < groups_per_page && bh[i]; i++)
860 		if (!buffer_uptodate(bh[i]))
861 			goto out;
862 
863 	err = 0;
864 	first_block = page->index * blocks_per_page;
865 	/* init the page  */
866 	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
867 	for (i = 0; i < blocks_per_page; i++) {
868 		int group;
869 		struct ext4_group_info *grinfo;
870 
871 		group = (first_block + i) >> 1;
872 		if (group >= ngroups)
873 			break;
874 
875 		/*
876 		 * data carries information regarding this
877 		 * particular group in the format specified
878 		 * above
879 		 *
880 		 */
881 		data = page_address(page) + (i * blocksize);
882 		bitmap = bh[group - first_group]->b_data;
883 
884 		/*
885 		 * We place the buddy block and bitmap block
886 		 * close together
887 		 */
888 		if ((first_block + i) & 1) {
889 			/* this is block of buddy */
890 			BUG_ON(incore == NULL);
891 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
892 				group, page->index, i * blocksize);
893 			trace_ext4_mb_buddy_bitmap_load(sb, group);
894 			grinfo = ext4_get_group_info(sb, group);
895 			grinfo->bb_fragments = 0;
896 			memset(grinfo->bb_counters, 0,
897 			       sizeof(*grinfo->bb_counters) *
898 				(sb->s_blocksize_bits+2));
899 			/*
900 			 * incore got set to the group block bitmap below
901 			 */
902 			ext4_lock_group(sb, group);
903 			ext4_mb_generate_buddy(sb, data, incore, group);
904 			ext4_unlock_group(sb, group);
905 			incore = NULL;
906 		} else {
907 			/* this is block of bitmap */
908 			BUG_ON(incore != NULL);
909 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
910 				group, page->index, i * blocksize);
911 			trace_ext4_mb_bitmap_load(sb, group);
912 
913 			/* see comments in ext4_mb_put_pa() */
914 			ext4_lock_group(sb, group);
915 			memcpy(data, bitmap, blocksize);
916 
917 			/* mark all preallocated blks used in in-core bitmap */
918 			ext4_mb_generate_from_pa(sb, data, group);
919 			ext4_mb_generate_from_freelist(sb, data, group);
920 			ext4_unlock_group(sb, group);
921 
922 			/* set incore so that the buddy information can be
923 			 * generated using this
924 			 */
925 			incore = data;
926 		}
927 	}
928 	SetPageUptodate(page);
929 
930 out:
931 	if (bh) {
932 		for (i = 0; i < groups_per_page && bh[i]; i++)
933 			brelse(bh[i]);
934 		if (bh != &bhs)
935 			kfree(bh);
936 	}
937 	return err;
938 }
939 
940 /*
941  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
942  * block group lock of all groups for this page; do not hold the BG lock when
943  * calling this routine!
944  */
945 static noinline_for_stack
946 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
947 {
948 
949 	int ret = 0;
950 	void *bitmap;
951 	int blocks_per_page;
952 	int block, pnum, poff;
953 	int num_grp_locked = 0;
954 	struct ext4_group_info *this_grp;
955 	struct ext4_sb_info *sbi = EXT4_SB(sb);
956 	struct inode *inode = sbi->s_buddy_cache;
957 	struct page *page = NULL, *bitmap_page = NULL;
958 
959 	mb_debug(1, "init group %u\n", group);
960 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
961 	this_grp = ext4_get_group_info(sb, group);
962 	/*
963 	 * This ensures that we don't reinit the buddy cache
964 	 * page which maps to the group from which we are already
965 	 * allocating. If we are looking at the buddy cache we would
966 	 * have taken a reference using ext4_mb_load_buddy and that
967 	 * would have taken the alloc_sem lock.
968 	 */
969 	num_grp_locked =  ext4_mb_get_buddy_cache_lock(sb, group);
970 	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
971 		/*
972 		 * somebody initialized the group
973 		 * return without doing anything
974 		 */
975 		ret = 0;
976 		goto err;
977 	}
978 	/*
979 	 * the buddy cache inode stores the block bitmap
980 	 * and buddy information in consecutive blocks.
981 	 * So for each group we need two blocks.
982 	 */
983 	block = group * 2;
984 	pnum = block / blocks_per_page;
985 	poff = block % blocks_per_page;
986 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
987 	if (page) {
988 		BUG_ON(page->mapping != inode->i_mapping);
989 		ret = ext4_mb_init_cache(page, NULL);
990 		if (ret) {
991 			unlock_page(page);
992 			goto err;
993 		}
994 		unlock_page(page);
995 	}
996 	if (page == NULL || !PageUptodate(page)) {
997 		ret = -EIO;
998 		goto err;
999 	}
1000 	mark_page_accessed(page);
1001 	bitmap_page = page;
1002 	bitmap = page_address(page) + (poff * sb->s_blocksize);
1003 
1004 	/* init buddy cache */
1005 	block++;
1006 	pnum = block / blocks_per_page;
1007 	poff = block % blocks_per_page;
1008 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1009 	if (page == bitmap_page) {
1010 		/*
1011 		 * If both the bitmap and buddy are in
1012 		 * the same page we don't need to force
1013 		 * init the buddy
1014 		 */
1015 		unlock_page(page);
1016 	} else if (page) {
1017 		BUG_ON(page->mapping != inode->i_mapping);
1018 		ret = ext4_mb_init_cache(page, bitmap);
1019 		if (ret) {
1020 			unlock_page(page);
1021 			goto err;
1022 		}
1023 		unlock_page(page);
1024 	}
1025 	if (page == NULL || !PageUptodate(page)) {
1026 		ret = -EIO;
1027 		goto err;
1028 	}
1029 	mark_page_accessed(page);
1030 err:
1031 	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
1032 	if (bitmap_page)
1033 		page_cache_release(bitmap_page);
1034 	if (page)
1035 		page_cache_release(page);
1036 	return ret;
1037 }
1038 
1039 /*
1040  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1041  * block group lock of all groups for this page; do not hold the BG lock when
1042  * calling this routine!
1043  */
1044 static noinline_for_stack int
1045 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1046 					struct ext4_buddy *e4b)
1047 {
1048 	int blocks_per_page;
1049 	int block;
1050 	int pnum;
1051 	int poff;
1052 	struct page *page;
1053 	int ret;
1054 	struct ext4_group_info *grp;
1055 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1056 	struct inode *inode = sbi->s_buddy_cache;
1057 
1058 	mb_debug(1, "load group %u\n", group);
1059 
1060 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1061 	grp = ext4_get_group_info(sb, group);
1062 
1063 	e4b->bd_blkbits = sb->s_blocksize_bits;
1064 	e4b->bd_info = ext4_get_group_info(sb, group);
1065 	e4b->bd_sb = sb;
1066 	e4b->bd_group = group;
1067 	e4b->bd_buddy_page = NULL;
1068 	e4b->bd_bitmap_page = NULL;
1069 	e4b->alloc_semp = &grp->alloc_sem;
1070 
1071 	/* Take the read lock on the group alloc
1072 	 * sem. This makes sure a parallel
1073 	 * ext4_mb_init_group happening on other
1074 	 * groups mapped by the page is blocked
1075 	 * till we are done with allocation
1076 	 */
1077 repeat_load_buddy:
1078 	down_read(e4b->alloc_semp);
1079 
1080 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1081 		/* we need to check for group need init flag
1082 		 * with alloc_semp held so that we can be sure
1083 		 * that new blocks didn't get added to the group
1084 		 * when we are loading the buddy cache
1085 		 */
1086 		up_read(e4b->alloc_semp);
1087 		/*
1088 		 * we need full data about the group
1089 		 * to make a good selection
1090 		 */
1091 		ret = ext4_mb_init_group(sb, group);
1092 		if (ret)
1093 			return ret;
1094 		goto repeat_load_buddy;
1095 	}
1096 
1097 	/*
1098 	 * the buddy cache inode stores the block bitmap
1099 	 * and buddy information in consecutive blocks.
1100 	 * So for each group we need two blocks.
1101 	 */
1102 	block = group * 2;
1103 	pnum = block / blocks_per_page;
1104 	poff = block % blocks_per_page;
1105 
1106 	/* we could use find_or_create_page(), but it locks the page,
1107 	 * which we'd like to avoid in the fast path ... */
1108 	page = find_get_page(inode->i_mapping, pnum);
1109 	if (page == NULL || !PageUptodate(page)) {
1110 		if (page)
1111 			/*
1112 			 * drop the page reference and try
1113 			 * to get the page with the lock. If it is
1114 			 * not uptodate, that implies somebody just
1115 			 * created the page but has yet to initialize
1116 			 * it, so wait for the initialization to
1117 			 * complete.
1118 			 */
1119 			page_cache_release(page);
1120 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1121 		if (page) {
1122 			BUG_ON(page->mapping != inode->i_mapping);
1123 			if (!PageUptodate(page)) {
1124 				ret = ext4_mb_init_cache(page, NULL);
1125 				if (ret) {
1126 					unlock_page(page);
1127 					goto err;
1128 				}
1129 				mb_cmp_bitmaps(e4b, page_address(page) +
1130 					       (poff * sb->s_blocksize));
1131 			}
1132 			unlock_page(page);
1133 		}
1134 	}
1135 	if (page == NULL || !PageUptodate(page)) {
1136 		ret = -EIO;
1137 		goto err;
1138 	}
1139 	e4b->bd_bitmap_page = page;
1140 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1141 	mark_page_accessed(page);
1142 
1143 	block++;
1144 	pnum = block / blocks_per_page;
1145 	poff = block % blocks_per_page;
1146 
1147 	page = find_get_page(inode->i_mapping, pnum);
1148 	if (page == NULL || !PageUptodate(page)) {
1149 		if (page)
1150 			page_cache_release(page);
1151 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1152 		if (page) {
1153 			BUG_ON(page->mapping != inode->i_mapping);
1154 			if (!PageUptodate(page)) {
1155 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
1156 				if (ret) {
1157 					unlock_page(page);
1158 					goto err;
1159 				}
1160 			}
1161 			unlock_page(page);
1162 		}
1163 	}
1164 	if (page == NULL || !PageUptodate(page)) {
1165 		ret = -EIO;
1166 		goto err;
1167 	}
1168 	e4b->bd_buddy_page = page;
1169 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1170 	mark_page_accessed(page);
1171 
1172 	BUG_ON(e4b->bd_bitmap_page == NULL);
1173 	BUG_ON(e4b->bd_buddy_page == NULL);
1174 
1175 	return 0;
1176 
1177 err:
1178 	if (e4b->bd_bitmap_page)
1179 		page_cache_release(e4b->bd_bitmap_page);
1180 	if (e4b->bd_buddy_page)
1181 		page_cache_release(e4b->bd_buddy_page);
1182 	e4b->bd_buddy = NULL;
1183 	e4b->bd_bitmap = NULL;
1184 
1185 	/* Done with the buddy cache */
1186 	up_read(e4b->alloc_semp);
1187 	return ret;
1188 }
1189 
1190 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1191 {
1192 	if (e4b->bd_bitmap_page)
1193 		page_cache_release(e4b->bd_bitmap_page);
1194 	if (e4b->bd_buddy_page)
1195 		page_cache_release(e4b->bd_buddy_page);
1196 	/* Done with the buddy cache */
1197 	if (e4b->alloc_semp)
1198 		up_read(e4b->alloc_semp);
1199 }
1200 
1201 
1202 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1203 {
1204 	int order = 1;
1205 	void *bb;
1206 
1207 	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
1208 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1209 
1210 	bb = EXT4_MB_BUDDY(e4b);
1211 	while (order <= e4b->bd_blkbits + 1) {
1212 		block = block >> 1;
1213 		if (!mb_test_bit(block, bb)) {
1214 			/* this block is part of buddy of order 'order' */
1215 			return order;
1216 		}
1217 		bb += 1 << (e4b->bd_blkbits - order);
1218 		order++;
1219 	}
1220 	return 0;
1221 }
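/*
 * Example: suppose blocks 4..7 form a free order-2 chunk, so bit 1 is
 * clear in the order-2 map while the order-1 bits for its halves are
 * set (free space is represented only at its own order). For block = 5
 * the loop first tests bit 5 >> 1 = 2 in the order-1 map (set, keep
 * going), then bit 1 in the order-2 map (clear), and returns order 2.
 */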
1222 
1223 static void mb_clear_bits(void *bm, int cur, int len)
1224 {
1225 	__u32 *addr;
1226 
1227 	len = cur + len;
1228 	while (cur < len) {
1229 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1230 			/* fast path: clear whole word at once */
1231 			addr = bm + (cur >> 3);
1232 			*addr = 0;
1233 			cur += 32;
1234 			continue;
1235 		}
1236 		mb_clear_bit(cur, bm);
1237 		cur++;
1238 	}
1239 }
1240 
1241 static void mb_set_bits(void *bm, int cur, int len)
1242 {
1243 	__u32 *addr;
1244 
1245 	len = cur + len;
1246 	while (cur < len) {
1247 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1248 			/* fast path: set whole word at once */
1249 			addr = bm + (cur >> 3);
1250 			*addr = 0xffffffff;
1251 			cur += 32;
1252 			continue;
1253 		}
1254 		mb_set_bit(cur, bm);
1255 		cur++;
1256 	}
1257 }
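/*
 * Worked example for the word-at-a-time fast path above: for cur = 30,
 * len = 40, bits 30 and 31 are set individually, the 32-bit word
 * covering bits 32..63 is written as 0xffffffff in one store (bm + 4),
 * and bits 64..69 are again set one at a time.
 */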
1258 
1259 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1260 			  int first, int count)
1261 {
1262 	int block = 0;
1263 	int max = 0;
1264 	int order;
1265 	void *buddy;
1266 	void *buddy2;
1267 	struct super_block *sb = e4b->bd_sb;
1268 
1269 	BUG_ON(first + count > (sb->s_blocksize << 3));
1270 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1271 	mb_check_buddy(e4b);
1272 	mb_free_blocks_double(inode, e4b, first, count);
1273 
1274 	e4b->bd_info->bb_free += count;
1275 	if (first < e4b->bd_info->bb_first_free)
1276 		e4b->bd_info->bb_first_free = first;
1277 
1278 	/* let's maintain fragments counter */
1279 	if (first != 0)
1280 		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
1281 	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1282 		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
1283 	if (block && max)
1284 		e4b->bd_info->bb_fragments--;
1285 	else if (!block && !max)
1286 		e4b->bd_info->bb_fragments++;
1287 
1288 	/* let's maintain buddy itself */
1289 	while (count-- > 0) {
1290 		block = first++;
1291 		order = 0;
1292 
1293 		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
1294 			ext4_fsblk_t blocknr;
1295 
1296 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1297 			blocknr += block;
1298 			ext4_grp_locked_error(sb, e4b->bd_group,
1299 				   __func__, "double-free of inode"
1300 				   " %lu's block %llu(bit %u in group %u)",
1301 				   inode ? inode->i_ino : 0, blocknr, block,
1302 				   e4b->bd_group);
1303 		}
1304 		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1305 		e4b->bd_info->bb_counters[order]++;
1306 
1307 		/* start of the buddy */
1308 		buddy = mb_find_buddy(e4b, order, &max);
1309 
1310 		do {
1311 			block &= ~1UL;
1312 			if (mb_test_bit(block, buddy) ||
1313 					mb_test_bit(block + 1, buddy))
1314 				break;
1315 
1316 			/* both the buddies are free, try to coalesce them */
1317 			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1318 
1319 			if (!buddy2)
1320 				break;
1321 
1322 			if (order > 0) {
1323 				/* for special purposes, we don't set
1324 				 * free bits in bitmap */
1325 				mb_set_bit(block, buddy);
1326 				mb_set_bit(block + 1, buddy);
1327 			}
1328 			e4b->bd_info->bb_counters[order]--;
1329 			e4b->bd_info->bb_counters[order]--;
1330 
1331 			block = block >> 1;
1332 			order++;
1333 			e4b->bd_info->bb_counters[order]++;
1334 
1335 			mb_clear_bit(block, buddy2);
1336 			buddy = buddy2;
1337 		} while (1);
1338 	}
1339 	mb_set_largest_free_order(sb, e4b->bd_info);
1340 	mb_check_buddy(e4b);
1341 }
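/*
 * Coalescing example: free block 6 while block 7 is already free and
 * [4..5] is a free order-1 chunk. Clearing bit 6 makes bits 6 and 7
 * clear at order 0, so the pair merges: bit 3 is cleared in the
 * order-1 map. There its buddy (bit 2, for [4..5]) is also clear, so
 * bits 2 and 3 are set back at order 1 and bit 1 is cleared in the
 * order-2 map, leaving [4..7] as a single free order-2 chunk.
 */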
1342 
1343 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1344 				int needed, struct ext4_free_extent *ex)
1345 {
1346 	int next = block;
1347 	int max;
1348 	int ord;
1349 	void *buddy;
1350 
1351 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1352 	BUG_ON(ex == NULL);
1353 
1354 	buddy = mb_find_buddy(e4b, order, &max);
1355 	BUG_ON(buddy == NULL);
1356 	BUG_ON(block >= max);
1357 	if (mb_test_bit(block, buddy)) {
1358 		ex->fe_len = 0;
1359 		ex->fe_start = 0;
1360 		ex->fe_group = 0;
1361 		return 0;
1362 	}
1363 
1364 	/* FIXME: drop order completely? */
1365 	if (likely(order == 0)) {
1366 		/* find actual order */
1367 		order = mb_find_order_for_block(e4b, block);
1368 		block = block >> order;
1369 	}
1370 
1371 	ex->fe_len = 1 << order;
1372 	ex->fe_start = block << order;
1373 	ex->fe_group = e4b->bd_group;
1374 
1375 	/* calc difference from given start */
1376 	next = next - ex->fe_start;
1377 	ex->fe_len -= next;
1378 	ex->fe_start += next;
1379 
1380 	while (needed > ex->fe_len &&
1381 	       (buddy = mb_find_buddy(e4b, order, &max))) {
1382 
1383 		if (block + 1 >= max)
1384 			break;
1385 
1386 		next = (block + 1) * (1 << order);
1387 		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
1388 			break;
1389 
1390 		ord = mb_find_order_for_block(e4b, next);
1391 
1392 		order = ord;
1393 		block = next >> order;
1394 		ex->fe_len += 1 << order;
1395 	}
1396 
1397 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1398 	return ex->fe_len;
1399 }
1400 
1401 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1402 {
1403 	int ord;
1404 	int mlen = 0;
1405 	int max = 0;
1406 	int cur;
1407 	int start = ex->fe_start;
1408 	int len = ex->fe_len;
1409 	unsigned ret = 0;
1410 	int len0 = len;
1411 	void *buddy;
1412 
1413 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1414 	BUG_ON(e4b->bd_group != ex->fe_group);
1415 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1416 	mb_check_buddy(e4b);
1417 	mb_mark_used_double(e4b, start, len);
1418 
1419 	e4b->bd_info->bb_free -= len;
1420 	if (e4b->bd_info->bb_first_free == start)
1421 		e4b->bd_info->bb_first_free += len;
1422 
1423 	/* let's maintain fragments counter */
1424 	if (start != 0)
1425 		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
1426 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1427 		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
1428 	if (mlen && max)
1429 		e4b->bd_info->bb_fragments++;
1430 	else if (!mlen && !max)
1431 		e4b->bd_info->bb_fragments--;
1432 
1433 	/* let's maintain buddy itself */
1434 	while (len) {
1435 		ord = mb_find_order_for_block(e4b, start);
1436 
1437 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1438 			/* the whole chunk may be allocated at once! */
1439 			mlen = 1 << ord;
1440 			buddy = mb_find_buddy(e4b, ord, &max);
1441 			BUG_ON((start >> ord) >= max);
1442 			mb_set_bit(start >> ord, buddy);
1443 			e4b->bd_info->bb_counters[ord]--;
1444 			start += mlen;
1445 			len -= mlen;
1446 			BUG_ON(len < 0);
1447 			continue;
1448 		}
1449 
1450 		/* store for history */
1451 		if (ret == 0)
1452 			ret = len | (ord << 16);
1453 
1454 		/* we have to split large buddy */
1455 		BUG_ON(ord <= 0);
1456 		buddy = mb_find_buddy(e4b, ord, &max);
1457 		mb_set_bit(start >> ord, buddy);
1458 		e4b->bd_info->bb_counters[ord]--;
1459 
1460 		ord--;
1461 		cur = (start >> ord) & ~1U;
1462 		buddy = mb_find_buddy(e4b, ord, &max);
1463 		mb_clear_bit(cur, buddy);
1464 		mb_clear_bit(cur + 1, buddy);
1465 		e4b->bd_info->bb_counters[ord]++;
1466 		e4b->bd_info->bb_counters[ord]++;
1467 	}
1468 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1469 
1470 	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
1471 	mb_check_buddy(e4b);
1472 
1473 	return ret;
1474 }
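/*
 * Splitting example: carving ex = {fe_start = 4, fe_len = 3} out of a
 * free order-2 chunk [4..7]. The chunk is split into order-1 halves,
 * [4..5] is taken whole, [6..7] is split again, block 6 is taken at
 * order 0 and block 7 stays free. The returned value encodes the
 * remaining length and the order of the first split as len | (ord << 16),
 * here 3 | (2 << 16), later stored as ac_tail and ac_buddy.
 */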
1475 
1476 /*
1477  * Must be called under group lock!
1478  */
1479 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1480 					struct ext4_buddy *e4b)
1481 {
1482 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1483 	int ret;
1484 
1485 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1486 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1487 
1488 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1489 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1490 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1491 
1492 	/* preallocation can change ac_b_ex, thus we store actually
1493 	 * allocated blocks for history */
1494 	ac->ac_f_ex = ac->ac_b_ex;
1495 
1496 	ac->ac_status = AC_STATUS_FOUND;
1497 	ac->ac_tail = ret & 0xffff;
1498 	ac->ac_buddy = ret >> 16;
1499 
1500 	/*
1501 	 * take the page reference. We want the page to be pinned
1502 	 * so that we don't get an ext4_mb_init_cache call for this
1503 	 * group until we update the bitmap. That would mean we
1504 	 * double allocate blocks. The reference is dropped
1505 	 * in ext4_mb_release_context
1506 	 */
1507 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1508 	get_page(ac->ac_bitmap_page);
1509 	ac->ac_buddy_page = e4b->bd_buddy_page;
1510 	get_page(ac->ac_buddy_page);
1511 	/* on allocation we use ac to track the held semaphore */
1512 	ac->alloc_semp =  e4b->alloc_semp;
1513 	e4b->alloc_semp = NULL;
1514 	/* store last allocated for subsequent stream allocation */
1515 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1516 		spin_lock(&sbi->s_md_lock);
1517 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1518 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1519 		spin_unlock(&sbi->s_md_lock);
1520 	}
1521 }
1522 
1523 /*
1524  * regular allocator, for general purposes allocation
1525  */
1526 
1527 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1528 					struct ext4_buddy *e4b,
1529 					int finish_group)
1530 {
1531 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1532 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1533 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1534 	struct ext4_free_extent ex;
1535 	int max;
1536 
1537 	if (ac->ac_status == AC_STATUS_FOUND)
1538 		return;
1539 	/*
1540 	 * We don't want to scan for a whole year
1541 	 */
1542 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1543 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1544 		ac->ac_status = AC_STATUS_BREAK;
1545 		return;
1546 	}
1547 
1548 	/*
1549 	 * Haven't found good chunk so far, let's continue
1550 	 */
1551 	if (bex->fe_len < gex->fe_len)
1552 		return;
1553 
1554 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1555 			&& bex->fe_group == e4b->bd_group) {
1556 		/* recheck chunk's availability - we don't know
1557 		 * when it was found (within this lock-unlock
1558 		 * period or not) */
1559 		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1560 		if (max >= gex->fe_len) {
1561 			ext4_mb_use_best_found(ac, e4b);
1562 			return;
1563 		}
1564 	}
1565 }
1566 
1567 /*
1568  * The routine checks whether the found extent is good enough. If it is,
1569  * then the extent gets marked used and a flag is set in the context
1570  * to stop scanning. Otherwise, the extent is compared with the
1571  * previously found extent and if the new one is better, then it's stored
1572  * in the context. Later, the best found extent will be used, if
1573  * mballoc can't find good enough extent.
1574  *
1575  * FIXME: real allocation policy is to be designed yet!
1576  */
1577 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1578 					struct ext4_free_extent *ex,
1579 					struct ext4_buddy *e4b)
1580 {
1581 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1582 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1583 
1584 	BUG_ON(ex->fe_len <= 0);
1585 	BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1586 	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1587 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1588 
1589 	ac->ac_found++;
1590 
1591 	/*
1592 	 * The special case - take what you catch first
1593 	 */
1594 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1595 		*bex = *ex;
1596 		ext4_mb_use_best_found(ac, e4b);
1597 		return;
1598 	}
1599 
1600 	/*
1601 	 * Let's check whether the chunk is good enough
1602 	 */
1603 	if (ex->fe_len == gex->fe_len) {
1604 		*bex = *ex;
1605 		ext4_mb_use_best_found(ac, e4b);
1606 		return;
1607 	}
1608 
1609 	/*
1610 	 * If this is the first found extent, just store it in the context
1611 	 */
1612 	if (bex->fe_len == 0) {
1613 		*bex = *ex;
1614 		return;
1615 	}
1616 
1617 	/*
1618 	 * If new found extent is better, store it in the context
1619 	 */
1620 	if (bex->fe_len < gex->fe_len) {
1621 		/* if the request isn't satisfied, any found extent
1622 		 * larger than the previous best one is better */
1623 		if (ex->fe_len > bex->fe_len)
1624 			*bex = *ex;
1625 	} else if (ex->fe_len > gex->fe_len) {
1626 		/* if the request is satisfied, then we try to find
1627 		 * an extent that still satisfies the request, but is
1628 		 * smaller than the previous one */
1629 		if (ex->fe_len < bex->fe_len)
1630 			*bex = *ex;
1631 	}
1632 
1633 	ext4_mb_check_limits(ac, e4b, 0);
1634 }
1635 
1636 static noinline_for_stack
1637 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1638 					struct ext4_buddy *e4b)
1639 {
1640 	struct ext4_free_extent ex = ac->ac_b_ex;
1641 	ext4_group_t group = ex.fe_group;
1642 	int max;
1643 	int err;
1644 
1645 	BUG_ON(ex.fe_len <= 0);
1646 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1647 	if (err)
1648 		return err;
1649 
1650 	ext4_lock_group(ac->ac_sb, group);
1651 	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1652 
1653 	if (max > 0) {
1654 		ac->ac_b_ex = ex;
1655 		ext4_mb_use_best_found(ac, e4b);
1656 	}
1657 
1658 	ext4_unlock_group(ac->ac_sb, group);
1659 	ext4_mb_unload_buddy(e4b);
1660 
1661 	return 0;
1662 }
1663 
1664 static noinline_for_stack
1665 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1666 				struct ext4_buddy *e4b)
1667 {
1668 	ext4_group_t group = ac->ac_g_ex.fe_group;
1669 	int max;
1670 	int err;
1671 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1672 	struct ext4_free_extent ex;
1673 
1674 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1675 		return 0;
1676 
1677 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1678 	if (err)
1679 		return err;
1680 
1681 	ext4_lock_group(ac->ac_sb, group);
1682 	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1683 			     ac->ac_g_ex.fe_len, &ex);
1684 
1685 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1686 		ext4_fsblk_t start;
1687 
1688 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1689 			ex.fe_start;
1690 		/* use do_div to get remainder (would be 64-bit modulo) */
1691 		if (do_div(start, sbi->s_stripe) == 0) {
1692 			ac->ac_found++;
1693 			ac->ac_b_ex = ex;
1694 			ext4_mb_use_best_found(ac, e4b);
1695 		}
1696 	} else if (max >= ac->ac_g_ex.fe_len) {
1697 		BUG_ON(ex.fe_len <= 0);
1698 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1699 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1700 		ac->ac_found++;
1701 		ac->ac_b_ex = ex;
1702 		ext4_mb_use_best_found(ac, e4b);
1703 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1704 		/* Sometimes, the caller may want to merge even a small
1705 		 * number of blocks to an existing extent */
1706 		BUG_ON(ex.fe_len <= 0);
1707 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1708 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1709 		ac->ac_found++;
1710 		ac->ac_b_ex = ex;
1711 		ext4_mb_use_best_found(ac, e4b);
1712 	}
1713 	ext4_unlock_group(ac->ac_sb, group);
1714 	ext4_mb_unload_buddy(e4b);
1715 
1716 	return 0;
1717 }
1718 
1719 /*
1720  * The routine scans buddy structures (not bitmap!) from the given order
1721  * to the max order and tries to find a big enough chunk to satisfy the req
1722  */
1723 static noinline_for_stack
1724 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1725 					struct ext4_buddy *e4b)
1726 {
1727 	struct super_block *sb = ac->ac_sb;
1728 	struct ext4_group_info *grp = e4b->bd_info;
1729 	void *buddy;
1730 	int i;
1731 	int k;
1732 	int max;
1733 
1734 	BUG_ON(ac->ac_2order <= 0);
1735 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1736 		if (grp->bb_counters[i] == 0)
1737 			continue;
1738 
1739 		buddy = mb_find_buddy(e4b, i, &max);
1740 		BUG_ON(buddy == NULL);
1741 
1742 		k = mb_find_next_zero_bit(buddy, max, 0);
1743 		BUG_ON(k >= max);
1744 
1745 		ac->ac_found++;
1746 
1747 		ac->ac_b_ex.fe_len = 1 << i;
1748 		ac->ac_b_ex.fe_start = k << i;
1749 		ac->ac_b_ex.fe_group = e4b->bd_group;
1750 
1751 		ext4_mb_use_best_found(ac, e4b);
1752 
1753 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1754 
1755 		if (EXT4_SB(sb)->s_mb_stats)
1756 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1757 
1758 		break;
1759 	}
1760 }
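
/*
 * Illustrative sketch (not part of the allocator; the helper name is
 * hypothetical): how a zero bit found at position k in the order-i
 * buddy bitmap above maps back to a free extent inside the group,
 * mirroring the fe_start/fe_len conversion in
 * ext4_mb_simple_scan_group().
 */
static inline void mb_sketch_order_to_extent(int i, int k,
					     int *start, int *len)
{
	*start = k << i;	/* first block covered by the chunk */
	*len = 1 << i;		/* an order-i chunk spans 2^i blocks */
}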
1761 
1762 /*
1763  * The routine scans the group and measures all found extents.
1764  * In order to optimize scanning, the caller must pass the number of
1765  * free blocks in the group, so the routine knows the upper limit.
1766  */
1767 static noinline_for_stack
1768 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1769 					struct ext4_buddy *e4b)
1770 {
1771 	struct super_block *sb = ac->ac_sb;
1772 	void *bitmap = EXT4_MB_BITMAP(e4b);
1773 	struct ext4_free_extent ex;
1774 	int i;
1775 	int free;
1776 
1777 	free = e4b->bd_info->bb_free;
1778 	BUG_ON(free <= 0);
1779 
1780 	i = e4b->bd_info->bb_first_free;
1781 
1782 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1783 		i = mb_find_next_zero_bit(bitmap,
1784 						EXT4_BLOCKS_PER_GROUP(sb), i);
1785 		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1786 			/*
1787 			 * If we have a corrupt bitmap, we won't find any
1788 			 * free blocks even though the group info says we
1789 			 * have free blocks
1790 			 */
1791 			ext4_grp_locked_error(sb, e4b->bd_group,
1792 					__func__, "%d free blocks as per "
1793 					"group info. But bitmap says 0",
1794 					free);
1795 			break;
1796 		}
1797 
1798 		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1799 		BUG_ON(ex.fe_len <= 0);
1800 		if (free < ex.fe_len) {
1801 			ext4_grp_locked_error(sb, e4b->bd_group,
1802 					__func__, "%d free blocks as per "
1803 					"group info. But got %d blocks",
1804 					free, ex.fe_len);
1805 			/*
1806 			 * The number of free blocks differs. This mostly
1807 			 * indicates that the bitmap is corrupt. So exit
1808 			 * without claiming the space.
1809 			 */
1810 			break;
1811 		}
1812 
1813 		ext4_mb_measure_extent(ac, &ex, e4b);
1814 
1815 		i += ex.fe_len;
1816 		free -= ex.fe_len;
1817 	}
1818 
1819 	ext4_mb_check_limits(ac, e4b, 1);
1820 }
1821 
1822 /*
1823  * This is a special case for storage like RAID5:
1824  * we try to find stripe-aligned chunks for stripe-size requests
1825  * XXX should do so at least for multiples of stripe size as well
1826  */
1827 static noinline_for_stack
1828 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1829 				 struct ext4_buddy *e4b)
1830 {
1831 	struct super_block *sb = ac->ac_sb;
1832 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1833 	void *bitmap = EXT4_MB_BITMAP(e4b);
1834 	struct ext4_free_extent ex;
1835 	ext4_fsblk_t first_group_block;
1836 	ext4_fsblk_t a;
1837 	ext4_grpblk_t i;
1838 	int max;
1839 
1840 	BUG_ON(sbi->s_stripe == 0);
1841 
1842 	/* find first stripe-aligned block in group */
1843 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1844 
1845 	a = first_group_block + sbi->s_stripe - 1;
1846 	do_div(a, sbi->s_stripe);
1847 	i = (a * sbi->s_stripe) - first_group_block;
1848 
1849 	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1850 		if (!mb_test_bit(i, bitmap)) {
1851 			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1852 			if (max >= sbi->s_stripe) {
1853 				ac->ac_found++;
1854 				ac->ac_b_ex = ex;
1855 				ext4_mb_use_best_found(ac, e4b);
1856 				break;
1857 			}
1858 		}
1859 		i += sbi->s_stripe;
1860 	}
1861 }
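
/*
 * Illustrative sketch of the alignment arithmetic above (hypothetical
 * helper, assuming a native 64-bit division is acceptable; the code
 * above uses do_div() so the same works on 32-bit hosts): round a
 * block number up to the next stripe boundary.
 */
static inline unsigned long long
mb_sketch_next_stripe_boundary(unsigned long long block,
			       unsigned int stripe)
{
	return ((block + stripe - 1) / stripe) * stripe;
}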
1862 
1863 /* This is now called BEFORE we load the buddy bitmap. */
1864 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1865 				ext4_group_t group, int cr)
1866 {
1867 	unsigned free, fragments;
1868 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1869 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1870 
1871 	BUG_ON(cr < 0 || cr >= 4);
1872 
1873 	/* We only do this if the grp has never been initialized */
1874 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1875 		int ret = ext4_mb_init_group(ac->ac_sb, group);
1876 		if (ret)
1877 			return 0;
1878 	}
1879 
1880 	free = grp->bb_free;
1881 	fragments = grp->bb_fragments;
1882 	if (free == 0)
1883 		return 0;
1884 	if (fragments == 0)
1885 		return 0;
1886 
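	/*
	 * The cr passes below get progressively easier to satisfy:
	 * cr 0 demands a buddy chunk of order ac_2order or larger,
	 * cr 1 demands a large enough average fragment, cr 2 only
	 * demands enough free blocks in total, and cr 3 accepts any
	 * group with free space.
	 */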
1887 	switch (cr) {
1888 	case 0:
1889 		BUG_ON(ac->ac_2order == 0);
1890 
1891 		if (grp->bb_largest_free_order < ac->ac_2order)
1892 			return 0;
1893 
1894 		/* Avoid using the first bg of a flexgroup for data files */
1895 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1896 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1897 		    ((group % flex_size) == 0))
1898 			return 0;
1899 
1900 		return 1;
1901 	case 1:
1902 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1903 			return 1;
1904 		break;
1905 	case 2:
1906 		if (free >= ac->ac_g_ex.fe_len)
1907 			return 1;
1908 		break;
1909 	case 3:
1910 		return 1;
1911 	default:
1912 		BUG();
1913 	}
1914 
1915 	return 0;
1916 }
1917 
1918 /*
1919  * Lock the group_info alloc_sem of all the groups
1920  * belonging to the same buddy cache page. This
1921  * makes sure no other parallel operation on the buddy
1922  * cache can happen while holding the buddy cache
1923  * lock
1924  */
1925 int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
1926 {
1927 	int i;
1928 	int block, pnum;
1929 	int blocks_per_page;
1930 	int groups_per_page;
1931 	ext4_group_t ngroups = ext4_get_groups_count(sb);
1932 	ext4_group_t first_group;
1933 	struct ext4_group_info *grp;
1934 
1935 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1936 	/*
1937 	 * the buddy cache inode stores the block bitmap
1938 	 * and buddy information in consecutive blocks.
1939 	 * So for each group we need two blocks.
1940 	 */
1941 	block = group * 2;
1942 	pnum = block / blocks_per_page;
1943 	first_group = pnum * blocks_per_page / 2;
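	/*
	 * Worked example (assuming 4KB pages and a 1KB block size):
	 * blocks_per_page == 4, so groups 0-1 share page 0, groups
	 * 2-3 share page 1, and so on; first_group is the lowest
	 * numbered group whose two blocks live on this page.
	 */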
1944 
1945 	groups_per_page = blocks_per_page >> 1;
1946 	if (groups_per_page == 0)
1947 		groups_per_page = 1;
1948 	/* read all groups the page covers into the cache */
1949 	for (i = 0; i < groups_per_page; i++) {
1950 
1951 		if ((first_group + i) >= ngroups)
1952 			break;
1953 		grp = ext4_get_group_info(sb, first_group + i);
1954 		/* take each group's write allocation
1955 		 * semaphore. This makes sure there is
1956 		 * no block allocation going on in any
1957 		 * of those groups
1958 		 */
1959 		down_write_nested(&grp->alloc_sem, i);
1960 	}
1961 	return i;
1962 }
1963 
1964 void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
1965 					ext4_group_t group, int locked_group)
1966 {
1967 	int i;
1968 	int block, pnum;
1969 	int blocks_per_page;
1970 	ext4_group_t first_group;
1971 	struct ext4_group_info *grp;
1972 
1973 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1974 	/*
1975 	 * the buddy cache inode stores the block bitmap
1976 	 * and buddy information in consecutive blocks.
1977 	 * So for each group we need two blocks.
1978 	 */
1979 	block = group * 2;
1980 	pnum = block / blocks_per_page;
1981 	first_group = pnum * blocks_per_page / 2;
1982 	/* release locks on all the groups */
1983 	for (i = 0; i < locked_group; i++) {
1984 
1985 		grp = ext4_get_group_info(sb, first_group + i);
1986 		/* release each group's write allocation
1987 		 * semaphore. This pairs with the down_write
1988 		 * taken in ext4_mb_get_buddy_cache_lock()
1989 		 * for each of those groups
1990 		 */
1991 		up_write(&grp->alloc_sem);
1992 	}
1993 
1994 }
1995 
1996 static noinline_for_stack int
1997 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1998 {
1999 	ext4_group_t ngroups, group, i;
2000 	int cr;
2001 	int err = 0;
2002 	int bsbits;
2003 	struct ext4_sb_info *sbi;
2004 	struct super_block *sb;
2005 	struct ext4_buddy e4b;
2006 
2007 	sb = ac->ac_sb;
2008 	sbi = EXT4_SB(sb);
2009 	ngroups = ext4_get_groups_count(sb);
2010 	/* non-extent files are limited to low blocks/groups */
2011 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2012 		ngroups = sbi->s_blockfile_groups;
2013 
2014 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2015 
2016 	/* first, try the goal */
2017 	err = ext4_mb_find_by_goal(ac, &e4b);
2018 	if (err || ac->ac_status == AC_STATUS_FOUND)
2019 		goto out;
2020 
2021 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2022 		goto out;
2023 
2024 	/*
2025 	 * ac->ac_2order is set only if the fe_len is a power of 2;
2026 	 * if ac_2order is set we also set the criteria to 0 so that we
2027 	 * try exact allocation using the buddy.
2028 	 */
2029 	i = fls(ac->ac_g_ex.fe_len);
2030 	ac->ac_2order = 0;
2031 	/*
2032 	 * We search using buddy data only if the order of the request
2033 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2034 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2035 	 */
2036 	if (i >= sbi->s_mb_order2_reqs) {
2037 		/*
2038 		 * This should tell us whether fe_len is exactly a power of 2
2039 		 */
2040 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2041 			ac->ac_2order = i - 1;
2042 	}
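	/*
	 * Example: a request for fe_len == 8 gives fls() == 4 and
	 * 8 & ~(1 << 3) == 0, so ac_2order becomes 3 and the cr == 0
	 * pass can satisfy the request with a single order-3 buddy
	 * chunk.
	 */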
2043 
2044 	bsbits = ac->ac_sb->s_blocksize_bits;
2045 
2046 	/* if stream allocation is enabled, use global goal */
2047 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2048 		/* TBD: may be a contention hot spot */
2049 		spin_lock(&sbi->s_md_lock);
2050 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2051 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2052 		spin_unlock(&sbi->s_md_lock);
2053 	}
2054 
2055 	/* Let's just scan groups to find more or less suitable blocks */
2056 	cr = ac->ac_2order ? 0 : 1;
2057 	/*
2058 	 * cr == 0 try to get exact allocation,
2059 	 * cr == 3  try to get anything
2060 	 */
2061 repeat:
2062 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2063 		ac->ac_criteria = cr;
2064 		/*
2065 		 * start searching for a suitable group
2066 		 * from the goal value specified
2067 		 */
2068 		group = ac->ac_g_ex.fe_group;
2069 
2070 		for (i = 0; i < ngroups; group++, i++) {
2071 			if (group == ngroups)
2072 				group = 0;
2073 
2074 			/* This now checks without needing the buddy page */
2075 			if (!ext4_mb_good_group(ac, group, cr))
2076 				continue;
2077 
2078 			err = ext4_mb_load_buddy(sb, group, &e4b);
2079 			if (err)
2080 				goto out;
2081 
2082 			ext4_lock_group(sb, group);
2083 
2084 			/*
2085 			 * We need to check again after locking the
2086 			 * block group
2087 			 */
2088 			if (!ext4_mb_good_group(ac, group, cr)) {
2089 				ext4_unlock_group(sb, group);
2090 				ext4_mb_unload_buddy(&e4b);
2091 				continue;
2092 			}
2093 
2094 			ac->ac_groups_scanned++;
2095 			if (cr == 0)
2096 				ext4_mb_simple_scan_group(ac, &e4b);
2097 			else if (cr == 1 &&
2098 					ac->ac_g_ex.fe_len == sbi->s_stripe)
2099 				ext4_mb_scan_aligned(ac, &e4b);
2100 			else
2101 				ext4_mb_complex_scan_group(ac, &e4b);
2102 
2103 			ext4_unlock_group(sb, group);
2104 			ext4_mb_unload_buddy(&e4b);
2105 
2106 			if (ac->ac_status != AC_STATUS_CONTINUE)
2107 				break;
2108 		}
2109 	}
2110 
2111 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2112 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2113 		/*
2114 		 * We've been searching too long. Let's try to allocate
2115 		 * the best chunk we've found so far
2116 		 */
2117 
2118 		ext4_mb_try_best_found(ac, &e4b);
2119 		if (ac->ac_status != AC_STATUS_FOUND) {
2120 			/*
2121 			 * Someone luckier has already allocated it.
2122 			 * The only thing we can do is just take the first
2123 			 * found block(s)
2124 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2125 			 */
2126 			ac->ac_b_ex.fe_group = 0;
2127 			ac->ac_b_ex.fe_start = 0;
2128 			ac->ac_b_ex.fe_len = 0;
2129 			ac->ac_status = AC_STATUS_CONTINUE;
2130 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2131 			cr = 3;
2132 			atomic_inc(&sbi->s_mb_lost_chunks);
2133 			goto repeat;
2134 		}
2135 	}
2136 out:
2137 	return err;
2138 }
2139 
2140 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2141 {
2142 	struct super_block *sb = seq->private;
2143 	ext4_group_t group;
2144 
2145 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2146 		return NULL;
2147 	group = *pos + 1;
2148 	return (void *) ((unsigned long) group);
2149 }
2150 
2151 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2152 {
2153 	struct super_block *sb = seq->private;
2154 	ext4_group_t group;
2155 
2156 	++*pos;
2157 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2158 		return NULL;
2159 	group = *pos + 1;
2160 	return (void *) ((unsigned long) group);
2161 }
2162 
2163 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2164 {
2165 	struct super_block *sb = seq->private;
2166 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2167 	int i;
2168 	int err;
2169 	struct ext4_buddy e4b;
2170 	struct sg {
2171 		struct ext4_group_info info;
2172 		ext4_grpblk_t counters[16];
2173 	} sg;
2174 
2175 	group--;
2176 	if (group == 0)
2177 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2178 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2179 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2180 			   "group", "free", "frags", "first",
2181 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2182 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2183 
2184 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2185 		sizeof(struct ext4_group_info);
2186 	err = ext4_mb_load_buddy(sb, group, &e4b);
2187 	if (err) {
2188 		seq_printf(seq, "#%-5u: I/O error\n", group);
2189 		return 0;
2190 	}
2191 	ext4_lock_group(sb, group);
2192 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2193 	ext4_unlock_group(sb, group);
2194 	ext4_mb_unload_buddy(&e4b);
2195 
2196 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2197 			sg.info.bb_fragments, sg.info.bb_first_free);
2198 	for (i = 0; i <= 13; i++)
2199 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2200 				sg.info.bb_counters[i] : 0);
2201 	seq_printf(seq, " ]\n");
2202 
2203 	return 0;
2204 }
2205 
2206 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2207 {
2208 }
2209 
2210 static const struct seq_operations ext4_mb_seq_groups_ops = {
2211 	.start  = ext4_mb_seq_groups_start,
2212 	.next   = ext4_mb_seq_groups_next,
2213 	.stop   = ext4_mb_seq_groups_stop,
2214 	.show   = ext4_mb_seq_groups_show,
2215 };
2216 
2217 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2218 {
2219 	struct super_block *sb = PDE(inode)->data;
2220 	int rc;
2221 
2222 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2223 	if (rc == 0) {
2224 		struct seq_file *m = (struct seq_file *)file->private_data;
2225 		m->private = sb;
2226 	}
2227 	return rc;
2228 
2229 }
2230 
2231 static const struct file_operations ext4_mb_seq_groups_fops = {
2232 	.owner		= THIS_MODULE,
2233 	.open		= ext4_mb_seq_groups_open,
2234 	.read		= seq_read,
2235 	.llseek		= seq_lseek,
2236 	.release	= seq_release,
2237 };
2238 
2239 
2240 /* Create and initialize ext4_group_info data for the given group. */
2241 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2242 			  struct ext4_group_desc *desc)
2243 {
2244 	int i, len;
2245 	int metalen = 0;
2246 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2247 	struct ext4_group_info **meta_group_info;
2248 
2249 	/*
2250 	 * First check if this group is the first group of a descriptor
2251 	 * block. If so, we have to allocate a new table of pointers
2252 	 * to ext4_group_info structures
2253 	 */
2254 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2255 		metalen = sizeof(*meta_group_info) <<
2256 			EXT4_DESC_PER_BLOCK_BITS(sb);
2257 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2258 		if (meta_group_info == NULL) {
2259 			printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2260 			       "buddy group\n");
2261 			goto exit_meta_group_info;
2262 		}
2263 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2264 			meta_group_info;
2265 	}
2266 
2267 	/*
2268 	 * Calculate the needed size. If the bb_counters size changes,
2269 	 * don't forget about ext4_mb_generate_buddy()
2270 	 */
2271 	len = offsetof(typeof(**meta_group_info),
2272 		       bb_counters[sb->s_blocksize_bits + 2]);
2273 
2274 	meta_group_info =
2275 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2276 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2277 
2278 	meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2279 	if (meta_group_info[i] == NULL) {
2280 		printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2281 		goto exit_group_info;
2282 	}
2283 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2284 		&(meta_group_info[i]->bb_state));
2285 
2286 	/*
2287 	 * initialize bb_free to be able to skip
2288 	 * empty groups without initialization
2289 	 */
2290 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2291 		meta_group_info[i]->bb_free =
2292 			ext4_free_blocks_after_init(sb, group, desc);
2293 	} else {
2294 		meta_group_info[i]->bb_free =
2295 			ext4_free_blks_count(sb, desc);
2296 	}
2297 
2298 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2299 	init_rwsem(&meta_group_info[i]->alloc_sem);
2300 	meta_group_info[i]->bb_free_root = RB_ROOT;
2301 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2302 
2303 #ifdef DOUBLE_CHECK
2304 	{
2305 		struct buffer_head *bh;
2306 		meta_group_info[i]->bb_bitmap =
2307 			kmalloc(sb->s_blocksize, GFP_KERNEL);
2308 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2309 		bh = ext4_read_block_bitmap(sb, group);
2310 		BUG_ON(bh == NULL);
2311 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2312 			sb->s_blocksize);
2313 		put_bh(bh);
2314 	}
2315 #endif
2316 
2317 	return 0;
2318 
2319 exit_group_info:
2320 	/* If a meta_group_info table has been allocated, release it now */
2321 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2322 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2323 exit_meta_group_info:
2324 	return -ENOMEM;
2325 } /* ext4_mb_add_groupinfo */
2326 
2327 static int ext4_mb_init_backend(struct super_block *sb)
2328 {
2329 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2330 	ext4_group_t i;
2331 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2332 	struct ext4_super_block *es = sbi->s_es;
2333 	int num_meta_group_infos;
2334 	int num_meta_group_infos_max;
2335 	int array_size;
2336 	struct ext4_group_desc *desc;
2337 
2338 	/* This is the number of blocks used by GDT */
2339 	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
2340 				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2341 
2342 	/*
2343 	 * This is the total number of blocks used by GDT including
2344 	 * the number of reserved blocks for GDT.
2345 	 * The s_group_info array is allocated with this value
2346 	 * to allow a clean online resize without a complex
2347 	 * manipulation of pointers.
2348 	 * The drawback is the unused memory when no resize
2349 	 * occurs, but it's very low in terms of pages
2350 	 * (see comments below)
2351 	 * Need to handle this properly when META_BG resizing is allowed
2352 	 */
2353 	num_meta_group_infos_max = num_meta_group_infos +
2354 				le16_to_cpu(es->s_reserved_gdt_blocks);
2355 
2356 	/*
2357 	 * array_size is the size of the s_group_info array. We round it
2358 	 * up to the next power of two because kmalloc rounds up
2359 	 * internally anyway, so the extra memory comes for free here
2360 	 * (e.g. it may be used for a META_BG resize).
2361 	 */
2362 	array_size = 1;
2363 	while (array_size < sizeof(*sbi->s_group_info) *
2364 	       num_meta_group_infos_max)
2365 		array_size = array_size << 1;
2366 	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2367 	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2368 	 * So a two level scheme suffices for now. */
2369 	sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
2370 	if (sbi->s_group_info == NULL) {
2371 		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2372 		return -ENOMEM;
2373 	}
2374 	sbi->s_buddy_cache = new_inode(sb);
2375 	if (sbi->s_buddy_cache == NULL) {
2376 		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2377 		goto err_freesgi;
2378 	}
2379 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2380 	for (i = 0; i < ngroups; i++) {
2381 		desc = ext4_get_group_desc(sb, i, NULL);
2382 		if (desc == NULL) {
2383 			printk(KERN_ERR
2384 				"EXT4-fs: can't read descriptor %u\n", i);
2385 			goto err_freebuddy;
2386 		}
2387 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2388 			goto err_freebuddy;
2389 	}
2390 
2391 	return 0;
2392 
2393 err_freebuddy:
2394 	while (i-- > 0)
2395 		kfree(ext4_get_group_info(sb, i));
2396 	i = num_meta_group_infos;
2397 	while (i-- > 0)
2398 		kfree(sbi->s_group_info[i]);
2399 	iput(sbi->s_buddy_cache);
2400 err_freesgi:
2401 	kfree(sbi->s_group_info);
2402 	return -ENOMEM;
2403 }
2404 
2405 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2406 {
2407 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2408 	unsigned i, j;
2409 	unsigned offset;
2410 	unsigned max;
2411 	int ret;
2412 
2413 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2414 
2415 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2416 	if (sbi->s_mb_offsets == NULL) {
2417 		return -ENOMEM;
2418 	}
2419 
2420 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2421 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2422 	if (sbi->s_mb_maxs == NULL) {
2423 		kfree(sbi->s_mb_offsets);
2424 		return -ENOMEM;
2425 	}
2426 
2427 	/* order 0 is regular bitmap */
2428 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2429 	sbi->s_mb_offsets[0] = 0;
2430 
2431 	i = 1;
2432 	offset = 0;
2433 	max = sb->s_blocksize << 2;
2434 	do {
2435 		sbi->s_mb_offsets[i] = offset;
2436 		sbi->s_mb_maxs[i] = max;
2437 		offset += 1 << (sb->s_blocksize_bits - i);
2438 		max = max >> 1;
2439 		i++;
2440 	} while (i <= sb->s_blocksize_bits + 1);
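
	/*
	 * Worked example (assuming a 4KB block size, s_blocksize_bits
	 * == 12): the order-0 bitmap holds 4096 << 3 == 32768 bits;
	 * within the buddy block, order 1 starts at byte offset 0 with
	 * 16384 bits, order 2 at byte 2048 with 8192 bits, order 3 at
	 * byte 3072 with 4096 bits, halving each time.
	 */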
2441 
2442 	/* init file for buddy data */
2443 	ret = ext4_mb_init_backend(sb);
2444 	if (ret != 0) {
2445 		kfree(sbi->s_mb_offsets);
2446 		kfree(sbi->s_mb_maxs);
2447 		return ret;
2448 	}
2449 
2450 	spin_lock_init(&sbi->s_md_lock);
2451 	spin_lock_init(&sbi->s_bal_lock);
2452 
2453 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2454 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2455 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2456 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2457 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2458 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2459 
2460 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2461 	if (sbi->s_locality_groups == NULL) {
2462 		kfree(sbi->s_mb_offsets);
2463 		kfree(sbi->s_mb_maxs);
2464 		return -ENOMEM;
2465 	}
2466 	for_each_possible_cpu(i) {
2467 		struct ext4_locality_group *lg;
2468 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2469 		mutex_init(&lg->lg_mutex);
2470 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2471 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2472 		spin_lock_init(&lg->lg_prealloc_lock);
2473 	}
2474 
2475 	if (sbi->s_proc)
2476 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2477 				 &ext4_mb_seq_groups_fops, sb);
2478 
2479 	if (sbi->s_journal)
2480 		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2481 	return 0;
2482 }
2483 
2484 /* needs to be called with the ext4 group lock held */
2485 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2486 {
2487 	struct ext4_prealloc_space *pa;
2488 	struct list_head *cur, *tmp;
2489 	int count = 0;
2490 
2491 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2492 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2493 		list_del(&pa->pa_group_list);
2494 		count++;
2495 		kmem_cache_free(ext4_pspace_cachep, pa);
2496 	}
2497 	if (count)
2498 		mb_debug(1, "mballoc: %u PAs left\n", count);
2499 
2500 }
2501 
2502 int ext4_mb_release(struct super_block *sb)
2503 {
2504 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2505 	ext4_group_t i;
2506 	int num_meta_group_infos;
2507 	struct ext4_group_info *grinfo;
2508 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2509 
2510 	if (sbi->s_group_info) {
2511 		for (i = 0; i < ngroups; i++) {
2512 			grinfo = ext4_get_group_info(sb, i);
2513 #ifdef DOUBLE_CHECK
2514 			kfree(grinfo->bb_bitmap);
2515 #endif
2516 			ext4_lock_group(sb, i);
2517 			ext4_mb_cleanup_pa(grinfo);
2518 			ext4_unlock_group(sb, i);
2519 			kfree(grinfo);
2520 		}
2521 		num_meta_group_infos = (ngroups +
2522 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2523 			EXT4_DESC_PER_BLOCK_BITS(sb);
2524 		for (i = 0; i < num_meta_group_infos; i++)
2525 			kfree(sbi->s_group_info[i]);
2526 		kfree(sbi->s_group_info);
2527 	}
2528 	kfree(sbi->s_mb_offsets);
2529 	kfree(sbi->s_mb_maxs);
2530 	if (sbi->s_buddy_cache)
2531 		iput(sbi->s_buddy_cache);
2532 	if (sbi->s_mb_stats) {
2533 		printk(KERN_INFO
2534 		       "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2535 				atomic_read(&sbi->s_bal_allocated),
2536 				atomic_read(&sbi->s_bal_reqs),
2537 				atomic_read(&sbi->s_bal_success));
2538 		printk(KERN_INFO
2539 		      "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2540 				"%u 2^N hits, %u breaks, %u lost\n",
2541 				atomic_read(&sbi->s_bal_ex_scanned),
2542 				atomic_read(&sbi->s_bal_goals),
2543 				atomic_read(&sbi->s_bal_2orders),
2544 				atomic_read(&sbi->s_bal_breaks),
2545 				atomic_read(&sbi->s_mb_lost_chunks));
2546 		printk(KERN_INFO
2547 		       "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2548 				sbi->s_mb_buddies_generated++,
2549 				sbi->s_mb_generation_time);
2550 		printk(KERN_INFO
2551 		       "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2552 				atomic_read(&sbi->s_mb_preallocated),
2553 				atomic_read(&sbi->s_mb_discarded));
2554 	}
2555 
2556 	free_percpu(sbi->s_locality_groups);
2557 	if (sbi->s_proc)
2558 		remove_proc_entry("mb_groups", sbi->s_proc);
2559 
2560 	return 0;
2561 }
2562 
2563 /*
2564  * This function is called by the jbd2 layer once the commit has finished,
2565  * so we know we can free the blocks that were released with that commit.
2566  */
2567 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2568 {
2569 	struct super_block *sb = journal->j_private;
2570 	struct ext4_buddy e4b;
2571 	struct ext4_group_info *db;
2572 	int err, count = 0, count2 = 0;
2573 	struct ext4_free_data *entry;
2574 	struct list_head *l, *ltmp;
2575 
2576 	list_for_each_safe(l, ltmp, &txn->t_private_list) {
2577 		entry = list_entry(l, struct ext4_free_data, list);
2578 
2579 		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2580 			 entry->count, entry->group, entry);
2581 
2582 		if (test_opt(sb, DISCARD)) {
2583 			int ret;
2584 			ext4_fsblk_t discard_block;
2585 
2586 			discard_block = entry->start_blk +
2587 				ext4_group_first_block_no(sb, entry->group);
2588 			trace_ext4_discard_blocks(sb,
2589 					(unsigned long long)discard_block,
2590 					entry->count);
2591 			ret = sb_issue_discard(sb, discard_block, entry->count);
2592 			if (ret == EOPNOTSUPP) {
2593 				ext4_warning(sb,
2594 					"discard not supported, disabling");
2595 				clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
2596 			}
2597 		}
2598 
2599 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2600 		/* we expect to find an existing buddy because it's pinned */
2601 		BUG_ON(err != 0);
2602 
2603 		db = e4b.bd_info;
2604 		/* there are blocks to put in buddy to make them really free */
2605 		count += entry->count;
2606 		count2++;
2607 		ext4_lock_group(sb, entry->group);
2608 		/* Take it out of per group rb tree */
2609 		rb_erase(&entry->node, &(db->bb_free_root));
2610 		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2611 
2612 		if (!db->bb_free_root.rb_node) {
2613 			/* No more items in the per-group rb tree;
2614 			 * balance refcounts from ext4_mb_free_metadata()
2615 			 */
2616 			page_cache_release(e4b.bd_buddy_page);
2617 			page_cache_release(e4b.bd_bitmap_page);
2618 		}
2619 		ext4_unlock_group(sb, entry->group);
2620 		kmem_cache_free(ext4_free_ext_cachep, entry);
2621 		ext4_mb_unload_buddy(&e4b);
2622 	}
2623 
2624 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2625 }
2626 
2627 #ifdef CONFIG_EXT4_DEBUG
2628 u8 mb_enable_debug __read_mostly;
2629 
2630 static struct dentry *debugfs_dir;
2631 static struct dentry *debugfs_debug;
2632 
2633 static void __init ext4_create_debugfs_entry(void)
2634 {
2635 	debugfs_dir = debugfs_create_dir("ext4", NULL);
2636 	if (debugfs_dir)
2637 		debugfs_debug = debugfs_create_u8("mballoc-debug",
2638 						  S_IRUGO | S_IWUSR,
2639 						  debugfs_dir,
2640 						  &mb_enable_debug);
2641 }
2642 
2643 static void ext4_remove_debugfs_entry(void)
2644 {
2645 	debugfs_remove(debugfs_debug);
2646 	debugfs_remove(debugfs_dir);
2647 }
2648 
2649 #else
2650 
2651 static void __init ext4_create_debugfs_entry(void)
2652 {
2653 }
2654 
2655 static void ext4_remove_debugfs_entry(void)
2656 {
2657 }
2658 
2659 #endif
2660 
2661 int __init init_ext4_mballoc(void)
2662 {
2663 	ext4_pspace_cachep =
2664 		kmem_cache_create("ext4_prealloc_space",
2665 				     sizeof(struct ext4_prealloc_space),
2666 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2667 	if (ext4_pspace_cachep == NULL)
2668 		return -ENOMEM;
2669 
2670 	ext4_ac_cachep =
2671 		kmem_cache_create("ext4_alloc_context",
2672 				     sizeof(struct ext4_allocation_context),
2673 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2674 	if (ext4_ac_cachep == NULL) {
2675 		kmem_cache_destroy(ext4_pspace_cachep);
2676 		return -ENOMEM;
2677 	}
2678 
2679 	ext4_free_ext_cachep =
2680 		kmem_cache_create("ext4_free_block_extents",
2681 				     sizeof(struct ext4_free_data),
2682 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2683 	if (ext4_free_ext_cachep == NULL) {
2684 		kmem_cache_destroy(ext4_pspace_cachep);
2685 		kmem_cache_destroy(ext4_ac_cachep);
2686 		return -ENOMEM;
2687 	}
2688 	ext4_create_debugfs_entry();
2689 	return 0;
2690 }
2691 
2692 void exit_ext4_mballoc(void)
2693 {
2694 	/*
2695 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2696 	 * before destroying the slab cache.
2697 	 */
2698 	rcu_barrier();
2699 	kmem_cache_destroy(ext4_pspace_cachep);
2700 	kmem_cache_destroy(ext4_ac_cachep);
2701 	kmem_cache_destroy(ext4_free_ext_cachep);
2702 	ext4_remove_debugfs_entry();
2703 }
2704 
2705 
2706 /*
2707  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
2708  * Returns 0 on success or an error code
2709  */
2710 static noinline_for_stack int
2711 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2712 				handle_t *handle, unsigned int reserv_blks)
2713 {
2714 	struct buffer_head *bitmap_bh = NULL;
2715 	struct ext4_super_block *es;
2716 	struct ext4_group_desc *gdp;
2717 	struct buffer_head *gdp_bh;
2718 	struct ext4_sb_info *sbi;
2719 	struct super_block *sb;
2720 	ext4_fsblk_t block;
2721 	int err, len;
2722 
2723 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2724 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2725 
2726 	sb = ac->ac_sb;
2727 	sbi = EXT4_SB(sb);
2728 	es = sbi->s_es;
2729 
2730 
2731 	err = -EIO;
2732 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2733 	if (!bitmap_bh)
2734 		goto out_err;
2735 
2736 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2737 	if (err)
2738 		goto out_err;
2739 
2740 	err = -EIO;
2741 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2742 	if (!gdp)
2743 		goto out_err;
2744 
2745 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2746 			ext4_free_blks_count(sb, gdp));
2747 
2748 	err = ext4_journal_get_write_access(handle, gdp_bh);
2749 	if (err)
2750 		goto out_err;
2751 
2752 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2753 
2754 	len = ac->ac_b_ex.fe_len;
2755 	if (!ext4_data_block_valid(sbi, block, len)) {
2756 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2757 			   "fs metadata\n", block, block+len);
2758 		/* The file system is mounted not to panic on errors.
2759 		 * Fix the bitmap and repeat the block allocation.
2760 		 * We leak some of the blocks here.
2761 		 */
2762 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2763 		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2764 			    ac->ac_b_ex.fe_len);
2765 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2766 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2767 		if (!err)
2768 			err = -EAGAIN;
2769 		goto out_err;
2770 	}
2771 
2772 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2773 #ifdef AGGRESSIVE_CHECK
2774 	{
2775 		int i;
2776 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2777 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2778 						bitmap_bh->b_data));
2779 		}
2780 	}
2781 #endif
2782 	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
2783 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2784 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2785 		ext4_free_blks_set(sb, gdp,
2786 					ext4_free_blocks_after_init(sb,
2787 					ac->ac_b_ex.fe_group, gdp));
2788 	}
2789 	len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
2790 	ext4_free_blks_set(sb, gdp, len);
2791 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2792 
2793 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2794 	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
2795 	/*
2796 	 * Now reduce the dirty block count as well. It should not go negative
2797 	 */
2798 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2799 		/* release all the reserved blocks if non delalloc */
2800 		percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
2801 
2802 	if (sbi->s_log_groups_per_flex) {
2803 		ext4_group_t flex_group = ext4_flex_group(sbi,
2804 							  ac->ac_b_ex.fe_group);
2805 		atomic_sub(ac->ac_b_ex.fe_len,
2806 			   &sbi->s_flex_groups[flex_group].free_blocks);
2807 	}
2808 
2809 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2810 	if (err)
2811 		goto out_err;
2812 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2813 
2814 out_err:
2815 	sb->s_dirt = 1;
2816 	brelse(bitmap_bh);
2817 	return err;
2818 }
2819 
2820 /*
2821  * here we normalize the request for the locality group
2822  * Group requests are normalized to s_stripe size if it is set via the mount
2823  * option. If not, we set it to s_mb_group_prealloc, which can be configured via
2824  * /sys/fs/ext4/<partition>/mb_group_prealloc
2825  *
2826  * XXX: should we try to preallocate more than the group has now?
2827  */
2828 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2829 {
2830 	struct super_block *sb = ac->ac_sb;
2831 	struct ext4_locality_group *lg = ac->ac_lg;
2832 
2833 	BUG_ON(lg == NULL);
2834 	if (EXT4_SB(sb)->s_stripe)
2835 		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
2836 	else
2837 		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2838 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
2839 		current->pid, ac->ac_g_ex.fe_len);
2840 }
2841 
2842 /*
2843  * Normalization means making the request better in terms of
2844  * size and alignment
2845  */
2846 static noinline_for_stack void
2847 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2848 				struct ext4_allocation_request *ar)
2849 {
2850 	int bsbits, max;
2851 	ext4_lblk_t end;
2852 	loff_t size, orig_size, start_off;
2853 	ext4_lblk_t start, orig_start;
2854 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2855 	struct ext4_prealloc_space *pa;
2856 
2857 	/* only normalize data requests; metadata requests
2858 	   do not need preallocation */
2859 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2860 		return;
2861 
2862 	/* sometimes the caller may want exact blocks */
2863 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2864 		return;
2865 
2866 	/* caller may indicate that preallocation isn't
2867 	 * required (it's a tail, for example) */
2868 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2869 		return;
2870 
2871 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2872 		ext4_mb_normalize_group_request(ac);
2873 		return ;
2874 	}
2875 
2876 	bsbits = ac->ac_sb->s_blocksize_bits;
2877 
2878 	/* first, let's learn the actual file size
2879 	 * assuming the current request is allocated */
2880 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2881 	size = size << bsbits;
2882 	if (size < i_size_read(ac->ac_inode))
2883 		size = i_size_read(ac->ac_inode);
2884 
2885 	/* max size of free chunks */
2886 	max = 2 << bsbits;
2887 
2888 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
2889 		(req <= (size) || max <= (chunk_size))
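
	/*
	 * NRL_CHECK_SIZE() matches a bucket either because the request
	 * already fits in it (req <= size) or because free chunks can
	 * never get bigger than the bucket's chunk size anyway
	 * (max <= chunk_size).
	 */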
2890 
2891 	/* first, try to predict filesize */
2892 	/* XXX: should this table be tunable? */
2893 	start_off = 0;
2894 	if (size <= 16 * 1024) {
2895 		size = 16 * 1024;
2896 	} else if (size <= 32 * 1024) {
2897 		size = 32 * 1024;
2898 	} else if (size <= 64 * 1024) {
2899 		size = 64 * 1024;
2900 	} else if (size <= 128 * 1024) {
2901 		size = 128 * 1024;
2902 	} else if (size <= 256 * 1024) {
2903 		size = 256 * 1024;
2904 	} else if (size <= 512 * 1024) {
2905 		size = 512 * 1024;
2906 	} else if (size <= 1024 * 1024) {
2907 		size = 1024 * 1024;
2908 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2909 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2910 						(21 - bsbits)) << 21;
2911 		size = 2 * 1024 * 1024;
2912 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2913 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2914 							(22 - bsbits)) << 22;
2915 		size = 4 * 1024 * 1024;
2916 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2917 					(8<<20)>>bsbits, max, 8 * 1024)) {
2918 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2919 							(23 - bsbits)) << 23;
2920 		size = 8 * 1024 * 1024;
2921 	} else {
2922 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2923 		size	  = ac->ac_o_ex.fe_len << bsbits;
2924 	}
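	/*
	 * Example: a write extending a file to 100KB falls into the
	 * 128KB bucket above, so the goal size is normalized to 128KB;
	 * a roughly 3MB file lands in the 2MB bucket and start_off
	 * additionally aligns the goal start to a 2MB boundary.
	 */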
2925 	orig_size = size = size >> bsbits;
2926 	orig_start = start = start_off >> bsbits;
2927 
2928 	/* don't cover already allocated blocks in selected range */
2929 	if (ar->pleft && start <= ar->lleft) {
2930 		size -= ar->lleft + 1 - start;
2931 		start = ar->lleft + 1;
2932 	}
2933 	if (ar->pright && start + size - 1 >= ar->lright)
2934 		size -= start + size - ar->lright;
2935 
2936 	end = start + size;
2937 
2938 	/* check we don't cross already preallocated blocks */
2939 	rcu_read_lock();
2940 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2941 		ext4_lblk_t pa_end;
2942 
2943 		if (pa->pa_deleted)
2944 			continue;
2945 		spin_lock(&pa->pa_lock);
2946 		if (pa->pa_deleted) {
2947 			spin_unlock(&pa->pa_lock);
2948 			continue;
2949 		}
2950 
2951 		pa_end = pa->pa_lstart + pa->pa_len;
2952 
2953 		/* PA must not overlap original request */
2954 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2955 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
2956 
2957 		/* skip PAs this normalized request doesn't overlap with */
2958 		if (pa->pa_lstart >= end || pa_end <= start) {
2959 			spin_unlock(&pa->pa_lock);
2960 			continue;
2961 		}
2962 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2963 
2964 		/* adjust start or end to be adjacent to this pa */
2965 		if (pa_end <= ac->ac_o_ex.fe_logical) {
2966 			BUG_ON(pa_end < start);
2967 			start = pa_end;
2968 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
2969 			BUG_ON(pa->pa_lstart > end);
2970 			end = pa->pa_lstart;
2971 		}
2972 		spin_unlock(&pa->pa_lock);
2973 	}
2974 	rcu_read_unlock();
2975 	size = end - start;
2976 
2977 	/* XXX: extra loop to check we really don't overlap preallocations */
2978 	rcu_read_lock();
2979 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2980 		ext4_lblk_t pa_end;
2981 		spin_lock(&pa->pa_lock);
2982 		if (pa->pa_deleted == 0) {
2983 			pa_end = pa->pa_lstart + pa->pa_len;
2984 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
2985 		}
2986 		spin_unlock(&pa->pa_lock);
2987 	}
2988 	rcu_read_unlock();
2989 
2990 	if (start + size <= ac->ac_o_ex.fe_logical &&
2991 			start > ac->ac_o_ex.fe_logical) {
2992 		printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
2993 			(unsigned long) start, (unsigned long) size,
2994 			(unsigned long) ac->ac_o_ex.fe_logical);
2995 	}
2996 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
2997 			start > ac->ac_o_ex.fe_logical);
2998 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
2999 
3000 	/* now prepare goal request */
3001 
3002 	/* XXX: is it better to align blocks with respect to logical
3003 	 * placement or to satisfy a big request as is */
3004 	ac->ac_g_ex.fe_logical = start;
3005 	ac->ac_g_ex.fe_len = size;
3006 
3007 	/* define goal start in order to merge */
3008 	if (ar->pright && (ar->lright == (start + size))) {
3009 		/* merge to the right */
3010 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3011 						&ac->ac_f_ex.fe_group,
3012 						&ac->ac_f_ex.fe_start);
3013 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3014 	}
3015 	if (ar->pleft && (ar->lleft + 1 == start)) {
3016 		/* merge to the left */
3017 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3018 						&ac->ac_f_ex.fe_group,
3019 						&ac->ac_f_ex.fe_start);
3020 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3021 	}
3022 
3023 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3024 		(unsigned) orig_size, (unsigned) start);
3025 }
3026 
3027 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3028 {
3029 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3030 
3031 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3032 		atomic_inc(&sbi->s_bal_reqs);
3033 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3034 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3035 			atomic_inc(&sbi->s_bal_success);
3036 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3037 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3038 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3039 			atomic_inc(&sbi->s_bal_goals);
3040 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3041 			atomic_inc(&sbi->s_bal_breaks);
3042 	}
3043 
3044 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3045 		trace_ext4_mballoc_alloc(ac);
3046 	else
3047 		trace_ext4_mballoc_prealloc(ac);
3048 }
3049 
3050 /*
3051  * Called on failure; free up any blocks from the inode PA for this
3052  * context.  We don't need this for MB_GROUP_PA because we only change
3053  * pa_free in ext4_mb_release_context(), but on failure, we've already
3054  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3055  */
3056 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3057 {
3058 	struct ext4_prealloc_space *pa = ac->ac_pa;
3059 	int len;
3060 
3061 	if (pa && pa->pa_type == MB_INODE_PA) {
3062 		len = ac->ac_b_ex.fe_len;
3063 		pa->pa_free += len;
3064 	}
3065 
3066 }
3067 
3068 /*
3069  * use blocks preallocated to inode
3070  */
3071 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3072 				struct ext4_prealloc_space *pa)
3073 {
3074 	ext4_fsblk_t start;
3075 	ext4_fsblk_t end;
3076 	int len;
3077 
3078 	/* found preallocated blocks, use them */
3079 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3080 	end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3081 	len = end - start;
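	/*
	 * Example: with pa_lstart == 100, pa_pstart == 5000 and
	 * pa_len == 16, a request for 8 blocks at logical block 104
	 * maps to physical start 5004; end is capped so the extent
	 * never runs past the end of the preallocated range.
	 */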
3082 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3083 					&ac->ac_b_ex.fe_start);
3084 	ac->ac_b_ex.fe_len = len;
3085 	ac->ac_status = AC_STATUS_FOUND;
3086 	ac->ac_pa = pa;
3087 
3088 	BUG_ON(start < pa->pa_pstart);
3089 	BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3090 	BUG_ON(pa->pa_free < len);
3091 	pa->pa_free -= len;
3092 
3093 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3094 }
3095 
3096 /*
3097  * use blocks preallocated to locality group
3098  */
3099 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3100 				struct ext4_prealloc_space *pa)
3101 {
3102 	unsigned int len = ac->ac_o_ex.fe_len;
3103 
3104 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3105 					&ac->ac_b_ex.fe_group,
3106 					&ac->ac_b_ex.fe_start);
3107 	ac->ac_b_ex.fe_len = len;
3108 	ac->ac_status = AC_STATUS_FOUND;
3109 	ac->ac_pa = pa;
3110 
3111 	/* we don't correct pa_pstart or pa_len here to avoid a
3112 	 * possible race when the group is being loaded concurrently;
3113 	 * instead we correct the pa later, after blocks are marked
3114 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3115 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3116 	 */
3117 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3118 }
3119 
3120 /*
3121  * Return the prealloc space that has the minimal distance
3122  * from the goal block. @cpa is the prealloc
3123  * space with the currently known minimal distance
3124  * from the goal block.
3125  */
3126 static struct ext4_prealloc_space *
3127 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3128 			struct ext4_prealloc_space *pa,
3129 			struct ext4_prealloc_space *cpa)
3130 {
3131 	ext4_fsblk_t cur_distance, new_distance;
3132 
3133 	if (cpa == NULL) {
3134 		atomic_inc(&pa->pa_count);
3135 		return pa;
3136 	}
3137 	cur_distance = abs(goal_block - cpa->pa_pstart);
3138 	new_distance = abs(goal_block - pa->pa_pstart);
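
	/*
	 * Example: goal block 1000 with cpa->pa_pstart == 1100 and
	 * pa->pa_pstart == 980 gives distances 100 and 20, so pa wins
	 * below and takes over cpa's reference.
	 */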
3139 
3140 	if (cur_distance < new_distance)
3141 		return cpa;
3142 
3143 	/* drop the previous reference */
3144 	atomic_dec(&cpa->pa_count);
3145 	atomic_inc(&pa->pa_count);
3146 	return pa;
3147 }
3148 
3149 /*
3150  * search goal blocks in preallocated space
3151  */
3152 static noinline_for_stack int
3153 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3154 {
3155 	int order, i;
3156 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3157 	struct ext4_locality_group *lg;
3158 	struct ext4_prealloc_space *pa, *cpa = NULL;
3159 	ext4_fsblk_t goal_block;
3160 
3161 	/* only data can be preallocated */
3162 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3163 		return 0;
3164 
3165 	/* first, try per-file preallocation */
3166 	rcu_read_lock();
3167 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3168 
3169 		/* all fields in this condition don't change,
3170 		 * so we can skip locking for them */
3171 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3172 			ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3173 			continue;
3174 
3175 		/* non-extent files can't have physical blocks past 2^32 */
3176 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3177 			pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
3178 			continue;
3179 
3180 		/* found preallocated blocks, use them */
3181 		spin_lock(&pa->pa_lock);
3182 		if (pa->pa_deleted == 0 && pa->pa_free) {
3183 			atomic_inc(&pa->pa_count);
3184 			ext4_mb_use_inode_pa(ac, pa);
3185 			spin_unlock(&pa->pa_lock);
3186 			ac->ac_criteria = 10;
3187 			rcu_read_unlock();
3188 			return 1;
3189 		}
3190 		spin_unlock(&pa->pa_lock);
3191 	}
3192 	rcu_read_unlock();
3193 
3194 	/* can we use group allocation? */
3195 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3196 		return 0;
3197 
3198 	/* inode may have no locality group for some reason */
3199 	lg = ac->ac_lg;
3200 	if (lg == NULL)
3201 		return 0;
3202 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3203 	if (order > PREALLOC_TB_SIZE - 1)
3204 		/* The max size of hash table is PREALLOC_TB_SIZE */
3205 		order = PREALLOC_TB_SIZE - 1;
3206 
3207 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3208 	/*
3209 	 * search for the prealloc space that has the
3210 	 * minimal distance from the goal block.
3211 	 */
3212 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3213 		rcu_read_lock();
3214 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3215 					pa_inode_list) {
3216 			spin_lock(&pa->pa_lock);
3217 			if (pa->pa_deleted == 0 &&
3218 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3219 
3220 				cpa = ext4_mb_check_group_pa(goal_block,
3221 								pa, cpa);
3222 			}
3223 			spin_unlock(&pa->pa_lock);
3224 		}
3225 		rcu_read_unlock();
3226 	}
3227 	if (cpa) {
3228 		ext4_mb_use_group_pa(ac, cpa);
3229 		ac->ac_criteria = 20;
3230 		return 1;
3231 	}
3232 	return 0;
3233 }
3234 
3235 /*
3236  * the function goes through all blocks freed in the group
3237  * but not yet committed and marks them used in the in-core bitmap.
3238  * The buddy must be generated from this bitmap.
3239  * Needs to be called with the ext4 group lock held
3240  */
3241 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3242 						ext4_group_t group)
3243 {
3244 	struct rb_node *n;
3245 	struct ext4_group_info *grp;
3246 	struct ext4_free_data *entry;
3247 
3248 	grp = ext4_get_group_info(sb, group);
3249 	n = rb_first(&(grp->bb_free_root));
3250 
3251 	while (n) {
3252 		entry = rb_entry(n, struct ext4_free_data, node);
3253 		mb_set_bits(bitmap, entry->start_blk, entry->count);
3254 		n = rb_next(n);
3255 	}
3256 	return;
3257 }
3258 
3259 /*
3260  * the function goes through all preallocations in this group and marks them
3261  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3262  * Needs to be called with the ext4 group lock held
3263  */
3264 static noinline_for_stack
3265 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3266 					ext4_group_t group)
3267 {
3268 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3269 	struct ext4_prealloc_space *pa;
3270 	struct list_head *cur;
3271 	ext4_group_t groupnr;
3272 	ext4_grpblk_t start;
3273 	int preallocated = 0;
3274 	int count = 0;
3275 	int len;
3276 
3277 	/* all forms of preallocation discard first load the group,
3278 	 * so the only competing code is preallocation use.
3279 	 * we don't need any locking here.
3280 	 * notice we do NOT ignore preallocations with pa_deleted set;
3281 	 * otherwise we could leave used blocks available for
3282 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3283 	 * is dropping the preallocation
3284 	 */
3285 	list_for_each(cur, &grp->bb_prealloc_list) {
3286 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3287 		spin_lock(&pa->pa_lock);
3288 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3289 					     &groupnr, &start);
3290 		len = pa->pa_len;
3291 		spin_unlock(&pa->pa_lock);
3292 		if (unlikely(len == 0))
3293 			continue;
3294 		BUG_ON(groupnr != group);
3295 		mb_set_bits(bitmap, start, len);
3296 		preallocated += len;
3297 		count++;
3298 	}
3299 	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3300 }
3301 
3302 static void ext4_mb_pa_callback(struct rcu_head *head)
3303 {
3304 	struct ext4_prealloc_space *pa;
3305 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3306 	kmem_cache_free(ext4_pspace_cachep, pa);
3307 }
3308 
3309 /*
3310  * drops a reference to preallocated space descriptor
3311  * if this was the last reference and the space is consumed
3312  */
3313 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3314 			struct super_block *sb, struct ext4_prealloc_space *pa)
3315 {
3316 	ext4_group_t grp;
3317 	ext4_fsblk_t grp_blk;
3318 
3319 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3320 		return;
3321 
3322 	/* in this short window concurrent discard can set pa_deleted */
3323 	spin_lock(&pa->pa_lock);
3324 	if (pa->pa_deleted == 1) {
3325 		spin_unlock(&pa->pa_lock);
3326 		return;
3327 	}
3328 
3329 	pa->pa_deleted = 1;
3330 	spin_unlock(&pa->pa_lock);
3331 
3332 	grp_blk = pa->pa_pstart;
3333 	/*
3334 	 * If doing group-based preallocation, pa_pstart may be in the
3335 	 * next group when pa is used up
3336 	 */
3337 	if (pa->pa_type == MB_GROUP_PA)
3338 		grp_blk--;
3339 
3340 	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3341 
3342 	/*
3343 	 * possible race:
3344 	 *
3345 	 *  P1 (buddy init)			P2 (regular allocation)
3346 	 *					find block B in PA
3347 	 *  copy on-disk bitmap to buddy
3348 	 *  					mark B in on-disk bitmap
3349 	 *					drop PA from group
3350 	 *  mark all PAs in buddy
3351 	 *
3352 	 * thus, P1 initializes buddy with B available. to prevent this
3353 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3354 	 * against that pair
3355 	 */
3356 	ext4_lock_group(sb, grp);
3357 	list_del(&pa->pa_group_list);
3358 	ext4_unlock_group(sb, grp);
3359 
3360 	spin_lock(pa->pa_obj_lock);
3361 	list_del_rcu(&pa->pa_inode_list);
3362 	spin_unlock(pa->pa_obj_lock);
3363 
3364 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3365 }
3366 
3367 /*
3368  * creates new preallocated space for given inode
3369  */
3370 static noinline_for_stack int
3371 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3372 {
3373 	struct super_block *sb = ac->ac_sb;
3374 	struct ext4_prealloc_space *pa;
3375 	struct ext4_group_info *grp;
3376 	struct ext4_inode_info *ei;
3377 
3378 	/* preallocate only when the found space is larger than requested */
3379 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3380 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3381 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3382 
3383 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3384 	if (pa == NULL)
3385 		return -ENOMEM;
3386 
3387 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3388 		int winl;
3389 		int wins;
3390 		int win;
3391 		int offs;
3392 
3393 		/* we can't allocate as much as the normalizer wants,
3394 		 * so the found space must get a proper lstart
3395 		 * to cover the original request */
3396 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3397 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3398 
3399 		/* we're limited by the original request in that the
3400 		 * logical block must be covered anyway;
3401 		 * winl is the window we can move our chunk within */
3402 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3403 
3404 		/* also, we should cover whole original request */
3405 		/* also, we should cover the whole original request */
3406 
3407 		/* the smallest one defines real window */
3408 		/* the smallest one defines the real window */
3409 
3410 		offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3411 		if (offs && offs < win)
3412 			win = offs;
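
		/*
		 * Example: an original request of 4 blocks at logical
		 * 10, normalized to 16 blocks at logical 0, but only 8
		 * blocks found: winl == 10, wins == 4, offs == 10 % 8
		 * == 2, so win == 2 and fe_logical becomes 8; the PA
		 * then covers logical blocks 8..15, containing the
		 * original 10..13.
		 */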
3413 
3414 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3415 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3416 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3417 	}
3418 
3419 	/* preallocation can change ac_b_ex, thus we store the actually
3420 	 * allocated blocks for history */
3421 	ac->ac_f_ex = ac->ac_b_ex;
3422 
3423 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3424 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3425 	pa->pa_len = ac->ac_b_ex.fe_len;
3426 	pa->pa_free = pa->pa_len;
3427 	atomic_set(&pa->pa_count, 1);
3428 	spin_lock_init(&pa->pa_lock);
3429 	INIT_LIST_HEAD(&pa->pa_inode_list);
3430 	INIT_LIST_HEAD(&pa->pa_group_list);
3431 	pa->pa_deleted = 0;
3432 	pa->pa_type = MB_INODE_PA;
3433 
3434 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3435 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3436 	trace_ext4_mb_new_inode_pa(ac, pa);
3437 
3438 	ext4_mb_use_inode_pa(ac, pa);
3439 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3440 
3441 	ei = EXT4_I(ac->ac_inode);
3442 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3443 
3444 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3445 	pa->pa_inode = ac->ac_inode;
3446 
3447 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3448 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3449 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3450 
3451 	spin_lock(pa->pa_obj_lock);
3452 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3453 	spin_unlock(pa->pa_obj_lock);
3454 
3455 	return 0;
3456 }
3457 
3458 /*
3459  * creates new preallocated space for the locality group the inode belongs to
3460  */
3461 static noinline_for_stack int
3462 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3463 {
3464 	struct super_block *sb = ac->ac_sb;
3465 	struct ext4_locality_group *lg;
3466 	struct ext4_prealloc_space *pa;
3467 	struct ext4_group_info *grp;
3468 
3469 	/* preallocate only when the found space is larger than requested */
3470 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3471 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3472 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3473 
3474 	BUG_ON(ext4_pspace_cachep == NULL);
3475 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3476 	if (pa == NULL)
3477 		return -ENOMEM;
3478 
3479 	/* preallocation can change ac_b_ex, thus we store the actually
3480 	 * allocated blocks for history */
3481 	ac->ac_f_ex = ac->ac_b_ex;
3482 
3483 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3484 	pa->pa_lstart = pa->pa_pstart;
3485 	pa->pa_len = ac->ac_b_ex.fe_len;
3486 	pa->pa_free = pa->pa_len;
3487 	atomic_set(&pa->pa_count, 1);
3488 	spin_lock_init(&pa->pa_lock);
3489 	INIT_LIST_HEAD(&pa->pa_inode_list);
3490 	INIT_LIST_HEAD(&pa->pa_group_list);
3491 	pa->pa_deleted = 0;
3492 	pa->pa_type = MB_GROUP_PA;
3493 
3494 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3495 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3496 	trace_ext4_mb_new_group_pa(ac, pa);
3497 
3498 	ext4_mb_use_group_pa(ac, pa);
3499 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3500 
3501 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3502 	lg = ac->ac_lg;
3503 	BUG_ON(lg == NULL);
3504 
3505 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3506 	pa->pa_inode = NULL;
3507 
3508 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3509 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3510 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3511 
3512 	/*
3513 	 * We will later add the new pa to the right bucket
3514 	 * after updating the pa_free in ext4_mb_release_context
3515 	 */
3516 	return 0;
3517 }
3518 
3519 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3520 {
3521 	int err;
3522 
3523 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3524 		err = ext4_mb_new_group_pa(ac);
3525 	else
3526 		err = ext4_mb_new_inode_pa(ac);
3527 	return err;
3528 }
3529 
3530 /*
3531  * finds all unused blocks in the on-disk bitmap and frees them in the
3532  * in-core bitmap and buddy.
3533  * @pa must be unlinked from the inode and group lists, so that
3534  * nobody else can find/use it.
3535  * The caller MUST hold the group/inode locks.
3536  * TODO: optimize the case when there are no in-core structures yet
3537  */
3538 static noinline_for_stack int
3539 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3540 			struct ext4_prealloc_space *pa,
3541 			struct ext4_allocation_context *ac)
3542 {
3543 	struct super_block *sb = e4b->bd_sb;
3544 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3545 	unsigned int end;
3546 	unsigned int next;
3547 	ext4_group_t group;
3548 	ext4_grpblk_t bit;
3549 	unsigned long long grp_blk_start;
3550 	sector_t start;
3551 	int err = 0;
3552 	int free = 0;
3553 
3554 	BUG_ON(pa->pa_deleted == 0);
3555 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3556 	grp_blk_start = pa->pa_pstart - bit;
3557 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3558 	end = bit + pa->pa_len;
3559 
3560 	if (ac) {
3561 		ac->ac_sb = sb;
3562 		ac->ac_inode = pa->pa_inode;
3563 	}
3564 
3565 	while (bit < end) {
3566 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3567 		if (bit >= end)
3568 			break;
3569 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3570 		start = ext4_group_first_block_no(sb, group) + bit;
3571 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3572 				(unsigned) start, (unsigned) next - bit,
3573 				(unsigned) group);
3574 		free += next - bit;
3575 
3576 		if (ac) {
3577 			ac->ac_b_ex.fe_group = group;
3578 			ac->ac_b_ex.fe_start = bit;
3579 			ac->ac_b_ex.fe_len = next - bit;
3580 			ac->ac_b_ex.fe_logical = 0;
3581 			trace_ext4_mballoc_discard(ac);
3582 		}
3583 
3584 		trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit,
3585 					       next - bit);
3586 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3587 		bit = next + 1;
3588 	}
3589 	if (free != pa->pa_free) {
3590 		printk(KERN_CRIT "pa %p: logical %lu, phys. %lu, len %lu\n",
3591 			pa, (unsigned long) pa->pa_lstart,
3592 			(unsigned long) pa->pa_pstart,
3593 			(unsigned long) pa->pa_len);
3594 		ext4_grp_locked_error(sb, group,
3595 					__func__, "free %u, pa_free %u",
3596 					free, pa->pa_free);
3597 		/*
3598 		 * pa is already deleted so we use the value obtained
3599 		 * from the bitmap and continue.
3600 		 */
3601 	}
3602 	atomic_add(free, &sbi->s_mb_discarded);
3603 
3604 	return err;
3605 }
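/*
 * The loop above scans the bitmap in runs: find the next zero bit, then
 * the next set bit, and free the run in between.  A minimal userspace
 * sketch of the same pattern (illustrative only; example_next_bit() is
 * a hypothetical stand-in for mb_find_next_zero_bit()/mb_find_next_bit()
 * and operates on a plain byte array, unlike the kernel helpers):
 */
static int example_next_bit(const unsigned char *map, int end, int bit,
			    int val)
{
	while (bit < end && (((map[bit / 8] >> (bit % 8)) & 1) != val))
		bit++;
	return bit;
}

static int example_count_free(const unsigned char *map, int end)
{
	int bit = 0, next, free = 0;

	while (bit < end) {
		bit = example_next_bit(map, end, bit, 0); /* zero run start */
		if (bit >= end)
			break;
		next = example_next_bit(map, end, bit, 1); /* zero run end */
		free += next - bit;	/* blocks [bit, next) would be freed */
		bit = next + 1;
	}
	return free;
}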
3606 
3607 static noinline_for_stack int
3608 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3609 				struct ext4_prealloc_space *pa,
3610 				struct ext4_allocation_context *ac)
3611 {
3612 	struct super_block *sb = e4b->bd_sb;
3613 	ext4_group_t group;
3614 	ext4_grpblk_t bit;
3615 
3616 	trace_ext4_mb_release_group_pa(ac, pa);
3617 	BUG_ON(pa->pa_deleted == 0);
3618 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3619 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3620 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3621 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3622 
3623 	if (ac) {
3624 		ac->ac_sb = sb;
3625 		ac->ac_inode = NULL;
3626 		ac->ac_b_ex.fe_group = group;
3627 		ac->ac_b_ex.fe_start = bit;
3628 		ac->ac_b_ex.fe_len = pa->pa_len;
3629 		ac->ac_b_ex.fe_logical = 0;
3630 		trace_ext4_mballoc_discard(ac);
3631 	}
3632 
3633 	return 0;
3634 }
3635 
3636 /*
3637  * releases all preallocations in a given group
3638  *
3639  * first, we need to decide discard policy:
3640  * - when do we discard
3641  *   1) ENOSPC
3642  * - how many do we discard
3643  *   1) how many requested
3644  */
3645 static noinline_for_stack int
3646 ext4_mb_discard_group_preallocations(struct super_block *sb,
3647 					ext4_group_t group, int needed)
3648 {
3649 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3650 	struct buffer_head *bitmap_bh = NULL;
3651 	struct ext4_prealloc_space *pa, *tmp;
3652 	struct ext4_allocation_context *ac;
3653 	struct list_head list;
3654 	struct ext4_buddy e4b;
3655 	int err;
3656 	int busy = 0;
3657 	int free = 0;
3658 
3659 	mb_debug(1, "discard preallocation for group %u\n", group);
3660 
3661 	if (list_empty(&grp->bb_prealloc_list))
3662 		return 0;
3663 
3664 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3665 	if (bitmap_bh == NULL) {
3666 		ext4_error(sb, "Error reading block bitmap for %u", group);
3667 		return 0;
3668 	}
3669 
3670 	err = ext4_mb_load_buddy(sb, group, &e4b);
3671 	if (err) {
3672 		ext4_error(sb, "Error loading buddy information for %u", group);
3673 		put_bh(bitmap_bh);
3674 		return 0;
3675 	}
3676 
3677 	if (needed == 0)
3678 		needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3679 
3680 	INIT_LIST_HEAD(&list);
3681 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3682 	if (ac)
3683 		ac->ac_sb = sb;
3684 repeat:
3685 	ext4_lock_group(sb, group);
3686 	list_for_each_entry_safe(pa, tmp,
3687 				&grp->bb_prealloc_list, pa_group_list) {
3688 		spin_lock(&pa->pa_lock);
3689 		if (atomic_read(&pa->pa_count)) {
3690 			spin_unlock(&pa->pa_lock);
3691 			busy = 1;
3692 			continue;
3693 		}
3694 		if (pa->pa_deleted) {
3695 			spin_unlock(&pa->pa_lock);
3696 			continue;
3697 		}
3698 
3699 		/* seems this one can be freed ... */
3700 		pa->pa_deleted = 1;
3701 
3702 		/* we can trust pa_free ... */
3703 		free += pa->pa_free;
3704 
3705 		spin_unlock(&pa->pa_lock);
3706 
3707 		list_del(&pa->pa_group_list);
3708 		list_add(&pa->u.pa_tmp_list, &list);
3709 	}
3710 
3711 	/* if we still need more blocks and some PAs were used, try again */
3712 	if (free < needed && busy) {
3713 		busy = 0;
3714 		ext4_unlock_group(sb, group);
3715 		/*
3716 		 * Yield the CPU here so that we don't get a soft lockup
3717 		 * in the non-preempt case.
3718 		 */
3719 		yield();
3720 		goto repeat;
3721 	}
3722 
3723 	/* found anything to free? */
3724 	if (list_empty(&list)) {
3725 		BUG_ON(free != 0);
3726 		goto out;
3727 	}
3728 
3729 	/* now free all selected PAs */
3730 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3731 
3732 		/* remove from object (inode or locality group) */
3733 		spin_lock(pa->pa_obj_lock);
3734 		list_del_rcu(&pa->pa_inode_list);
3735 		spin_unlock(pa->pa_obj_lock);
3736 
3737 		if (pa->pa_type == MB_GROUP_PA)
3738 			ext4_mb_release_group_pa(&e4b, pa, ac);
3739 		else
3740 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3741 
3742 		list_del(&pa->u.pa_tmp_list);
3743 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3744 	}
3745 
3746 out:
3747 	ext4_unlock_group(sb, group);
3748 	if (ac)
3749 		kmem_cache_free(ext4_ac_cachep, ac);
3750 	ext4_mb_unload_buddy(&e4b);
3751 	put_bh(bitmap_bh);
3752 	return free;
3753 }
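/*
 * The function above follows a common kernel pattern: under the group
 * lock it only marks victims deleted and unlinks them onto a private
 * list; the expensive release work runs after the lock is dropped.  A
 * minimal userspace sketch of that pattern, assuming pthreads and a
 * hypothetical item type (none of these names exist in ext4):
 */
#include <pthread.h>

struct example_pa {
	struct example_pa *next;
	int in_use;
};

static void example_collect_then_release(pthread_mutex_t *lock,
					 struct example_pa **head,
					 void (*release)(struct example_pa *))
{
	struct example_pa *batch = NULL, **pp, *pa;

	pthread_mutex_lock(lock);
	for (pp = head; (pa = *pp) != NULL; ) {
		if (pa->in_use) {	/* busy: leave it alone */
			pp = &pa->next;
			continue;
		}
		*pp = pa->next;		/* unlink under the lock ... */
		pa->next = batch;	/* ... onto a private batch list */
		batch = pa;
	}
	pthread_mutex_unlock(lock);

	while ((pa = batch) != NULL) {	/* heavy work, lock not held */
		batch = pa->next;
		release(pa);
	}
}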
3754 
3755 /*
3756  * releases all unused preallocated blocks for a given inode
3757  *
3758  * It's important to discard preallocations under i_data_sem
3759  * We don't want another block to be served from the prealloc
3760  * space when we are discarding the inode prealloc space.
3761  *
3762  * FIXME!! Make sure it is valid at all the call sites
3763  */
3764 void ext4_discard_preallocations(struct inode *inode)
3765 {
3766 	struct ext4_inode_info *ei = EXT4_I(inode);
3767 	struct super_block *sb = inode->i_sb;
3768 	struct buffer_head *bitmap_bh = NULL;
3769 	struct ext4_prealloc_space *pa, *tmp;
3770 	struct ext4_allocation_context *ac;
3771 	ext4_group_t group = 0;
3772 	struct list_head list;
3773 	struct ext4_buddy e4b;
3774 	int err;
3775 
3776 	if (!S_ISREG(inode->i_mode)) {
3777 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3778 		return;
3779 	}
3780 
3781 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3782 	trace_ext4_discard_preallocations(inode);
3783 
3784 	INIT_LIST_HEAD(&list);
3785 
3786 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3787 	if (ac) {
3788 		ac->ac_sb = sb;
3789 		ac->ac_inode = inode;
3790 	}
3791 repeat:
3792 	/* first, collect all pa's in the inode */
3793 	spin_lock(&ei->i_prealloc_lock);
3794 	while (!list_empty(&ei->i_prealloc_list)) {
3795 		pa = list_entry(ei->i_prealloc_list.next,
3796 				struct ext4_prealloc_space, pa_inode_list);
3797 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3798 		spin_lock(&pa->pa_lock);
3799 		if (atomic_read(&pa->pa_count)) {
3800 			/* this shouldn't happen often - nobody should
3801 			 * use preallocation while we're discarding it */
3802 			spin_unlock(&pa->pa_lock);
3803 			spin_unlock(&ei->i_prealloc_lock);
3804 			printk(KERN_ERR "uh-oh! used pa while discarding\n");
3805 			WARN_ON(1);
3806 			schedule_timeout_uninterruptible(HZ);
3807 			goto repeat;
3808 
3809 		}
3810 		if (pa->pa_deleted == 0) {
3811 			pa->pa_deleted = 1;
3812 			spin_unlock(&pa->pa_lock);
3813 			list_del_rcu(&pa->pa_inode_list);
3814 			list_add(&pa->u.pa_tmp_list, &list);
3815 			continue;
3816 		}
3817 
3818 		/* someone is deleting pa right now */
3819 		spin_unlock(&pa->pa_lock);
3820 		spin_unlock(&ei->i_prealloc_lock);
3821 
3822 		/* we have to wait here because pa_deleted
3823 		 * doesn't mean the pa is already unlinked from
3824 		 * the list. As we might be called from
3825 		 * ->clear_inode(), the inode would get freed,
3826 		 * and a concurrent thread that is unlinking the
3827 		 * pa from the inode's list could access already
3828 		 * freed memory, bad-bad-bad */
3829 
3830 		/* XXX: if this happens too often, we can
3831 		 * add a flag to force wait only in case
3832 		 * of ->clear_inode(), but not in case of
3833 		 * regular truncate */
3834 		schedule_timeout_uninterruptible(HZ);
3835 		goto repeat;
3836 	}
3837 	spin_unlock(&ei->i_prealloc_lock);
3838 
3839 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3840 		BUG_ON(pa->pa_type != MB_INODE_PA);
3841 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3842 
3843 		err = ext4_mb_load_buddy(sb, group, &e4b);
3844 		if (err) {
3845 			ext4_error(sb, "Error loading buddy information for %u",
3846 					group);
3847 			continue;
3848 		}
3849 
3850 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3851 		if (bitmap_bh == NULL) {
3852 			ext4_error(sb, "Error reading block bitmap for %u",
3853 					group);
3854 			ext4_mb_unload_buddy(&e4b);
3855 			continue;
3856 		}
3857 
3858 		ext4_lock_group(sb, group);
3859 		list_del(&pa->pa_group_list);
3860 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3861 		ext4_unlock_group(sb, group);
3862 
3863 		ext4_mb_unload_buddy(&e4b);
3864 		put_bh(bitmap_bh);
3865 
3866 		list_del(&pa->u.pa_tmp_list);
3867 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3868 	}
3869 	if (ac)
3870 		kmem_cache_free(ext4_ac_cachep, ac);
3871 }
3872 
3873 /*
3874  * finds all preallocated spaces and returns the blocks being freed to them;
3875  * if a preallocated space becomes full (no block is used from the space),
3876  * the function frees that space in the buddy
3877  * XXX: at the moment, truncate (which is the only way to free blocks)
3878  * discards all preallocations
3879  */
3880 static void ext4_mb_return_to_preallocation(struct inode *inode,
3881 					struct ext4_buddy *e4b,
3882 					sector_t block, int count)
3883 {
3884 	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
3885 }
3886 #ifdef CONFIG_EXT4_DEBUG
3887 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3888 {
3889 	struct super_block *sb = ac->ac_sb;
3890 	ext4_group_t ngroups, i;
3891 
3892 	printk(KERN_ERR "EXT4-fs: Can't allocate:"
3893 			" Allocation context details:\n");
3894 	printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
3895 			ac->ac_status, ac->ac_flags);
3896 	printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
3897 			"best %lu/%lu/%lu@%lu cr %d\n",
3898 			(unsigned long)ac->ac_o_ex.fe_group,
3899 			(unsigned long)ac->ac_o_ex.fe_start,
3900 			(unsigned long)ac->ac_o_ex.fe_len,
3901 			(unsigned long)ac->ac_o_ex.fe_logical,
3902 			(unsigned long)ac->ac_g_ex.fe_group,
3903 			(unsigned long)ac->ac_g_ex.fe_start,
3904 			(unsigned long)ac->ac_g_ex.fe_len,
3905 			(unsigned long)ac->ac_g_ex.fe_logical,
3906 			(unsigned long)ac->ac_b_ex.fe_group,
3907 			(unsigned long)ac->ac_b_ex.fe_start,
3908 			(unsigned long)ac->ac_b_ex.fe_len,
3909 			(unsigned long)ac->ac_b_ex.fe_logical,
3910 			(int)ac->ac_criteria);
3911 	printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
3912 		ac->ac_found);
3913 	printk(KERN_ERR "EXT4-fs: groups:\n");
3914 	ngroups = ext4_get_groups_count(sb);
3915 	for (i = 0; i < ngroups; i++) {
3916 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3917 		struct ext4_prealloc_space *pa;
3918 		ext4_grpblk_t start;
3919 		struct list_head *cur;
3920 		ext4_lock_group(sb, i);
3921 		list_for_each(cur, &grp->bb_prealloc_list) {
3922 			pa = list_entry(cur, struct ext4_prealloc_space,
3923 					pa_group_list);
3924 			spin_lock(&pa->pa_lock);
3925 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3926 						     NULL, &start);
3927 			spin_unlock(&pa->pa_lock);
3928 			printk(KERN_ERR "PA:%u:%d:%u\n", i,
3929 			       start, pa->pa_len);
3930 		}
3931 		ext4_unlock_group(sb, i);
3932 
3933 		if (grp->bb_free == 0)
3934 			continue;
3935 		printk(KERN_ERR "%u: %d/%d\n",
3936 		       i, grp->bb_free, grp->bb_fragments);
3937 	}
3938 	printk(KERN_ERR "\n");
3939 }
3940 #else
3941 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3942 {
3943 	return;
3944 }
3945 #endif
3946 
3947 /*
3948  * We use locality group preallocation for small files. The size of the
3949  * file is determined by the current size or the resulting size after
3950  * allocation, whichever is larger.
3951  *
3952  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
3953  */
3954 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3955 {
3956 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3957 	int bsbits = ac->ac_sb->s_blocksize_bits;
3958 	loff_t size, isize;
3959 
3960 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3961 		return;
3962 
3963 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3964 		return;
3965 
3966 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3967 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3968 		>> bsbits;
3969 
3970 	if ((size == isize) &&
3971 	    !ext4_fs_is_busy(sbi) &&
3972 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3973 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3974 		return;
3975 	}
3976 
3977 	/* don't use group allocation for large files */
3978 	size = max(size, isize);
3979 	if (size > sbi->s_mb_stream_request) {
3980 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3981 		return;
3982 	}
3983 
3984 	BUG_ON(ac->ac_lg != NULL);
3985 	/*
3986 	 * Locality group prealloc space is per-CPU. The reason for having
3987 	 * per-CPU locality groups is to reduce contention between block
3988 	 * requests from multiple CPUs.
3989 	 */
3990 	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3991 
3992 	/* we're going to use group allocation */
3993 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3994 
3995 	/* serialize all allocations in the group */
3996 	mutex_lock(&ac->ac_lg->lg_mutex);
3997 }
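/*
 * The threshold consulted above (s_mb_stream_request) is exposed as
 * /sys/fs/ext4/<partition>/mb_stream_req.  A minimal userspace sketch
 * of tuning it (illustrative; "sda1" and the value 32 are arbitrary
 * example choices):
 */
#include <stdio.h>

static int example_set_mb_stream_req(void)
{
	FILE *f = fopen("/sys/fs/ext4/sda1/mb_stream_req", "w");

	if (f == NULL)
		return -1;
	fprintf(f, "%d\n", 32);	/* threshold, in filesystem blocks */
	return fclose(f);
}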
3998 
3999 static noinline_for_stack int
4000 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4001 				struct ext4_allocation_request *ar)
4002 {
4003 	struct super_block *sb = ar->inode->i_sb;
4004 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4005 	struct ext4_super_block *es = sbi->s_es;
4006 	ext4_group_t group;
4007 	unsigned int len;
4008 	ext4_fsblk_t goal;
4009 	ext4_grpblk_t block;
4010 
4011 	/* we can't allocate > group size */
4012 	len = ar->len;
4013 
4014 	/* just a dirty hack to filter out too-big requests */
4015 	if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4016 		len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4017 
4018 	/* start searching from the goal */
4019 	goal = ar->goal;
4020 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4021 			goal >= ext4_blocks_count(es))
4022 		goal = le32_to_cpu(es->s_first_data_block);
4023 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4024 
4025 	/* set up allocation goals */
4026 	memset(ac, 0, sizeof(struct ext4_allocation_context));
4027 	ac->ac_b_ex.fe_logical = ar->logical;
4028 	ac->ac_status = AC_STATUS_CONTINUE;
4029 	ac->ac_sb = sb;
4030 	ac->ac_inode = ar->inode;
4031 	ac->ac_o_ex.fe_logical = ar->logical;
4032 	ac->ac_o_ex.fe_group = group;
4033 	ac->ac_o_ex.fe_start = block;
4034 	ac->ac_o_ex.fe_len = len;
4035 	ac->ac_g_ex.fe_logical = ar->logical;
4036 	ac->ac_g_ex.fe_group = group;
4037 	ac->ac_g_ex.fe_start = block;
4038 	ac->ac_g_ex.fe_len = len;
4039 	ac->ac_flags = ar->flags;
4040 
4041 	/* we have to define the context: will we work with a file or
4042 	 * a locality group. This is a policy, actually */
4043 	ext4_mb_group_or_file(ac);
4044 
4045 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4046 			"left: %u/%u, right %u/%u to %swritable\n",
4047 			(unsigned) ar->len, (unsigned) ar->logical,
4048 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4049 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4050 			(unsigned) ar->lright, (unsigned) ar->pright,
4051 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4052 	return 0;
4053 
4054 }
4055 
4056 static noinline_for_stack void
4057 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4058 					struct ext4_locality_group *lg,
4059 					int order, int total_entries)
4060 {
4061 	ext4_group_t group = 0;
4062 	struct ext4_buddy e4b;
4063 	struct list_head discard_list;
4064 	struct ext4_prealloc_space *pa, *tmp;
4065 	struct ext4_allocation_context *ac;
4066 
4067 	mb_debug(1, "discard locality group preallocation\n");
4068 
4069 	INIT_LIST_HEAD(&discard_list);
4070 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4071 	if (ac)
4072 		ac->ac_sb = sb;
4073 
4074 	spin_lock(&lg->lg_prealloc_lock);
4075 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4076 						pa_inode_list) {
4077 		spin_lock(&pa->pa_lock);
4078 		if (atomic_read(&pa->pa_count)) {
4079 			/*
4080 			 * This is the pa that we just used
4081 			 * for block allocation. So don't
4082 			 * free it
4083 			 */
4084 			spin_unlock(&pa->pa_lock);
4085 			continue;
4086 		}
4087 		if (pa->pa_deleted) {
4088 			spin_unlock(&pa->pa_lock);
4089 			continue;
4090 		}
4091 		/* only lg prealloc space */
4092 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4093 
4094 		/* seems this one can be freed ... */
4095 		pa->pa_deleted = 1;
4096 		spin_unlock(&pa->pa_lock);
4097 
4098 		list_del_rcu(&pa->pa_inode_list);
4099 		list_add(&pa->u.pa_tmp_list, &discard_list);
4100 
4101 		total_entries--;
4102 		if (total_entries <= 5) {
4103 			/*
4104 			 * we want to keep only 5 entries
4105 			 * makes sure we don't call discard
4106 			 * again soon for this list.
4107 			 * soon for this list.
4108 			 */
4109 			break;
4110 		}
4111 	}
4112 	spin_unlock(&lg->lg_prealloc_lock);
4113 
4114 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4115 
4116 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4117 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4118 			ext4_error(sb, "Error loading buddy information for %u",
4119 					group);
4120 			continue;
4121 		}
4122 		ext4_lock_group(sb, group);
4123 		list_del(&pa->pa_group_list);
4124 		ext4_mb_release_group_pa(&e4b, pa, ac);
4125 		ext4_unlock_group(sb, group);
4126 
4127 		ext4_mb_unload_buddy(&e4b);
4128 		list_del(&pa->u.pa_tmp_list);
4129 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4130 	}
4131 	if (ac)
4132 		kmem_cache_free(ext4_ac_cachep, ac);
4133 }
4134 
4135 /*
4136  * We have incremented pa_count. So it cannot be freed at this
4137  * point. Also we hold lg_mutex. So no parallel allocation is
4138  * possible from this lg. That means pa_free cannot be updated.
4139  *
4140  * A parallel ext4_mb_discard_group_preallocations is possible,
4141  * which can cause the lg_prealloc_list to be updated.
4142  */
4143 
4144 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4145 {
4146 	int order, added = 0, lg_prealloc_count = 1;
4147 	struct super_block *sb = ac->ac_sb;
4148 	struct ext4_locality_group *lg = ac->ac_lg;
4149 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4150 
4151 	order = fls(pa->pa_free) - 1;
4152 	if (order > PREALLOC_TB_SIZE - 1)
4153 		/* The max size of hash table is PREALLOC_TB_SIZE */
4154 		order = PREALLOC_TB_SIZE - 1;
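	/*
	 * Example (illustrative numbers): pa_free = 12 gives fls(12) = 4,
	 * so order = 3 and the pa lands in the bucket for free sizes in
	 * [2^3, 2^4), i.e. 8..15 blocks; very large pa_free values are
	 * capped into the last bucket, PREALLOC_TB_SIZE - 1.
	 */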
4155 	/* Add the prealloc space to lg */
4156 	rcu_read_lock();
4157 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4158 						pa_inode_list) {
4159 		spin_lock(&tmp_pa->pa_lock);
4160 		if (tmp_pa->pa_deleted) {
4161 			spin_unlock(&tmp_pa->pa_lock);
4162 			continue;
4163 		}
4164 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4165 			/* Add to the tail of the previous entry */
4166 			list_add_tail_rcu(&pa->pa_inode_list,
4167 						&tmp_pa->pa_inode_list);
4168 			added = 1;
4169 			/*
4170 			 * we want to count the total
4171 			 * number of entries in the list
4172 			 */
4173 		}
4174 		spin_unlock(&tmp_pa->pa_lock);
4175 		lg_prealloc_count++;
4176 	}
4177 	if (!added)
4178 		list_add_tail_rcu(&pa->pa_inode_list,
4179 					&lg->lg_prealloc_list[order]);
4180 	rcu_read_unlock();
4181 
4182 	/* Now trim the list to be not more than 8 elements */
4183 	if (lg_prealloc_count > 8) {
4184 		ext4_mb_discard_lg_preallocations(sb, lg,
4185 						order, lg_prealloc_count);
4186 		return;
4187 	}
4188 	return;
4189 }
4190 
4191 /*
4192  * release all resources used in the allocation
4193  */
4194 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4195 {
4196 	struct ext4_prealloc_space *pa = ac->ac_pa;
4197 	if (pa) {
4198 		if (pa->pa_type == MB_GROUP_PA) {
4199 			/* see comment in ext4_mb_use_group_pa() */
4200 			spin_lock(&pa->pa_lock);
4201 			pa->pa_pstart += ac->ac_b_ex.fe_len;
4202 			pa->pa_lstart += ac->ac_b_ex.fe_len;
4203 			pa->pa_free -= ac->ac_b_ex.fe_len;
4204 			pa->pa_len -= ac->ac_b_ex.fe_len;
4205 			spin_unlock(&pa->pa_lock);
4206 		}
4207 	}
4208 	if (ac->alloc_semp)
4209 		up_read(ac->alloc_semp);
4210 	if (pa) {
4211 		/*
4212 		 * We want to add the pa to the right bucket.
4213 		 * Remove it from the list and, while adding,
4214 		 * make sure the list to which we are adding
4215 		 * doesn't grow too big.  We need to release
4216 		 * alloc_semp before calling ext4_mb_add_n_trim()
4217 		 */
4218 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4219 			spin_lock(pa->pa_obj_lock);
4220 			list_del_rcu(&pa->pa_inode_list);
4221 			spin_unlock(pa->pa_obj_lock);
4222 			ext4_mb_add_n_trim(ac);
4223 		}
4224 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4225 	}
4226 	if (ac->ac_bitmap_page)
4227 		page_cache_release(ac->ac_bitmap_page);
4228 	if (ac->ac_buddy_page)
4229 		page_cache_release(ac->ac_buddy_page);
4230 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4231 		mutex_unlock(&ac->ac_lg->lg_mutex);
4232 	ext4_mb_collect_stats(ac);
4233 	return 0;
4234 }
4235 
4236 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4237 {
4238 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4239 	int ret;
4240 	int freed = 0;
4241 
4242 	trace_ext4_mb_discard_preallocations(sb, needed);
4243 	for (i = 0; i < ngroups && needed > 0; i++) {
4244 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4245 		freed += ret;
4246 		needed -= ret;
4247 	}
4248 
4249 	return freed;
4250 }
4251 
4252 /*
4253  * Main entry point into mballoc to allocate blocks.
4254  * It tries to use preallocation first, then falls back
4255  * to usual allocation
4256  */
4257 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4258 				 struct ext4_allocation_request *ar, int *errp)
4259 {
4260 	int freed;
4261 	struct ext4_allocation_context *ac = NULL;
4262 	struct ext4_sb_info *sbi;
4263 	struct super_block *sb;
4264 	ext4_fsblk_t block = 0;
4265 	unsigned int inquota = 0;
4266 	unsigned int reserv_blks = 0;
4267 
4268 	sb = ar->inode->i_sb;
4269 	sbi = EXT4_SB(sb);
4270 
4271 	trace_ext4_request_blocks(ar);
4272 
4273 	/*
4274 	 * For delayed allocation, we could skip the ENOSPC and
4275 	 * EDQUOT check, as blocks and quotas have already been
4276 	 * reserved when the data was copied into the pagecache.
4277 	 */
4278 	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4279 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4280 	else {
4281 		/* Without delayed allocation we need to verify
4282 		 * there are enough free blocks to do the block allocation
4283 		 * and that the allocation doesn't exceed the quota limits.
4284 		 */
4285 		while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4286 			/* let others free the space */
4287 			yield();
4288 			ar->len = ar->len >> 1;
4289 		}
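		/*
		 * Example (illustrative numbers): a 32-block request
		 * that keeps failing ext4_claim_free_blocks() is
		 * retried with 16, 8, 4, 2 and finally 1 block; if
		 * even one block cannot be claimed, ar->len reaches 0
		 * and *errp is set to -ENOSPC below.
		 */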
4290 		if (!ar->len) {
4291 			*errp = -ENOSPC;
4292 			return 0;
4293 		}
4294 		reserv_blks = ar->len;
4295 		while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
4296 			ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4297 			ar->len--;
4298 		}
4299 		inquota = ar->len;
4300 		if (ar->len == 0) {
4301 			*errp = -EDQUOT;
4302 			goto out3;
4303 		}
4304 	}
4305 
4306 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4307 	if (!ac) {
4308 		ar->len = 0;
4309 		*errp = -ENOMEM;
4310 		goto out1;
4311 	}
4312 
4313 	*errp = ext4_mb_initialize_context(ac, ar);
4314 	if (*errp) {
4315 		ar->len = 0;
4316 		goto out2;
4317 	}
4318 
4319 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4320 	if (!ext4_mb_use_preallocated(ac)) {
4321 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4322 		ext4_mb_normalize_request(ac, ar);
4323 repeat:
4324 		/* allocate space in core */
4325 		ext4_mb_regular_allocator(ac);
4326 
4327 		/* as we've just preallocated more space than
4328 		 * the user originally requested, we store the allocated
4329 		 * space in a special descriptor */
4330 		if (ac->ac_status == AC_STATUS_FOUND &&
4331 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4332 			ext4_mb_new_preallocation(ac);
4333 	}
4334 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4335 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4336 		if (*errp ==  -EAGAIN) {
4337 			/*
4338 			 * drop the reference that we took
4339 			 * in ext4_mb_use_best_found
4340 			 */
4341 			ext4_mb_release_context(ac);
4342 			ac->ac_b_ex.fe_group = 0;
4343 			ac->ac_b_ex.fe_start = 0;
4344 			ac->ac_b_ex.fe_len = 0;
4345 			ac->ac_status = AC_STATUS_CONTINUE;
4346 			goto repeat;
4347 		} else if (*errp) {
4348 			ext4_discard_allocated_blocks(ac);
4349 			ac->ac_b_ex.fe_len = 0;
4350 			ar->len = 0;
4351 			ext4_mb_show_ac(ac);
4352 		} else {
4353 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4354 			ar->len = ac->ac_b_ex.fe_len;
4355 		}
4356 	} else {
4357 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4358 		if (freed)
4359 			goto repeat;
4360 		*errp = -ENOSPC;
4361 		ac->ac_b_ex.fe_len = 0;
4362 		ar->len = 0;
4363 		ext4_mb_show_ac(ac);
4364 	}
4365 
4366 	ext4_mb_release_context(ac);
4367 
4368 out2:
4369 	kmem_cache_free(ext4_ac_cachep, ac);
4370 out1:
4371 	if (inquota && ar->len < inquota)
4372 		dquot_free_block(ar->inode, inquota - ar->len);
4373 out3:
4374 	if (!ar->len) {
4375 		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4376 			/* release all the reserved blocks in the non-delalloc case */
4377 			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
4378 						reserv_blks);
4379 	}
4380 
4381 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4382 
4383 	return block;
4384 }
4385 
4386 /*
4387  * We can merge two free data extents only if the physical blocks
4388  * are contiguous, AND the extents were freed by the same transaction,
4389  * AND the blocks are associated with the same group.
4390  */
4391 static int can_merge(struct ext4_free_data *entry1,
4392 			struct ext4_free_data *entry2)
4393 {
4394 	if ((entry1->t_tid == entry2->t_tid) &&
4395 	    (entry1->group == entry2->group) &&
4396 	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
4397 		return 1;
4398 	return 0;
4399 }
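/*
 * Example (illustrative numbers): entries {t_tid 7, group 2, start_blk
 * 100, count 8} and {t_tid 7, group 2, start_blk 108, count 4} satisfy
 * all three conditions (100 + 8 == 108), so they can merge into a
 * single extent {start_blk 100, count 12}; differing tids or groups,
 * or non-adjacent ranges, make can_merge() return 0.
 */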
4400 
4401 static noinline_for_stack int
4402 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4403 		      struct ext4_free_data *new_entry)
4404 {
4405 	ext4_grpblk_t block;
4406 	struct ext4_free_data *entry;
4407 	struct ext4_group_info *db = e4b->bd_info;
4408 	struct super_block *sb = e4b->bd_sb;
4409 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4410 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4411 	struct rb_node *parent = NULL, *new_node;
4412 
4413 	BUG_ON(!ext4_handle_valid(handle));
4414 	BUG_ON(e4b->bd_bitmap_page == NULL);
4415 	BUG_ON(e4b->bd_buddy_page == NULL);
4416 
4417 	new_node = &new_entry->node;
4418 	block = new_entry->start_blk;
4419 
4420 	if (!*n) {
4421 		/* first free block extent. We need to
4422 		 * protect the buddy cache from being freed,
4423 		 * otherwise we'll refresh it from the
4424 		 * on-disk bitmap and lose not-yet-available
4425 		 * blocks */
4426 		page_cache_get(e4b->bd_buddy_page);
4427 		page_cache_get(e4b->bd_bitmap_page);
4428 	}
4429 	while (*n) {
4430 		parent = *n;
4431 		entry = rb_entry(parent, struct ext4_free_data, node);
4432 		if (block < entry->start_blk)
4433 			n = &(*n)->rb_left;
4434 		else if (block >= (entry->start_blk + entry->count))
4435 			n = &(*n)->rb_right;
4436 		else {
4437 			ext4_grp_locked_error(sb, e4b->bd_group, __func__,
4438 					"Double free of blocks %d (%d %d)",
4439 					block, entry->start_blk, entry->count);
4440 			return 0;
4441 		}
4442 	}
4443 
4444 	rb_link_node(new_node, parent, n);
4445 	rb_insert_color(new_node, &db->bb_free_root);
4446 
4447 	/* Now see if the extent can be merged to the left and right */
4448 	node = rb_prev(new_node);
4449 	if (node) {
4450 		entry = rb_entry(node, struct ext4_free_data, node);
4451 		if (can_merge(entry, new_entry)) {
4452 			new_entry->start_blk = entry->start_blk;
4453 			new_entry->count += entry->count;
4454 			rb_erase(node, &(db->bb_free_root));
4455 			spin_lock(&sbi->s_md_lock);
4456 			list_del(&entry->list);
4457 			spin_unlock(&sbi->s_md_lock);
4458 			kmem_cache_free(ext4_free_ext_cachep, entry);
4459 		}
4460 	}
4461 
4462 	node = rb_next(new_node);
4463 	if (node) {
4464 		entry = rb_entry(node, struct ext4_free_data, node);
4465 		if (can_merge(new_entry, entry)) {
4466 			new_entry->count += entry->count;
4467 			rb_erase(node, &(db->bb_free_root));
4468 			spin_lock(&sbi->s_md_lock);
4469 			list_del(&entry->list);
4470 			spin_unlock(&sbi->s_md_lock);
4471 			kmem_cache_free(ext4_free_ext_cachep, entry);
4472 		}
4473 	}
4474 	/* Add the extent to the transaction's private list */
4475 	spin_lock(&sbi->s_md_lock);
4476 	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
4477 	spin_unlock(&sbi->s_md_lock);
4478 	return 0;
4479 }
4480 
4481 /**
4482  * ext4_free_blocks() -- Free given blocks and update quota
4483  * @handle:		handle for this transaction
4484  * @inode:		inode
4485  * @block:		start physical block to free
4486  * @count:		number of blocks to free
4487  * @flags:		EXT4_FREE_BLOCKS_* flags controlling the free
4488  */
4489 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4490 		      struct buffer_head *bh, ext4_fsblk_t block,
4491 		      unsigned long count, int flags)
4492 {
4493 	struct buffer_head *bitmap_bh = NULL;
4494 	struct super_block *sb = inode->i_sb;
4495 	struct ext4_allocation_context *ac = NULL;
4496 	struct ext4_group_desc *gdp;
4497 	struct ext4_super_block *es;
4498 	unsigned long freed = 0;
4499 	unsigned int overflow;
4500 	ext4_grpblk_t bit;
4501 	struct buffer_head *gd_bh;
4502 	ext4_group_t block_group;
4503 	struct ext4_sb_info *sbi;
4504 	struct ext4_buddy e4b;
4505 	int err = 0;
4506 	int ret;
4507 
4508 	if (bh) {
4509 		if (block)
4510 			BUG_ON(block != bh->b_blocknr);
4511 		else
4512 			block = bh->b_blocknr;
4513 	}
4514 
4515 	sbi = EXT4_SB(sb);
4516 	es = EXT4_SB(sb)->s_es;
4517 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4518 	    !ext4_data_block_valid(sbi, block, count)) {
4519 		ext4_error(sb, "Freeing blocks not in datazone - "
4520 			   "block = %llu, count = %lu", block, count);
4521 		goto error_return;
4522 	}
4523 
4524 	ext4_debug("freeing block %llu\n", block);
4525 	trace_ext4_free_blocks(inode, block, count, flags);
4526 
4527 	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4528 		struct buffer_head *tbh = bh;
4529 		int i;
4530 
4531 		BUG_ON(bh && (count > 1));
4532 
4533 		for (i = 0; i < count; i++) {
4534 			if (!bh)
4535 				tbh = sb_find_get_block(inode->i_sb,
4536 							block + i);
4537 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4538 				    inode, tbh, block + i);
4539 		}
4540 	}
4541 
4542 	/*
4543 	 * We need to make sure we don't reuse the freed block until
4544 	 * after the transaction is committed, which we can do by
4545 	 * treating the block as metadata, below.  We make an
4546 	 * exception if the inode is to be written in writeback mode
4547 	 * since writeback mode has weak data consistency guarantees.
4548 	 */
4549 	if (!ext4_should_writeback_data(inode))
4550 		flags |= EXT4_FREE_BLOCKS_METADATA;
4551 
4552 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4553 	if (ac) {
4554 		ac->ac_inode = inode;
4555 		ac->ac_sb = sb;
4556 	}
4557 
4558 do_more:
4559 	overflow = 0;
4560 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4561 
4562 	/*
4563 	 * Check to see if we are freeing blocks across a group
4564 	 * boundary.
4565 	 */
4566 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4567 		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4568 		count -= overflow;
4569 	}
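	/*
	 * Example (illustrative numbers): with 32768 blocks per group,
	 * bit = 32760 and count = 16 give overflow = 8; the first 8
	 * blocks are freed in this group, and the "goto do_more" at the
	 * bottom restarts with block += count and count = overflow.
	 */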
4570 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4571 	if (!bitmap_bh) {
4572 		err = -EIO;
4573 		goto error_return;
4574 	}
4575 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4576 	if (!gdp) {
4577 		err = -EIO;
4578 		goto error_return;
4579 	}
4580 
4581 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4582 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4583 	    in_range(block, ext4_inode_table(sb, gdp),
4584 		      EXT4_SB(sb)->s_itb_per_group) ||
4585 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4586 		      EXT4_SB(sb)->s_itb_per_group)) {
4587 
4588 		ext4_error(sb, "Freeing blocks in system zone - "
4589 			   "Block = %llu, count = %lu", block, count);
4590 		/* err = 0. ext4_std_error should be a no-op */
4591 		goto error_return;
4592 	}
4593 
4594 	BUFFER_TRACE(bitmap_bh, "getting write access");
4595 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4596 	if (err)
4597 		goto error_return;
4598 
4599 	/*
4600 	 * We are about to modify some metadata.  Call the journal APIs
4601 	 * to unshare ->b_data if a currently-committing transaction is
4602 	 * using it
4603 	 */
4604 	BUFFER_TRACE(gd_bh, "get_write_access");
4605 	err = ext4_journal_get_write_access(handle, gd_bh);
4606 	if (err)
4607 		goto error_return;
4608 #ifdef AGGRESSIVE_CHECK
4609 	{
4610 		int i;
4611 		for (i = 0; i < count; i++)
4612 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4613 	}
4614 #endif
4615 	if (ac) {
4616 		ac->ac_b_ex.fe_group = block_group;
4617 		ac->ac_b_ex.fe_start = bit;
4618 		ac->ac_b_ex.fe_len = count;
4619 		trace_ext4_mballoc_free(ac);
4620 	}
4621 
4622 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4623 	if (err)
4624 		goto error_return;
4625 
4626 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4627 		struct ext4_free_data *new_entry;
4628 		/*
4629 		 * The blocks being freed are metadata. These blocks shouldn't
4630 		 * be used until this transaction is committed
4631 		 */
4632 		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4633 		new_entry->start_blk = bit;
4634 		new_entry->group  = block_group;
4635 		new_entry->count = count;
4636 		new_entry->t_tid = handle->h_transaction->t_tid;
4637 
4638 		ext4_lock_group(sb, block_group);
4639 		mb_clear_bits(bitmap_bh->b_data, bit, count);
4640 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4641 	} else {
4642 		/* need to update group_info->bb_free and bitmap
4643 		 * with the group lock held. generate_buddy looks at
4644 		 * them with the group lock held
4645 		 */
4646 		ext4_lock_group(sb, block_group);
4647 		mb_clear_bits(bitmap_bh->b_data, bit, count);
4648 		mb_free_blocks(inode, &e4b, bit, count);
4649 		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4650 	}
4651 
4652 	ret = ext4_free_blks_count(sb, gdp) + count;
4653 	ext4_free_blks_set(sb, gdp, ret);
4654 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4655 	ext4_unlock_group(sb, block_group);
4656 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
4657 
4658 	if (sbi->s_log_groups_per_flex) {
4659 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4660 		atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
4661 	}
4662 
4663 	ext4_mb_unload_buddy(&e4b);
4664 
4665 	freed += count;
4666 
4667 	/* We dirtied the bitmap block */
4668 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4669 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4670 
4671 	/* And the group descriptor block */
4672 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4673 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4674 	if (!err)
4675 		err = ret;
4676 
4677 	if (overflow && !err) {
4678 		block += count;
4679 		count = overflow;
4680 		put_bh(bitmap_bh);
4681 		goto do_more;
4682 	}
4683 	sb->s_dirt = 1;
4684 error_return:
4685 	if (freed)
4686 		dquot_free_block(inode, freed);
4687 	brelse(bitmap_bh);
4688 	ext4_std_error(sb, err);
4689 	if (ac)
4690 		kmem_cache_free(ext4_ac_cachep, ac);
4691 	return;
4692 }
4693