xref: /openbmc/linux/fs/ext4/mballoc.c (revision 82ced6fd)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblock allocation routines
22  */
23 
24 #include "mballoc.h"
25 /*
26  * MUSTDO:
27  *   - test ext4_ext_search_left() and ext4_ext_search_right()
28  *   - search for metadata in a few groups
29  *
30  * TODO v4:
31  *   - normalization should take into account whether file is still open
32  *   - discard preallocations if no free space left (policy?)
33  *   - don't normalize tails
34  *   - quota
35  *   - reservation for superuser
36  *
37  * TODO v3:
38  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
39  *   - track min/max extents in each group for better group selection
40  *   - mb_mark_used() may allocate chunk right after splitting buddy
41  *   - tree of groups sorted by number of free blocks
42  *   - error handling
43  */
44 
45 /*
46  * An allocation request asks for multiple blocks near
47  * the specified goal block.
48  *
49  * During the initialization phase of the allocator we decide whether to
50  * use group preallocation or inode preallocation depending on the size
51  * of the file. The size of the file could be the resulting file size we
52  * would have after allocation, or the current file size, whichever
53  * is larger. If the size is less than sbi->s_mb_stream_request we
54  * select group preallocation. The default value of
55  * s_mb_stream_request is 16 blocks. This can also be tuned via
56  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
57  * terms of number of blocks.
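 *
 * For example (illustrative): an append that leaves a file at 10 blocks
 * (10 < 16) selects group preallocation, while a file growing to 64
 * blocks uses inode preallocation instead.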
58  *
59  * The main motivation for having small files use group preallocation is to
60  * ensure that we keep small files close together on the disk.
61  *
62  * In the first stage the allocator looks at the inode prealloc list,
63  * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
64  * spaces for this particular inode. The inode prealloc space is
65  * represented as:
66  *
67  * pa_lstart -> the logical start block for this prealloc space
68  * pa_pstart -> the physical start block for this prealloc space
69  * pa_len    -> length for this prealloc space
70  * pa_free   -> free space available in this prealloc space
71  *
72  * The inode preallocation space is used based on the _logical_ start
73  * block: only if the logical file block falls within the range of a
74  * prealloc space do we consume that particular prealloc space. This
75  * makes sure that we have contiguous physical blocks representing the
76  * file blocks.
77  *
78  * The important thing to note about inode prealloc space is that we
79  * don't modify any value associated with it except pa_free.
80  *
81  * If we are not able to find blocks in the inode prealloc space, and we
82  * have the group allocation flag set, then we look at the locality group
83  * prealloc space. These are per-CPU prealloc lists represented as
84  *
85  * ext4_sb_info.s_locality_groups[smp_processor_id()]
86  *
87  * The reason for having a per-CPU locality group is to reduce contention
88  * between CPUs. It is possible to get scheduled at this point.
89  *
90  * The locality group prealloc space is used based on whether we have
91  * enough free space (pa_free) within the prealloc space.
92  *
93  * If we can't allocate blocks via inode and/or locality group
94  * prealloc then we look at the buddy cache. The buddy cache is represented
95  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offsets get
96  * mapped to the buddy and bitmap information regarding the different
97  * groups. The buddy information is attached to the buddy cache inode so
98  * that we can access it through the page cache. The information regarding
99  * each group is loaded via ext4_mb_load_buddy and consists of the block
100  * bitmap and buddy information, which are stored in the
101  * inode as:
102  *
103  *  {                        page                        }
104  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
105  *
106  *
107  * one block each for the bitmap and the buddy information. So each group
108  * takes up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
109  * blocksize) blocks, so it can hold information for groups_per_page
110  * groups, which is blocks_per_page/2.
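 *
 * For example (illustrative): with PAGE_CACHE_SIZE = 4096 and a 1KB
 * blocksize, blocks_per_page = 4096/1024 = 4, so one page caches the
 * bitmap+buddy pair for groups_per_page = 4/2 = 2 groups; with a 4KB
 * blocksize, blocks_per_page = 1 and a group's bitmap and buddy land on
 * two separate pages.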
111  *
112  * The buddy cache inode is not stored on disk. The inode is thrown
113  * away when the filesystem is unmounted.
114  *
115  * We look for count number of blocks in the buddy cache. If we are able
116  * to locate that many free blocks we return with additional information
117  * regarding the rest of the contiguous physical blocks available.
118  *
119  * Before allocating blocks via the buddy cache we normalize the request
120  * blocks. This ensures we ask for more blocks than we need. The extra
121  * blocks that we get after allocation are added to the respective prealloc
122  * list. In case of inode preallocation we follow a set of heuristics
123  * based on file size. This can be found in ext4_mb_normalize_request. If
124  * we are doing a group prealloc we try to normalize the request to
125  * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
126  * 512 blocks. This can be tuned via
127  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
128  * terms of number of blocks. If we have mounted the file system with the
129  * -o stripe=<value> option, the group prealloc request is normalized to
130  * the stripe value (sbi->s_stripe).
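 *
 * For example (illustrative): an append that would leave a file at 13
 * blocks can be rounded up to a 16-block request by the inode-prealloc
 * heuristics, while a small group-prealloc request is padded out to the
 * full 512-block (or stripe-sized) chunk; the unused tail goes onto the
 * respective prealloc list for later allocations.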
131  *
132  * The regular allocator (using the buddy cache) supports a few tunables.
133  *
134  * /sys/fs/ext4/<partition>/mb_min_to_scan
135  * /sys/fs/ext4/<partition>/mb_max_to_scan
136  * /sys/fs/ext4/<partition>/mb_order2_req
137  *
138  * The regular allocator uses the buddy scan only if the request len is a
139  * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
140  * The value of s_mb_order2_reqs can be tuned via
141  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
142  * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
143  * in stripe-sized chunks. This should result in better allocation on RAID
144  * setups. If not, we search in the specific group using the bitmap for
145  * best extents. The tunables min_to_scan and max_to_scan control the
146  * behaviour here. min_to_scan indicates how long mballoc __must__ look
147  * for a best extent and max_to_scan indicates how long mballoc __can__
148  * look for a best extent among the found extents. Searching for blocks
149  * starts with the group specified as the goal value in the allocation
150  * context via ac_g_ex. Each group is first checked based on the criteria
151  * of whether it can be used for allocation. ext4_mb_good_group explains
152  * how the groups are checked.
153  *
154  * Both prealloc spaces are populated as described above. So the first
155  * request will hit the buddy cache, which results in the prealloc
156  * space getting filled. The prealloc space is then used for
157  * subsequent requests.
158  */
159 
160 /*
161  * mballoc operates on the following data:
162  *  - on-disk bitmap
163  *  - in-core buddy (actually includes buddy and bitmap)
164  *  - preallocation descriptors (PAs)
165  *
166  * there are two types of preallocations:
167  *  - inode
168  *    assigned to a specific inode and can be used for this inode only.
169  *    it describes part of the inode's space preallocated to specific
170  *    physical blocks. any block from that preallocation can be used
171  *    independently. the descriptor just tracks the number of blocks left
172  *    unused. so, before taking some block from the descriptor, one must
173  *    make sure the corresponding logical block isn't allocated yet. this
174  *    also means that freeing any block within the descriptor's range
175  *    must discard all preallocated blocks.
176  *  - locality group
177  *    assigned to a specific locality group which does not translate to a
178  *    permanent set of inodes: inodes can join and leave the group. space
179  *    from this type of preallocation can be used for any inode. thus
180  *    it's consumed from the beginning to the end.
181  *
182  * relation between them can be expressed as:
183  *    in-core buddy = on-disk bitmap + preallocation descriptors
184  *
185  * this means the blocks mballoc considers used are:
186  *  - allocated blocks (persistent)
187  *  - preallocated blocks (non-persistent)
188  *
189  * consistency in mballoc world means that at any time a block is either
190  * free or used in ALL structures. notice: "any time" should not be read
191  * literally -- time is discrete and delimited by locks.
192  *
193  * to keep it simple, we don't use block numbers, instead we count the number
194  * of blocks: how many are marked used/free in the on-disk bitmap, buddy and PA.
195  *
196  * all operations can be expressed as:
197  *  - init buddy:			buddy = on-disk + PAs
198  *  - new PA:				buddy += N; PA = N
199  *  - use inode PA:			on-disk += N; PA -= N
200  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
201  *  - use locality group PA		on-disk += N; PA -= N
202  *  - discard locality group PA		buddy -= PA; PA = 0
203  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
204  *        is used in real operation because we can't know actual used
205  *        bits from PA, only from on-disk bitmap
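 *
 *  a worked example (illustrative): create a PA of N = 16 while 100
 *  bits are set on disk -> buddy counts 116 used, PA = 16. allocate 10
 *  blocks from the PA -> on-disk = 110, PA = 6, buddy still 116.
 *  discard the PA -> the 6 unused blocks are freed in the buddy
 *  (116 - 6 = 110), PA = 0, and all three structures agree again.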
206  *
207  * if we follow this strict logic, then all operations above should be atomic.
208  * given that some of them can block, we'd have to use something like
209  * semaphores, killing performance on high-end SMP hardware. let's try to
210  * relax it using the following knowledge:
211  *  1) if buddy is referenced, it's already initialized
212  *  2) while block is used in buddy and the buddy is referenced,
213  *     nobody can re-allocate that block
214  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
215  *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
216  *     bit in the on-disk bitmap if the buddy has the same bit set and/or a
217  *     PA covers the corresponding block
218  *
219  * so, now we're building a concurrency table:
220  *  - init buddy vs.
221  *    - new PA
222  *      blocks for PA are allocated in the buddy, buddy must be referenced
223  *      until PA is linked to allocation group to avoid concurrent buddy init
224  *    - use inode PA
225  *      we need to make sure that either on-disk bitmap or PA has uptodate data
226  *      given (3) we care that PA-=N operation doesn't interfere with init
227  *    - discard inode PA
228  *      the simplest way would be to have buddy initialized by the discard
229  *    - use locality group PA
230  *      again PA-=N must be serialized with init
231  *    - discard locality group PA
232  *      the simplest way would be to have buddy initialized by the discard
233  *  - new PA vs.
234  *    - use inode PA
235  *      i_data_sem serializes them
236  *    - discard inode PA
237  *      discard process must wait until PA isn't used by another process
238  *    - use locality group PA
239  *      some mutex should serialize them
240  *    - discard locality group PA
241  *      discard process must wait until PA isn't used by another process
242  *  - use inode PA
243  *    - use inode PA
244  *      i_data_sem or another mutex should serialize them
245  *    - discard inode PA
246  *      discard process must wait until PA isn't used by another process
247  *    - use locality group PA
248  *      nothing wrong here -- they're different PAs covering different blocks
249  *    - discard locality group PA
250  *      discard process must wait until PA isn't used by another process
251  *
252  * now we're ready to draw a few conclusions:
253  *  - while a PA is referenced, no discard is possible
254  *  - a PA is referenced until its blocks are marked in the on-disk bitmap
255  *  - a PA changes only after the on-disk bitmap does
256  *  - discard must not compete with init. either init is done before
257  *    any discard or they're serialized somehow
258  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
259  *
260  * a special case is when we've used a PA down to emptiness. no need to
261  * modify the buddy in this case, but we should care about concurrent init
262  *
263  */
264 
265 /*
266  * Logic in a few words:
267  *
268  *  - allocation:
269  *    load group
270  *    find blocks
271  *    mark bits in on-disk bitmap
272  *    release group
273  *
274  *  - use preallocation:
275  *    find proper PA (per-inode or group)
276  *    load group
277  *    mark bits in on-disk bitmap
278  *    release group
279  *    release PA
280  *
281  *  - free:
282  *    load group
283  *    mark bits in on-disk bitmap
284  *    release group
285  *
286  *  - discard preallocations in group:
287  *    mark PAs deleted
288  *    move them onto local list
289  *    load on-disk bitmap
290  *    load group
291  *    remove PA from object (inode or locality group)
292  *    mark free blocks in-core
293  *
294  *  - discard inode's preallocations:
295  */
296 
297 /*
298  * Locking rules
299  *
300  * Locks:
301  *  - bitlock on a group	(group)
302  *  - object (inode/locality)	(object)
303  *  - per-pa lock		(pa)
304  *
305  * Paths:
306  *  - new pa
307  *    object
308  *    group
309  *
310  *  - find and use pa:
311  *    pa
312  *
313  *  - release consumed pa:
314  *    pa
315  *    group
316  *    object
317  *
318  *  - generate in-core bitmap:
319  *    group
320  *        pa
321  *
322  *  - discard all for given object (inode, locality group):
323  *    object
324  *        pa
325  *    group
326  *
327  *  - discard all for given group:
328  *    group
329  *        pa
330  *    group
331  *        object
332  *
333  */
334 static struct kmem_cache *ext4_pspace_cachep;
335 static struct kmem_cache *ext4_ac_cachep;
336 static struct kmem_cache *ext4_free_ext_cachep;
337 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
338 					ext4_group_t group);
339 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
340 						ext4_group_t group);
341 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
342 
343 
344 
345 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
346 {
347 #if BITS_PER_LONG == 64
348 	*bit += ((unsigned long) addr & 7UL) << 3;
349 	addr = (void *) ((unsigned long) addr & ~7UL);
350 #elif BITS_PER_LONG == 32
351 	*bit += ((unsigned long) addr & 3UL) << 3;
352 	addr = (void *) ((unsigned long) addr & ~3UL);
353 #else
354 #error "how many bits you are?!"
355 #endif
356 	return addr;
357 }
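
/*
 * Illustrative example (not from the original source): on a 64-bit
 * machine, addr = base + 5 with *bit = 3 becomes
 *   *bit += (5 & 7UL) << 3  ->  *bit = 43
 *   addr  = base            (rounded down to an 8-byte boundary,
 *                            assuming base itself is aligned)
 * i.e. the byte offset is folded into the bit index so the bit
 * helpers below always see an unsigned-long-aligned pointer.
 */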
358 
359 static inline int mb_test_bit(int bit, void *addr)
360 {
361 	/*
362 	 * ext4_test_bit on architectures like powerpc
363 	 * needs an unsigned long aligned address
364 	 */
365 	addr = mb_correct_addr_and_bit(&bit, addr);
366 	return ext4_test_bit(bit, addr);
367 }
368 
369 static inline void mb_set_bit(int bit, void *addr)
370 {
371 	addr = mb_correct_addr_and_bit(&bit, addr);
372 	ext4_set_bit(bit, addr);
373 }
374 
375 static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
376 {
377 	addr = mb_correct_addr_and_bit(&bit, addr);
378 	ext4_set_bit_atomic(lock, bit, addr);
379 }
380 
381 static inline void mb_clear_bit(int bit, void *addr)
382 {
383 	addr = mb_correct_addr_and_bit(&bit, addr);
384 	ext4_clear_bit(bit, addr);
385 }
386 
387 static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
388 {
389 	addr = mb_correct_addr_and_bit(&bit, addr);
390 	ext4_clear_bit_atomic(lock, bit, addr);
391 }
392 
393 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
394 {
395 	int fix = 0, ret, tmpmax;
396 	addr = mb_correct_addr_and_bit(&fix, addr);
397 	tmpmax = max + fix;
398 	start += fix;
399 
400 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
401 	if (ret > max)
402 		return max;
403 	return ret;
404 }
405 
406 static inline int mb_find_next_bit(void *addr, int max, int start)
407 {
408 	int fix = 0, ret, tmpmax;
409 	addr = mb_correct_addr_and_bit(&fix, addr);
410 	tmpmax = max + fix;
411 	start += fix;
412 
413 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
414 	if (ret > max)
415 		return max;
416 	return ret;
417 }
418 
419 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
420 {
421 	char *bb;
422 
423 	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
424 	BUG_ON(max == NULL);
425 
426 	if (order > e4b->bd_blkbits + 1) {
427 		*max = 0;
428 		return NULL;
429 	}
430 
431 	/* at order 0 we see each particular block */
432 	*max = 1 << (e4b->bd_blkbits + 3);
433 	if (order == 0)
434 		return EXT4_MB_BITMAP(e4b);
435 
436 	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
437 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
438 
439 	return bb;
440 }
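
/*
 * Illustrative example (not from the original source): with 4KB blocks
 * (bd_blkbits = 12), order 0 returns the block bitmap itself with
 * *max = 1 << 15 = 32768 bits, one bit per block in the group; higher
 * orders return successively smaller bitmaps located via s_mb_offsets[]
 * and s_mb_maxs[], with *max halving at each order.
 */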
441 
442 #ifdef DOUBLE_CHECK
443 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
444 			   int first, int count)
445 {
446 	int i;
447 	struct super_block *sb = e4b->bd_sb;
448 
449 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
450 		return;
451 	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
452 	for (i = 0; i < count; i++) {
453 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
454 			ext4_fsblk_t blocknr;
455 			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
456 			blocknr += first + i;
457 			blocknr +=
458 			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
459 			ext4_grp_locked_error(sb, e4b->bd_group,
460 				   __func__, "double-free of inode"
461 				   " %lu's block %llu(bit %u in group %u)",
462 				   inode ? inode->i_ino : 0, blocknr,
463 				   first + i, e4b->bd_group);
464 		}
465 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
466 	}
467 }
468 
469 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
470 {
471 	int i;
472 
473 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
474 		return;
475 	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
476 	for (i = 0; i < count; i++) {
477 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
478 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
479 	}
480 }
481 
482 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
483 {
484 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
485 		unsigned char *b1, *b2;
486 		int i;
487 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
488 		b2 = (unsigned char *) bitmap;
489 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
490 			if (b1[i] != b2[i]) {
491 				printk(KERN_ERR "corruption in group %u "
492 				       "at byte %u(%u): %x in copy != %x "
493 				       "on disk/prealloc\n",
494 				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
495 				BUG();
496 			}
497 		}
498 	}
499 }
500 
501 #else
502 static inline void mb_free_blocks_double(struct inode *inode,
503 				struct ext4_buddy *e4b, int first, int count)
504 {
505 	return;
506 }
507 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
508 						int first, int count)
509 {
510 	return;
511 }
512 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
513 {
514 	return;
515 }
516 #endif
517 
518 #ifdef AGGRESSIVE_CHECK
519 
520 #define MB_CHECK_ASSERT(assert)						\
521 do {									\
522 	if (!(assert)) {						\
523 		printk(KERN_EMERG					\
524 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
525 			function, file, line, # assert);		\
526 		BUG();							\
527 	}								\
528 } while (0)
529 
530 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
531 				const char *function, int line)
532 {
533 	struct super_block *sb = e4b->bd_sb;
534 	int order = e4b->bd_blkbits + 1;
535 	int max;
536 	int max2;
537 	int i;
538 	int j;
539 	int k;
540 	int count;
541 	struct ext4_group_info *grp;
542 	int fragments = 0;
543 	int fstart;
544 	struct list_head *cur;
545 	void *buddy;
546 	void *buddy2;
547 
548 	{
549 		static int mb_check_counter;
550 		if (mb_check_counter++ % 100 != 0)
551 			return 0;
552 	}
553 
554 	while (order > 1) {
555 		buddy = mb_find_buddy(e4b, order, &max);
556 		MB_CHECK_ASSERT(buddy);
557 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
558 		MB_CHECK_ASSERT(buddy2);
559 		MB_CHECK_ASSERT(buddy != buddy2);
560 		MB_CHECK_ASSERT(max * 2 == max2);
561 
562 		count = 0;
563 		for (i = 0; i < max; i++) {
564 
565 			if (mb_test_bit(i, buddy)) {
566 				/* only single bit in buddy2 may be 1 */
567 				if (!mb_test_bit(i << 1, buddy2)) {
568 					MB_CHECK_ASSERT(
569 						mb_test_bit((i<<1)+1, buddy2));
570 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
571 					MB_CHECK_ASSERT(
572 						mb_test_bit(i << 1, buddy2));
573 				}
574 				continue;
575 			}
576 
577 			/* both bits in buddy2 must be 0 */
578 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
579 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
580 
581 			for (j = 0; j < (1 << order); j++) {
582 				k = (i * (1 << order)) + j;
583 				MB_CHECK_ASSERT(
584 					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
585 			}
586 			count++;
587 		}
588 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
589 		order--;
590 	}
591 
592 	fstart = -1;
593 	buddy = mb_find_buddy(e4b, 0, &max);
594 	for (i = 0; i < max; i++) {
595 		if (!mb_test_bit(i, buddy)) {
596 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
597 			if (fstart == -1) {
598 				fragments++;
599 				fstart = i;
600 			}
601 			continue;
602 		}
603 		fstart = -1;
604 		/* check used bits only */
605 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
606 			buddy2 = mb_find_buddy(e4b, j, &max2);
607 			k = i >> j;
608 			MB_CHECK_ASSERT(k < max2);
609 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
610 		}
611 	}
612 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
613 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
614 
615 	grp = ext4_get_group_info(sb, e4b->bd_group);
616 	buddy = mb_find_buddy(e4b, 0, &max);
617 	list_for_each(cur, &grp->bb_prealloc_list) {
618 		ext4_group_t groupnr;
619 		struct ext4_prealloc_space *pa;
620 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
621 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
622 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
623 		for (i = 0; i < pa->pa_len; i++)
624 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
625 	}
626 	return 0;
627 }
628 #undef MB_CHECK_ASSERT
629 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
630 					__FILE__, __func__, __LINE__)
631 #else
632 #define mb_check_buddy(e4b)
633 #endif
634 
635 /* FIXME!! need more doc */
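/*
 * Illustrative walk-through (not from the original source): for
 * first = 5, len = 7 the loop marks the range as 1+2+4 blocks:
 *   first = 5: max = ffs(5|border)-1 = 0           -> chunk = 1, block {5}
 *   first = 6: max = 1, min = fls(6)-1 = 2, capped -> chunk = 2, blocks {6,7}
 *   first = 8: max = 3, min = fls(4)-1 = 2         -> chunk = 4, blocks {8..11}
 * each chunk bumps bb_counters[min] and, for min > 0, clears the
 * matching bit in the order-min buddy bitmap.
 */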
636 static void ext4_mb_mark_free_simple(struct super_block *sb,
637 				void *buddy, unsigned first, int len,
638 					struct ext4_group_info *grp)
639 {
640 	struct ext4_sb_info *sbi = EXT4_SB(sb);
641 	unsigned short min;
642 	unsigned short max;
643 	unsigned short chunk;
644 	unsigned short border;
645 
646 	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
647 
648 	border = 2 << sb->s_blocksize_bits;
649 
650 	while (len > 0) {
651 		/* find how many blocks can be covered from this position */
652 		max = ffs(first | border) - 1;
653 
654 		/* find how many blocks of power 2 we need to mark */
655 		min = fls(len) - 1;
656 
657 		if (max < min)
658 			min = max;
659 		chunk = 1 << min;
660 
661 		/* mark multiblock chunks only */
662 		grp->bb_counters[min]++;
663 		if (min > 0)
664 			mb_clear_bit(first >> min,
665 				     buddy + sbi->s_mb_offsets[min]);
666 
667 		len -= chunk;
668 		first += chunk;
669 	}
670 }
671 
672 static void ext4_mb_generate_buddy(struct super_block *sb,
673 				void *buddy, void *bitmap, ext4_group_t group)
674 {
675 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
676 	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
677 	unsigned short i = 0;
678 	unsigned short first;
679 	unsigned short len;
680 	unsigned free = 0;
681 	unsigned fragments = 0;
682 	unsigned long long period = get_cycles();
683 
684 	/* initialize buddy from the bitmap which is an aggregation
685 	 * of the on-disk bitmap and preallocations */
686 	i = mb_find_next_zero_bit(bitmap, max, 0);
687 	grp->bb_first_free = i;
688 	while (i < max) {
689 		fragments++;
690 		first = i;
691 		i = mb_find_next_bit(bitmap, max, i);
692 		len = i - first;
693 		free += len;
694 		if (len > 1)
695 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
696 		else
697 			grp->bb_counters[0]++;
698 		if (i < max)
699 			i = mb_find_next_zero_bit(bitmap, max, i);
700 	}
701 	grp->bb_fragments = fragments;
702 
703 	if (free != grp->bb_free) {
704 		ext4_grp_locked_error(sb, group,  __func__,
705 			"EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
706 			group, free, grp->bb_free);
707 		/*
708 		 * If we intend to continue, we consider the group descriptor
709 		 * corrupt and update bb_free using the bitmap value
710 		 */
711 		grp->bb_free = free;
712 	}
713 
714 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
715 
716 	period = get_cycles() - period;
717 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
718 	EXT4_SB(sb)->s_mb_buddies_generated++;
719 	EXT4_SB(sb)->s_mb_generation_time += period;
720 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
721 }
722 
723 /* The buddy information is attached to the buddy cache inode
724  * for convenience. The information regarding each group
725  * is loaded via ext4_mb_load_buddy. It consists of the
726  * block bitmap and the buddy information, which are
727  * stored in the inode as
728  *
729  * {                        page                        }
730  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
731  *
732  *
733  * one block each for the bitmap and the buddy information.
734  * So each group takes up 2 blocks. A page can
735  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
736  * So it can hold information for groups_per_page groups,
737  * which is blocks_per_page/2.
738  */
739 
740 static int ext4_mb_init_cache(struct page *page, char *incore)
741 {
742 	int blocksize;
743 	int blocks_per_page;
744 	int groups_per_page;
745 	int err = 0;
746 	int i;
747 	ext4_group_t first_group;
748 	int first_block;
749 	struct super_block *sb;
750 	struct buffer_head *bhs;
751 	struct buffer_head **bh;
752 	struct inode *inode;
753 	char *data;
754 	char *bitmap;
755 
756 	mb_debug("init page %lu\n", page->index);
757 
758 	inode = page->mapping->host;
759 	sb = inode->i_sb;
760 	blocksize = 1 << inode->i_blkbits;
761 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
762 
763 	groups_per_page = blocks_per_page >> 1;
764 	if (groups_per_page == 0)
765 		groups_per_page = 1;
766 
767 	/* allocate buffer_heads to read bitmaps */
768 	if (groups_per_page > 1) {
769 		err = -ENOMEM;
770 		i = sizeof(struct buffer_head *) * groups_per_page;
771 		bh = kzalloc(i, GFP_NOFS);
772 		if (bh == NULL)
773 			goto out;
774 	} else
775 		bh = &bhs;
776 
777 	first_group = page->index * blocks_per_page / 2;
778 
779 	/* read all groups the page covers into the cache */
780 	for (i = 0; i < groups_per_page; i++) {
781 		struct ext4_group_desc *desc;
782 
783 		if (first_group + i >= EXT4_SB(sb)->s_groups_count)
784 			break;
785 
786 		err = -EIO;
787 		desc = ext4_get_group_desc(sb, first_group + i, NULL);
788 		if (desc == NULL)
789 			goto out;
790 
791 		err = -ENOMEM;
792 		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
793 		if (bh[i] == NULL)
794 			goto out;
795 
796 		if (bitmap_uptodate(bh[i]))
797 			continue;
798 
799 		lock_buffer(bh[i]);
800 		if (bitmap_uptodate(bh[i])) {
801 			unlock_buffer(bh[i]);
802 			continue;
803 		}
804 		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
805 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
806 			ext4_init_block_bitmap(sb, bh[i],
807 						first_group + i, desc);
808 			set_bitmap_uptodate(bh[i]);
809 			set_buffer_uptodate(bh[i]);
810 			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
811 			unlock_buffer(bh[i]);
812 			continue;
813 		}
814 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
815 		if (buffer_uptodate(bh[i])) {
816 			/*
817 			 * if not uninit, and bh is uptodate, the
818 			 * bitmap is also uptodate
819 			 */
820 			set_bitmap_uptodate(bh[i]);
821 			unlock_buffer(bh[i]);
822 			continue;
823 		}
824 		get_bh(bh[i]);
825 		/*
826 		 * submit the buffer_head for read. We can
827 		 * safely mark the bitmap as uptodate now.
828 		 * We do it here so the bitmap uptodate bit
829 		 * gets set with the buffer lock held.
830 		 */
831 		set_bitmap_uptodate(bh[i]);
832 		bh[i]->b_end_io = end_buffer_read_sync;
833 		submit_bh(READ, bh[i]);
834 		mb_debug("read bitmap for group %u\n", first_group + i);
835 	}
836 
837 	/* wait for I/O completion */
838 	for (i = 0; i < groups_per_page && bh[i]; i++)
839 		wait_on_buffer(bh[i]);
840 
841 	err = -EIO;
842 	for (i = 0; i < groups_per_page && bh[i]; i++)
843 		if (!buffer_uptodate(bh[i]))
844 			goto out;
845 
846 	err = 0;
847 	first_block = page->index * blocks_per_page;
848 	/* init the page  */
849 	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
850 	for (i = 0; i < blocks_per_page; i++) {
851 		int group;
852 		struct ext4_group_info *grinfo;
853 
854 		group = (first_block + i) >> 1;
855 		if (group >= EXT4_SB(sb)->s_groups_count)
856 			break;
857 
858 		/*
859 		 * data carries information regarding this
860 		 * particular group in the format specified
861 		 * above
862 		 *
863 		 */
864 		data = page_address(page) + (i * blocksize);
865 		bitmap = bh[group - first_group]->b_data;
866 
867 		/*
868 		 * We place the buddy block and bitmap block
869 		 * close together
870 		 */
871 		if ((first_block + i) & 1) {
872 			/* this is block of buddy */
873 			BUG_ON(incore == NULL);
874 			mb_debug("put buddy for group %u in page %lu/%x\n",
875 				group, page->index, i * blocksize);
876 			grinfo = ext4_get_group_info(sb, group);
877 			grinfo->bb_fragments = 0;
878 			memset(grinfo->bb_counters, 0,
879 			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
880 			/*
881 			 * incore got set to the group block bitmap below
882 			 */
883 			ext4_lock_group(sb, group);
884 			ext4_mb_generate_buddy(sb, data, incore, group);
885 			ext4_unlock_group(sb, group);
886 			incore = NULL;
887 		} else {
888 			/* this is block of bitmap */
889 			BUG_ON(incore != NULL);
890 			mb_debug("put bitmap for group %u in page %lu/%x\n",
891 				group, page->index, i * blocksize);
892 
893 			/* see comments in ext4_mb_put_pa() */
894 			ext4_lock_group(sb, group);
895 			memcpy(data, bitmap, blocksize);
896 
897 			/* mark all preallocated blks used in in-core bitmap */
898 			ext4_mb_generate_from_pa(sb, data, group);
899 			ext4_mb_generate_from_freelist(sb, data, group);
900 			ext4_unlock_group(sb, group);
901 
902 			/* set incore so that the buddy information can be
903 			 * generated using this
904 			 */
905 			incore = data;
906 		}
907 	}
908 	SetPageUptodate(page);
909 
910 out:
911 	if (bh) {
912 		for (i = 0; i < groups_per_page && bh[i]; i++)
913 			brelse(bh[i]);
914 		if (bh != &bhs)
915 			kfree(bh);
916 	}
917 	return err;
918 }
919 
920 static noinline_for_stack int
921 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
922 					struct ext4_buddy *e4b)
923 {
924 	int blocks_per_page;
925 	int block;
926 	int pnum;
927 	int poff;
928 	struct page *page;
929 	int ret;
930 	struct ext4_group_info *grp;
931 	struct ext4_sb_info *sbi = EXT4_SB(sb);
932 	struct inode *inode = sbi->s_buddy_cache;
933 
934 	mb_debug("load group %u\n", group);
935 
936 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
937 	grp = ext4_get_group_info(sb, group);
938 
939 	e4b->bd_blkbits = sb->s_blocksize_bits;
940 	e4b->bd_info = ext4_get_group_info(sb, group);
941 	e4b->bd_sb = sb;
942 	e4b->bd_group = group;
943 	e4b->bd_buddy_page = NULL;
944 	e4b->bd_bitmap_page = NULL;
945 	e4b->alloc_semp = &grp->alloc_sem;
946 
947 	/* Take the read lock on the group alloc
948 	 * sem. This makes sure a parallel
949 	 * ext4_mb_init_group happening on other
950 	 * groups mapped by the page is blocked
951 	 * till we are done with allocation
952 	 */
953 	down_read(e4b->alloc_semp);
954 
955 	/*
956 	 * the buddy cache inode stores the block bitmap
957 	 * and buddy information in consecutive blocks.
958 	 * So for each group we need two blocks.
959 	 */
960 	block = group * 2;
961 	pnum = block / blocks_per_page;
962 	poff = block % blocks_per_page;
963 
964 	/* we could use find_or_create_page(), but it locks the page,
965 	 * which we'd like to avoid in the fast path ... */
966 	page = find_get_page(inode->i_mapping, pnum);
967 	if (page == NULL || !PageUptodate(page)) {
968 		if (page)
969 			/*
970 			 * drop the page reference and try
971 			 * to get the page with the lock. If the
972 			 * page is not uptodate, that implies
973 			 * somebody just created the page but
974 			 * has yet to initialize it. So
975 			 * wait for it to initialize.
976 			 */
977 			page_cache_release(page);
978 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
979 		if (page) {
980 			BUG_ON(page->mapping != inode->i_mapping);
981 			if (!PageUptodate(page)) {
982 				ret = ext4_mb_init_cache(page, NULL);
983 				if (ret) {
984 					unlock_page(page);
985 					goto err;
986 				}
987 				mb_cmp_bitmaps(e4b, page_address(page) +
988 					       (poff * sb->s_blocksize));
989 			}
990 			unlock_page(page);
991 		}
992 	}
993 	if (page == NULL || !PageUptodate(page)) {
994 		ret = -EIO;
995 		goto err;
996 	}
997 	e4b->bd_bitmap_page = page;
998 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
999 	mark_page_accessed(page);
1000 
1001 	block++;
1002 	pnum = block / blocks_per_page;
1003 	poff = block % blocks_per_page;
1004 
1005 	page = find_get_page(inode->i_mapping, pnum);
1006 	if (page == NULL || !PageUptodate(page)) {
1007 		if (page)
1008 			page_cache_release(page);
1009 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1010 		if (page) {
1011 			BUG_ON(page->mapping != inode->i_mapping);
1012 			if (!PageUptodate(page)) {
1013 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
1014 				if (ret) {
1015 					unlock_page(page);
1016 					goto err;
1017 				}
1018 			}
1019 			unlock_page(page);
1020 		}
1021 	}
1022 	if (page == NULL || !PageUptodate(page)) {
1023 		ret = -EIO;
1024 		goto err;
1025 	}
1026 	e4b->bd_buddy_page = page;
1027 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1028 	mark_page_accessed(page);
1029 
1030 	BUG_ON(e4b->bd_bitmap_page == NULL);
1031 	BUG_ON(e4b->bd_buddy_page == NULL);
1032 
1033 	return 0;
1034 
1035 err:
1036 	if (e4b->bd_bitmap_page)
1037 		page_cache_release(e4b->bd_bitmap_page);
1038 	if (e4b->bd_buddy_page)
1039 		page_cache_release(e4b->bd_buddy_page);
1040 	e4b->bd_buddy = NULL;
1041 	e4b->bd_bitmap = NULL;
1042 
1043 	/* Done with the buddy cache */
1044 	up_read(e4b->alloc_semp);
1045 	return ret;
1046 }
1047 
1048 static void ext4_mb_release_desc(struct ext4_buddy *e4b)
1049 {
1050 	if (e4b->bd_bitmap_page)
1051 		page_cache_release(e4b->bd_bitmap_page);
1052 	if (e4b->bd_buddy_page)
1053 		page_cache_release(e4b->bd_buddy_page);
1054 	/* Done with the buddy cache */
1055 	if (e4b->alloc_semp)
1056 		up_read(e4b->alloc_semp);
1057 }
1058 
1059 
1060 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1061 {
1062 	int order = 1;
1063 	void *bb;
1064 
1065 	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
1066 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1067 
1068 	bb = EXT4_MB_BUDDY(e4b);
1069 	while (order <= e4b->bd_blkbits + 1) {
1070 		block = block >> 1;
1071 		if (!mb_test_bit(block, bb)) {
1072 			/* this block is part of buddy of order 'order' */
1073 			return order;
1074 		}
1075 		bb += 1 << (e4b->bd_blkbits - order);
1076 		order++;
1077 	}
1078 	return 0;
1079 }
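
/*
 * Illustrative example (not from the original source): if block 12 is
 * free as part of an order-2 chunk, the order-1 bitmap still has bit
 * 12>>1 = 6 set (the halves were merged upward), while the order-2
 * bitmap has bit 12>>2 = 3 clear, so the loop returns 2 on its second
 * pass.
 */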
1080 
1081 static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1082 {
1083 	__u32 *addr;
1084 
1085 	len = cur + len;
1086 	while (cur < len) {
1087 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1088 			/* fast path: clear whole word at once */
1089 			addr = bm + (cur >> 3);
1090 			*addr = 0;
1091 			cur += 32;
1092 			continue;
1093 		}
1094 		if (lock)
1095 			mb_clear_bit_atomic(lock, cur, bm);
1096 		else
1097 			mb_clear_bit(cur, bm);
1098 		cur++;
1099 	}
1100 }
1101 
1102 static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1103 {
1104 	__u32 *addr;
1105 
1106 	len = cur + len;
1107 	while (cur < len) {
1108 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1109 			/* fast path: set whole word at once */
1110 			addr = bm + (cur >> 3);
1111 			*addr = 0xffffffff;
1112 			cur += 32;
1113 			continue;
1114 		}
1115 		if (lock)
1116 			mb_set_bit_atomic(lock, cur, bm);
1117 		else
1118 			mb_set_bit(cur, bm);
1119 		cur++;
1120 	}
1121 }
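
/*
 * Illustrative example (not from the original source): with cur = 3 and
 * len = 70, mb_set_bits() sets bits 3..31 one at a time, stores a whole
 * 0xffffffff word for bits 32..63, then finishes bits 64..72 bit by
 * bit. Note the word-sized fast path bypasses the per-bit atomic
 * helpers.
 */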
1122 
1123 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1124 			  int first, int count)
1125 {
1126 	int block = 0;
1127 	int max = 0;
1128 	int order;
1129 	void *buddy;
1130 	void *buddy2;
1131 	struct super_block *sb = e4b->bd_sb;
1132 
1133 	BUG_ON(first + count > (sb->s_blocksize << 3));
1134 	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
1135 	mb_check_buddy(e4b);
1136 	mb_free_blocks_double(inode, e4b, first, count);
1137 
1138 	e4b->bd_info->bb_free += count;
1139 	if (first < e4b->bd_info->bb_first_free)
1140 		e4b->bd_info->bb_first_free = first;
1141 
1142 	/* let's maintain fragments counter */
1143 	if (first != 0)
1144 		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
1145 	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1146 		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
1147 	if (block && max)
1148 		e4b->bd_info->bb_fragments--;
1149 	else if (!block && !max)
1150 		e4b->bd_info->bb_fragments++;
1151 
1152 	/* let's maintain buddy itself */
1153 	while (count-- > 0) {
1154 		block = first++;
1155 		order = 0;
1156 
1157 		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
1158 			ext4_fsblk_t blocknr;
1159 			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
1160 			blocknr += block;
1161 			blocknr +=
1162 			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
1163 			ext4_grp_locked_error(sb, e4b->bd_group,
1164 				   __func__, "double-free of inode"
1165 				   " %lu's block %llu(bit %u in group %u)",
1166 				   inode ? inode->i_ino : 0, blocknr, block,
1167 				   e4b->bd_group);
1168 		}
1169 		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1170 		e4b->bd_info->bb_counters[order]++;
1171 
1172 		/* start of the buddy */
1173 		buddy = mb_find_buddy(e4b, order, &max);
1174 
1175 		do {
1176 			block &= ~1UL;
1177 			if (mb_test_bit(block, buddy) ||
1178 					mb_test_bit(block + 1, buddy))
1179 				break;
1180 
1181 			/* both the buddies are free, try to coalesce them */
1182 			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1183 
1184 			if (!buddy2)
1185 				break;
1186 
1187 			if (order > 0) {
1188 				/* for special purposes, we don't set
1189 				 * free bits in bitmap */
1190 				mb_set_bit(block, buddy);
1191 				mb_set_bit(block + 1, buddy);
1192 			}
1193 			e4b->bd_info->bb_counters[order]--;
1194 			e4b->bd_info->bb_counters[order]--;
1195 
1196 			block = block >> 1;
1197 			order++;
1198 			e4b->bd_info->bb_counters[order]++;
1199 
1200 			mb_clear_bit(block, buddy2);
1201 			buddy = buddy2;
1202 		} while (1);
1203 	}
1204 	mb_check_buddy(e4b);
1205 }
1206 
1207 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1208 				int needed, struct ext4_free_extent *ex)
1209 {
1210 	int next = block;
1211 	int max;
1212 	int ord;
1213 	void *buddy;
1214 
1215 	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1216 	BUG_ON(ex == NULL);
1217 
1218 	buddy = mb_find_buddy(e4b, order, &max);
1219 	BUG_ON(buddy == NULL);
1220 	BUG_ON(block >= max);
1221 	if (mb_test_bit(block, buddy)) {
1222 		ex->fe_len = 0;
1223 		ex->fe_start = 0;
1224 		ex->fe_group = 0;
1225 		return 0;
1226 	}
1227 
1228 	/* FIXME drop order completely ? */
1229 	if (likely(order == 0)) {
1230 		/* find actual order */
1231 		order = mb_find_order_for_block(e4b, block);
1232 		block = block >> order;
1233 	}
1234 
1235 	ex->fe_len = 1 << order;
1236 	ex->fe_start = block << order;
1237 	ex->fe_group = e4b->bd_group;
1238 
1239 	/* calc difference from given start */
1240 	next = next - ex->fe_start;
1241 	ex->fe_len -= next;
1242 	ex->fe_start += next;
1243 
1244 	while (needed > ex->fe_len &&
1245 	       (buddy = mb_find_buddy(e4b, order, &max))) {
1246 
1247 		if (block + 1 >= max)
1248 			break;
1249 
1250 		next = (block + 1) * (1 << order);
1251 		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
1252 			break;
1253 
1254 		ord = mb_find_order_for_block(e4b, next);
1255 
1256 		order = ord;
1257 		block = next >> order;
1258 		ex->fe_len += 1 << order;
1259 	}
1260 
1261 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1262 	return ex->fe_len;
1263 }
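
/*
 * Illustrative example (not from the original source): asking for
 * needed = 10 starting at block 10 inside a free order-3 chunk [8..15]
 * first yields {start 10, len 6} (the chunk clipped to the requested
 * start), then the loop pulls in an adjacent free chunk at block 16,
 * e.g. an order-2 chunk [16..19], giving {start 10, len 10}.
 */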
1264 
1265 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1266 {
1267 	int ord;
1268 	int mlen = 0;
1269 	int max = 0;
1270 	int cur;
1271 	int start = ex->fe_start;
1272 	int len = ex->fe_len;
1273 	unsigned ret = 0;
1274 	int len0 = len;
1275 	void *buddy;
1276 
1277 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1278 	BUG_ON(e4b->bd_group != ex->fe_group);
1279 	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1280 	mb_check_buddy(e4b);
1281 	mb_mark_used_double(e4b, start, len);
1282 
1283 	e4b->bd_info->bb_free -= len;
1284 	if (e4b->bd_info->bb_first_free == start)
1285 		e4b->bd_info->bb_first_free += len;
1286 
1287 	/* let's maintain fragments counter */
1288 	if (start != 0)
1289 		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
1290 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1291 		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
1292 	if (mlen && max)
1293 		e4b->bd_info->bb_fragments++;
1294 	else if (!mlen && !max)
1295 		e4b->bd_info->bb_fragments--;
1296 
1297 	/* let's maintain buddy itself */
1298 	while (len) {
1299 		ord = mb_find_order_for_block(e4b, start);
1300 
1301 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1302 			/* the whole chunk may be allocated at once! */
1303 			mlen = 1 << ord;
1304 			buddy = mb_find_buddy(e4b, ord, &max);
1305 			BUG_ON((start >> ord) >= max);
1306 			mb_set_bit(start >> ord, buddy);
1307 			e4b->bd_info->bb_counters[ord]--;
1308 			start += mlen;
1309 			len -= mlen;
1310 			BUG_ON(len < 0);
1311 			continue;
1312 		}
1313 
1314 		/* store for history */
1315 		if (ret == 0)
1316 			ret = len | (ord << 16);
1317 
1318 		/* we have to split large buddy */
1319 		BUG_ON(ord <= 0);
1320 		buddy = mb_find_buddy(e4b, ord, &max);
1321 		mb_set_bit(start >> ord, buddy);
1322 		e4b->bd_info->bb_counters[ord]--;
1323 
1324 		ord--;
1325 		cur = (start >> ord) & ~1U;
1326 		buddy = mb_find_buddy(e4b, ord, &max);
1327 		mb_clear_bit(cur, buddy);
1328 		mb_clear_bit(cur + 1, buddy);
1329 		e4b->bd_info->bb_counters[ord]++;
1330 		e4b->bd_info->bb_counters[ord]++;
1331 	}
1332 
1333 	mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
1334 			EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
1335 	mb_check_buddy(e4b);
1336 
1337 	return ret;
1338 }
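
/*
 * Illustrative note (not from the original source): the return value
 * packs the first split point for statistics -- the low 16 bits are the
 * blocks still left when the first buddy split happened, the high bits
 * the order that was split (ret = len | (ord << 16)); it is unpacked
 * into ac_tail and ac_buddy in ext4_mb_use_best_found().
 */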
1339 
1340 /*
1341  * Must be called under group lock!
1342  */
1343 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1344 					struct ext4_buddy *e4b)
1345 {
1346 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1347 	int ret;
1348 
1349 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1350 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1351 
1352 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1353 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1354 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1355 
1356 	/* preallocation can change ac_b_ex, thus we store actually
1357 	 * allocated blocks for history */
1358 	ac->ac_f_ex = ac->ac_b_ex;
1359 
1360 	ac->ac_status = AC_STATUS_FOUND;
1361 	ac->ac_tail = ret & 0xffff;
1362 	ac->ac_buddy = ret >> 16;
1363 
1364 	/*
1365 	 * take the page reference. We want the page to be pinned
1366 	 * so that we don't get an ext4_mb_init_cache() call for this
1367 	 * group until we update the bitmap. That would mean we
1368 	 * double allocate blocks. The reference is dropped
1369 	 * in ext4_mb_release_context
1370 	 */
1371 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1372 	get_page(ac->ac_bitmap_page);
1373 	ac->ac_buddy_page = e4b->bd_buddy_page;
1374 	get_page(ac->ac_buddy_page);
1375 	/* on allocation we use ac to track the held semaphore */
1376 	ac->alloc_semp =  e4b->alloc_semp;
1377 	e4b->alloc_semp = NULL;
1378 	/* store last allocated for subsequent stream allocation */
1379 	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
1380 		spin_lock(&sbi->s_md_lock);
1381 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1382 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1383 		spin_unlock(&sbi->s_md_lock);
1384 	}
1385 }
1386 
1387 /*
1388  * regular allocator, for general purpose allocations
1389  */
1390 
1391 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1392 					struct ext4_buddy *e4b,
1393 					int finish_group)
1394 {
1395 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1396 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1397 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1398 	struct ext4_free_extent ex;
1399 	int max;
1400 
1401 	if (ac->ac_status == AC_STATUS_FOUND)
1402 		return;
1403 	/*
1404 	 * We don't want to scan for a whole year
1405 	 */
1406 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1407 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1408 		ac->ac_status = AC_STATUS_BREAK;
1409 		return;
1410 	}
1411 
1412 	/*
1413 	 * Haven't found a good chunk so far, let's continue
1414 	 */
1415 	if (bex->fe_len < gex->fe_len)
1416 		return;
1417 
1418 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1419 			&& bex->fe_group == e4b->bd_group) {
1420 		/* recheck chunk's availability - we don't know
1421 		 * when it was found (within this lock-unlock
1422 		 * period or not) */
1423 		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1424 		if (max >= gex->fe_len) {
1425 			ext4_mb_use_best_found(ac, e4b);
1426 			return;
1427 		}
1428 	}
1429 }
1430 
1431 /*
1432  * The routine checks whether the found extent is good enough. If it is,
1433  * then the extent gets marked used and a flag is set in the context
1434  * to stop scanning. Otherwise, the extent is compared with the
1435  * previously found extent and if the new one is better, then it's stored
1436  * in the context. Later, the best found extent will be used, if
1437  * mballoc can't find a good enough extent.
1438  *
1439  * FIXME: real allocation policy is to be designed yet!
1440  */
1441 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1442 					struct ext4_free_extent *ex,
1443 					struct ext4_buddy *e4b)
1444 {
1445 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1446 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1447 
1448 	BUG_ON(ex->fe_len <= 0);
1449 	BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1450 	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1451 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1452 
1453 	ac->ac_found++;
1454 
1455 	/*
1456 	 * The special case - take what you catch first
1457 	 */
1458 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1459 		*bex = *ex;
1460 		ext4_mb_use_best_found(ac, e4b);
1461 		return;
1462 	}
1463 
1464 	/*
1465 	 * Let's check whether the chunk is good enough
1466 	 */
1467 	if (ex->fe_len == gex->fe_len) {
1468 		*bex = *ex;
1469 		ext4_mb_use_best_found(ac, e4b);
1470 		return;
1471 	}
1472 
1473 	/*
1474 	 * If this is the first found extent, just store it in the context
1475 	 */
1476 	if (bex->fe_len == 0) {
1477 		*bex = *ex;
1478 		return;
1479 	}
1480 
1481 	/*
1482 	 * If the newly found extent is better, store it in the context
1483 	 */
1484 	if (bex->fe_len < gex->fe_len) {
1485 		/* if the request isn't satisfied, any found extent
1486 		 * larger than the previous best one is better */
1487 		if (ex->fe_len > bex->fe_len)
1488 			*bex = *ex;
1489 	} else if (ex->fe_len > gex->fe_len) {
1490 		/* if the request is satisfied, then we try to find
1491 		 * an extent that still satisfies the request, but is
1492 		 * smaller than the previous one */
1493 		if (ex->fe_len < bex->fe_len)
1494 			*bex = *ex;
1495 	}
1496 
1497 	ext4_mb_check_limits(ac, e4b, 0);
1498 }
1499 
1500 static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1501 					struct ext4_buddy *e4b)
1502 {
1503 	struct ext4_free_extent ex = ac->ac_b_ex;
1504 	ext4_group_t group = ex.fe_group;
1505 	int max;
1506 	int err;
1507 
1508 	BUG_ON(ex.fe_len <= 0);
1509 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1510 	if (err)
1511 		return err;
1512 
1513 	ext4_lock_group(ac->ac_sb, group);
1514 	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1515 
1516 	if (max > 0) {
1517 		ac->ac_b_ex = ex;
1518 		ext4_mb_use_best_found(ac, e4b);
1519 	}
1520 
1521 	ext4_unlock_group(ac->ac_sb, group);
1522 	ext4_mb_release_desc(e4b);
1523 
1524 	return 0;
1525 }
1526 
1527 static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1528 				struct ext4_buddy *e4b)
1529 {
1530 	ext4_group_t group = ac->ac_g_ex.fe_group;
1531 	int max;
1532 	int err;
1533 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1534 	struct ext4_super_block *es = sbi->s_es;
1535 	struct ext4_free_extent ex;
1536 
1537 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1538 		return 0;
1539 
1540 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1541 	if (err)
1542 		return err;
1543 
1544 	ext4_lock_group(ac->ac_sb, group);
1545 	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1546 			     ac->ac_g_ex.fe_len, &ex);
1547 
1548 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1549 		ext4_fsblk_t start;
1550 
1551 		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
1552 			ex.fe_start + le32_to_cpu(es->s_first_data_block);
1553 		/* use do_div to get remainder (would be 64-bit modulo) */
1554 		if (do_div(start, sbi->s_stripe) == 0) {
1555 			ac->ac_found++;
1556 			ac->ac_b_ex = ex;
1557 			ext4_mb_use_best_found(ac, e4b);
1558 		}
1559 	} else if (max >= ac->ac_g_ex.fe_len) {
1560 		BUG_ON(ex.fe_len <= 0);
1561 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1562 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1563 		ac->ac_found++;
1564 		ac->ac_b_ex = ex;
1565 		ext4_mb_use_best_found(ac, e4b);
1566 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1567 		/* Sometimes, the caller may want to merge even a small
1568 		 * number of blocks into an existing extent */
1569 		BUG_ON(ex.fe_len <= 0);
1570 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1571 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1572 		ac->ac_found++;
1573 		ac->ac_b_ex = ex;
1574 		ext4_mb_use_best_found(ac, e4b);
1575 	}
1576 	ext4_unlock_group(ac->ac_sb, group);
1577 	ext4_mb_release_desc(e4b);
1578 
1579 	return 0;
1580 }
1581 
1582 /*
1583  * The routine scans buddy structures (not the bitmap!) from the given order
1584  * up to the max order and tries to find a big enough chunk to satisfy the request
1585  */
1586 static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1587 					struct ext4_buddy *e4b)
1588 {
1589 	struct super_block *sb = ac->ac_sb;
1590 	struct ext4_group_info *grp = e4b->bd_info;
1591 	void *buddy;
1592 	int i;
1593 	int k;
1594 	int max;
1595 
1596 	BUG_ON(ac->ac_2order <= 0);
1597 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1598 		if (grp->bb_counters[i] == 0)
1599 			continue;
1600 
1601 		buddy = mb_find_buddy(e4b, i, &max);
1602 		BUG_ON(buddy == NULL);
1603 
1604 		k = mb_find_next_zero_bit(buddy, max, 0);
1605 		BUG_ON(k >= max);
1606 
1607 		ac->ac_found++;
1608 
1609 		ac->ac_b_ex.fe_len = 1 << i;
1610 		ac->ac_b_ex.fe_start = k << i;
1611 		ac->ac_b_ex.fe_group = e4b->bd_group;
1612 
1613 		ext4_mb_use_best_found(ac, e4b);
1614 
1615 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1616 
1617 		if (EXT4_SB(sb)->s_mb_stats)
1618 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1619 
1620 		break;
1621 	}
1622 }
1623 
1624 /*
1625  * The routine scans the group and measures all found extents.
1626  * In order to optimize scanning, the caller must pass the number of
1627  * free blocks in the group, so the routine knows the upper limit.
1628  */
1629 static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1630 					struct ext4_buddy *e4b)
1631 {
1632 	struct super_block *sb = ac->ac_sb;
1633 	void *bitmap = EXT4_MB_BITMAP(e4b);
1634 	struct ext4_free_extent ex;
1635 	int i;
1636 	int free;
1637 
1638 	free = e4b->bd_info->bb_free;
1639 	BUG_ON(free <= 0);
1640 
1641 	i = e4b->bd_info->bb_first_free;
1642 
1643 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1644 		i = mb_find_next_zero_bit(bitmap,
1645 						EXT4_BLOCKS_PER_GROUP(sb), i);
1646 		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1647 			/*
1648 			 * If we have a corrupt bitmap, we won't find any
1649 			 * free blocks even though the group info says
1650 			 * we have free blocks
1651 			 */
1652 			ext4_grp_locked_error(sb, e4b->bd_group,
1653 					__func__, "%d free blocks as per "
1654 					"group info. But bitmap says 0",
1655 					free);
1656 			break;
1657 		}
1658 
1659 		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1660 		BUG_ON(ex.fe_len <= 0);
1661 		if (free < ex.fe_len) {
1662 			ext4_grp_locked_error(sb, e4b->bd_group,
1663 					__func__, "%d free blocks as per "
1664 					"group info. But got %d blocks",
1665 					free, ex.fe_len);
1666 			/*
1667 			 * The number of free blocks differs. This mostly
1668 			 * indicates that the bitmap is corrupt. So exit
1669 			 * without claiming the space.
1670 			 */
1671 			break;
1672 		}
1673 
1674 		ext4_mb_measure_extent(ac, &ex, e4b);
1675 
1676 		i += ex.fe_len;
1677 		free -= ex.fe_len;
1678 	}
1679 
1680 	ext4_mb_check_limits(ac, e4b, 1);
1681 }
1682 
1683 /*
1684  * This is a special case for storage like RAID5:
1685  * we try to find stripe-aligned chunks for stripe-sized requests
1686  * XXX should do so at least for multiples of stripe size as well
1687  */
1688 static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1689 				 struct ext4_buddy *e4b)
1690 {
1691 	struct super_block *sb = ac->ac_sb;
1692 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1693 	void *bitmap = EXT4_MB_BITMAP(e4b);
1694 	struct ext4_free_extent ex;
1695 	ext4_fsblk_t first_group_block;
1696 	ext4_fsblk_t a;
1697 	ext4_grpblk_t i;
1698 	int max;
1699 
1700 	BUG_ON(sbi->s_stripe == 0);
1701 
1702 	/* find first stripe-aligned block in group */
1703 	first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
1704 		+ le32_to_cpu(sbi->s_es->s_first_data_block);
1705 	a = first_group_block + sbi->s_stripe - 1;
1706 	do_div(a, sbi->s_stripe);
1707 	i = (a * sbi->s_stripe) - first_group_block;
1708 
1709 	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1710 		if (!mb_test_bit(i, bitmap)) {
1711 			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1712 			if (max >= sbi->s_stripe) {
1713 				ac->ac_found++;
1714 				ac->ac_b_ex = ex;
1715 				ext4_mb_use_best_found(ac, e4b);
1716 				break;
1717 			}
1718 		}
1719 		i += sbi->s_stripe;
1720 	}
1721 }
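
/*
 * Illustrative example (not from the original source): if the group's
 * first block is fs block 1000 and s_stripe = 16, then
 *   a = (1000 + 16 - 1) / 16 = 63   (via do_div)
 *   i = 63 * 16 - 1000       = 8
 * so scanning starts at the first stripe-aligned block inside the
 * group and advances one stripe at a time.
 */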
1722 
1723 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1724 				ext4_group_t group, int cr)
1725 {
1726 	unsigned free, fragments;
1727 	unsigned i, bits;
1728 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1729 	struct ext4_group_desc *desc;
1730 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1731 
1732 	BUG_ON(cr < 0 || cr >= 4);
1733 	BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
1734 
1735 	free = grp->bb_free;
1736 	fragments = grp->bb_fragments;
1737 	if (free == 0)
1738 		return 0;
1739 	if (fragments == 0)
1740 		return 0;
1741 
1742 	switch (cr) {
1743 	case 0:
1744 		BUG_ON(ac->ac_2order == 0);
1745 		/* If this group is uninitialized, skip it initially */
1746 		desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
1747 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1748 			return 0;
1749 
1750 		/* Avoid using the first bg of a flexgroup for data files */
1751 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1752 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1753 		    ((group % flex_size) == 0))
1754 			return 0;
1755 
1756 		bits = ac->ac_sb->s_blocksize_bits + 1;
1757 		for (i = ac->ac_2order; i <= bits; i++)
1758 			if (grp->bb_counters[i] > 0)
1759 				return 1;
1760 		break;
1761 	case 1:
1762 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1763 			return 1;
1764 		break;
1765 	case 2:
1766 		if (free >= ac->ac_g_ex.fe_len)
1767 			return 1;
1768 		break;
1769 	case 3:
1770 		return 1;
1771 	default:
1772 		BUG();
1773 	}
1774 
1775 	return 0;
1776 }
1777 
1778 /*
1779  * lock the group_info alloc_sem of all the groups
1780  * belonging to the same buddy cache page. This
1781  * makes sure no other parallel operation on the
1782  * buddy cache can happen while holding the buddy
1783  * cache lock
1784  */
1785 int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
1786 {
1787 	int i;
1788 	int block, pnum;
1789 	int blocks_per_page;
1790 	int groups_per_page;
1791 	ext4_group_t first_group;
1792 	struct ext4_group_info *grp;
1793 
1794 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1795 	/*
1796 	 * the buddy cache inode stores the block bitmap
1797 	 * and buddy information in consecutive blocks.
1798 	 * So for each group we need two blocks.
1799 	 */
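	/*
	 * Example, assuming 4k pages and 1k blocks (blocks_per_page = 4):
	 * group 5 maps to blocks 10 and 11, which live on page 2; that
	 * page also holds group 4's blocks, so first_group = 4 and
	 * groups_per_page = 2.
	 */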
1800 	block = group * 2;
1801 	pnum = block / blocks_per_page;
1802 	first_group = pnum * blocks_per_page / 2;
1803 
1804 	groups_per_page = blocks_per_page >> 1;
1805 	if (groups_per_page == 0)
1806 		groups_per_page = 1;
1807 	/* read all groups the page covers into the cache */
1808 	for (i = 0; i < groups_per_page; i++) {
1809 
1810 		if ((first_group + i) >= EXT4_SB(sb)->s_groups_count)
1811 			break;
1812 		grp = ext4_get_group_info(sb, first_group + i);
1813 		/* take each group's write allocation
1814 		 * semaphore. This makes sure there is
1815 		 * no block allocation going on in any
1816 		 * of those groups
1817 		 */
1818 		down_write_nested(&grp->alloc_sem, i);
1819 	}
1820 	return i;
1821 }
1822 
1823 void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
1824 					ext4_group_t group, int locked_group)
1825 {
1826 	int i;
1827 	int block, pnum;
1828 	int blocks_per_page;
1829 	ext4_group_t first_group;
1830 	struct ext4_group_info *grp;
1831 
1832 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1833 	/*
1834 	 * the buddy cache inode stores the block bitmap
1835 	 * and buddy information in consecutive blocks.
1836 	 * So for each group we need two blocks.
1837 	 */
1838 	block = group * 2;
1839 	pnum = block / blocks_per_page;
1840 	first_group = pnum * blocks_per_page / 2;
1841 	/* release locks on all the groups */
1842 	for (i = 0; i < locked_group; i++) {
1843 
1844 		grp = ext4_get_group_info(sb, first_group + i);
1845 		/* release the write allocation
1846 		 * semaphore of each group we locked
1847 		 * above, so block allocation in
1848 		 * those groups can resume
1849 		 */
1850 		up_write(&grp->alloc_sem);
1851 	}
1853 }
1854 
1855 static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1856 {
1858 	int ret;
1859 	void *bitmap;
1860 	int blocks_per_page;
1861 	int block, pnum, poff;
1862 	int num_grp_locked = 0;
1863 	struct ext4_group_info *this_grp;
1864 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1865 	struct inode *inode = sbi->s_buddy_cache;
1866 	struct page *page = NULL, *bitmap_page = NULL;
1867 
1868 	mb_debug("init group %u\n", group);
1869 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1870 	this_grp = ext4_get_group_info(sb, group);
1871 	/*
1872 	 * This ensures we don't add a group
1873 	 * to this buddy cache via resize
1874 	 */
1875 	num_grp_locked =  ext4_mb_get_buddy_cache_lock(sb, group);
1876 	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
1877 		/*
1878 		 * somebody already initialized the group;
1879 		 * return without doing anything
1880 		 */
1881 		ret = 0;
1882 		goto err;
1883 	}
1884 	/*
1885 	 * the buddy cache inode stores the block bitmap
1886 	 * and buddy information in consecutive blocks.
1887 	 * So for each group we need two blocks.
1888 	 */
1889 	block = group * 2;
1890 	pnum = block / blocks_per_page;
1891 	poff = block % blocks_per_page;
1892 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1893 	if (page) {
1894 		BUG_ON(page->mapping != inode->i_mapping);
1895 		ret = ext4_mb_init_cache(page, NULL);
1896 		if (ret) {
1897 			unlock_page(page);
1898 			goto err;
1899 		}
1900 		unlock_page(page);
1901 	}
1902 	if (page == NULL || !PageUptodate(page)) {
1903 		ret = -EIO;
1904 		goto err;
1905 	}
1906 	mark_page_accessed(page);
1907 	bitmap_page = page;
1908 	bitmap = page_address(page) + (poff * sb->s_blocksize);
1909 
1910 	/* init buddy cache */
1911 	block++;
1912 	pnum = block / blocks_per_page;
1913 	poff = block % blocks_per_page;
1914 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1915 	if (page == bitmap_page) {
1916 		/*
1917 		 * If both the bitmap and buddy are in
1918 		 * the same page we don't need to force
1919 		 * init the buddy
1920 		 */
1921 		unlock_page(page);
1922 	} else if (page) {
1923 		BUG_ON(page->mapping != inode->i_mapping);
1924 		ret = ext4_mb_init_cache(page, bitmap);
1925 		if (ret) {
1926 			unlock_page(page);
1927 			goto err;
1928 		}
1929 		unlock_page(page);
1930 	}
1931 	if (page == NULL || !PageUptodate(page)) {
1932 		ret = -EIO;
1933 		goto err;
1934 	}
1935 	mark_page_accessed(page);
1936 err:
1937 	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
1938 	if (bitmap_page)
1939 		page_cache_release(bitmap_page);
1940 	if (page)
1941 		page_cache_release(page);
1942 	return ret;
1943 }
1944 
1945 static noinline_for_stack int
1946 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1947 {
1948 	ext4_group_t group;
1949 	ext4_group_t i;
1950 	int cr;
1951 	int err = 0;
1952 	int bsbits;
1953 	struct ext4_sb_info *sbi;
1954 	struct super_block *sb;
1955 	struct ext4_buddy e4b;
1956 	loff_t size, isize;
1957 
1958 	sb = ac->ac_sb;
1959 	sbi = EXT4_SB(sb);
1960 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1961 
1962 	/* first, try the goal */
1963 	err = ext4_mb_find_by_goal(ac, &e4b);
1964 	if (err || ac->ac_status == AC_STATUS_FOUND)
1965 		goto out;
1966 
1967 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1968 		goto out;
1969 
1970 	/*
1971 	 * ac->ac_2order is set only if the fe_len is a power of 2.
1972 	 * If ac_2order is set we also set the criteria to 0 so that we
1973 	 * try exact allocation using the buddy.
1974 	 */
1975 	i = fls(ac->ac_g_ex.fe_len);
1976 	ac->ac_2order = 0;
1977 	/*
1978 	 * We search using buddy data only if the order of the request
1979 	 * is greater than or equal to sbi->s_mb_order2_reqs.
1980 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1981 	 */
1982 	if (i >= sbi->s_mb_order2_reqs) {
1983 		/*
1984 		 * This should tell us if fe_len is exactly a power of 2
1985 		 */
1986 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1987 			ac->ac_2order = i - 1;
1988 	}
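	/*
	 * Example: for fe_len = 16, fls() returns 5 and
	 * 16 & ~(1 << 4) == 0, so ac_2order becomes 4; for fe_len = 24
	 * the test fails (24 & ~16 == 8) and ac_2order stays 0.
	 */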
1989 
1990 	bsbits = ac->ac_sb->s_blocksize_bits;
1991 	/* if stream allocation is enabled, use global goal */
1992 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
1993 	isize = i_size_read(ac->ac_inode) >> bsbits;
1994 	if (size < isize)
1995 		size = isize;
1996 
1997 	if (size < sbi->s_mb_stream_request &&
1998 			(ac->ac_flags & EXT4_MB_HINT_DATA)) {
1999 		/* TBD: may be a hot spot */
2000 		spin_lock(&sbi->s_md_lock);
2001 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2002 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2003 		spin_unlock(&sbi->s_md_lock);
2004 	}
2005 	/* Let's just scan groups to find more or less suitable blocks */
2006 	cr = ac->ac_2order ? 0 : 1;
2007 	/*
2008 	 * cr == 0 tries exact power-of-2 allocation from the buddy;
2009 	 * cr == 3 takes anything (see ext4_mb_good_group() for 1 and 2)
2010 	 */
2011 repeat:
2012 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2013 		ac->ac_criteria = cr;
2014 		/*
2015 		 * start searching for the right group
2016 		 * from the goal value specified
2017 		 */
2017 		 */
2018 		group = ac->ac_g_ex.fe_group;
2019 
2020 		for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
2021 			struct ext4_group_info *grp;
2022 			struct ext4_group_desc *desc;
2023 
2024 			if (group == EXT4_SB(sb)->s_groups_count)
2025 				group = 0;
2026 
2027 			/* quick check to skip empty groups */
2028 			grp = ext4_get_group_info(sb, group);
2029 			if (grp->bb_free == 0)
2030 				continue;
2031 
2032 			/*
2033 			 * if the group is already initialized, check whether
2034 			 * it is a good group; if not, don't load the buddy
2035 			 */
2035 			 */
2036 			if (EXT4_MB_GRP_NEED_INIT(grp)) {
2037 				/*
2038 				 * we need full data about the group
2039 				 * to make a good selection
2040 				 */
2041 				err = ext4_mb_init_group(sb, group);
2042 				if (err)
2043 					goto out;
2044 			}
2045 
2046 			/*
2047 			 * If the particular group doesn't satisfy our
2048 			 * criteria we continue with the next group
2049 			 */
2050 			if (!ext4_mb_good_group(ac, group, cr))
2051 				continue;
2052 
2053 			err = ext4_mb_load_buddy(sb, group, &e4b);
2054 			if (err)
2055 				goto out;
2056 
2057 			ext4_lock_group(sb, group);
2058 			if (!ext4_mb_good_group(ac, group, cr)) {
2059 				/* someone did allocation from this group */
2060 				ext4_unlock_group(sb, group);
2061 				ext4_mb_release_desc(&e4b);
2062 				continue;
2063 			}
2064 
2065 			ac->ac_groups_scanned++;
2066 			desc = ext4_get_group_desc(sb, group, NULL);
2067 			if (cr == 0 || (desc->bg_flags &
2068 					cpu_to_le16(EXT4_BG_BLOCK_UNINIT) &&
2069 					ac->ac_2order != 0))
2070 				ext4_mb_simple_scan_group(ac, &e4b);
2071 			else if (cr == 1 &&
2072 					ac->ac_g_ex.fe_len == sbi->s_stripe)
2073 				ext4_mb_scan_aligned(ac, &e4b);
2074 			else
2075 				ext4_mb_complex_scan_group(ac, &e4b);
2076 
2077 			ext4_unlock_group(sb, group);
2078 			ext4_mb_release_desc(&e4b);
2079 
2080 			if (ac->ac_status != AC_STATUS_CONTINUE)
2081 				break;
2082 		}
2083 	}
2084 
2085 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2086 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2087 		/*
2088 		 * We've been searching too long. Let's try to allocate
2089 		 * the best chunk we've found so far
2090 		 */
2091 
2092 		ext4_mb_try_best_found(ac, &e4b);
2093 		if (ac->ac_status != AC_STATUS_FOUND) {
2094 			/*
2095 			 * Someone more lucky has already allocated it.
2096 			 * The only thing we can do is just take first
2097 			 * found block(s)
2098 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2099 			 */
2100 			ac->ac_b_ex.fe_group = 0;
2101 			ac->ac_b_ex.fe_start = 0;
2102 			ac->ac_b_ex.fe_len = 0;
2103 			ac->ac_status = AC_STATUS_CONTINUE;
2104 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2105 			cr = 3;
2106 			atomic_inc(&sbi->s_mb_lost_chunks);
2107 			goto repeat;
2108 		}
2109 	}
2110 out:
2111 	return err;
2112 }
2113 
2114 #ifdef EXT4_MB_HISTORY
2115 struct ext4_mb_proc_session {
2116 	struct ext4_mb_history *history;
2117 	struct super_block *sb;
2118 	int start;
2119 	int max;
2120 };
2121 
2122 static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
2123 					struct ext4_mb_history *hs,
2124 					int first)
2125 {
2126 	if (hs == s->history + s->max)
2127 		hs = s->history;
2128 	if (!first && hs == s->history + s->start)
2129 		return NULL;
2130 	while (hs->orig.fe_len == 0) {
2131 		hs++;
2132 		if (hs == s->history + s->max)
2133 			hs = s->history;
2134 		if (hs == s->history + s->start)
2135 			return NULL;
2136 	}
2137 	return hs;
2138 }
2139 
2140 static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2141 {
2142 	struct ext4_mb_proc_session *s = seq->private;
2143 	struct ext4_mb_history *hs;
2144 	int l = *pos;
2145 
2146 	if (l == 0)
2147 		return SEQ_START_TOKEN;
2148 	hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2149 	if (!hs)
2150 		return NULL;
2151 	while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2152 	return hs;
2153 }
2154 
2155 static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
2156 				      loff_t *pos)
2157 {
2158 	struct ext4_mb_proc_session *s = seq->private;
2159 	struct ext4_mb_history *hs = v;
2160 
2161 	++*pos;
2162 	if (v == SEQ_START_TOKEN)
2163 		return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2164 	else
2165 		return ext4_mb_history_skip_empty(s, ++hs, 0);
2166 }
2167 
2168 static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
2169 {
2170 	char buf[25], buf2[25], buf3[25], *fmt;
2171 	struct ext4_mb_history *hs = v;
2172 
2173 	if (v == SEQ_START_TOKEN) {
2174 		seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2175 				"%-5s %-2s %-5s %-5s %-5s %-6s\n",
2176 			  "pid", "inode", "original", "goal", "result", "found",
2177 			   "grps", "cr", "flags", "merge", "tail", "broken");
2178 		return 0;
2179 	}
2180 
2181 	if (hs->op == EXT4_MB_HISTORY_ALLOC) {
2182 		fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2183 			"%-5u %-5s %-5u %-6u\n";
2184 		sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
2185 			hs->result.fe_start, hs->result.fe_len,
2186 			hs->result.fe_logical);
2187 		sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
2188 			hs->orig.fe_start, hs->orig.fe_len,
2189 			hs->orig.fe_logical);
2190 		sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
2191 			hs->goal.fe_start, hs->goal.fe_len,
2192 			hs->goal.fe_logical);
2193 		seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2194 				hs->found, hs->groups, hs->cr, hs->flags,
2195 				hs->merged ? "M" : "", hs->tail,
2196 				hs->buddy ? 1 << hs->buddy : 0);
2197 	} else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
2198 		fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2199 		sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
2200 			hs->result.fe_start, hs->result.fe_len,
2201 			hs->result.fe_logical);
2202 		sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
2203 			hs->orig.fe_start, hs->orig.fe_len,
2204 			hs->orig.fe_logical);
2205 		seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2206 	} else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
2207 		sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
2208 			hs->result.fe_start, hs->result.fe_len);
2209 		seq_printf(seq, "%-5u %-8u %-23s discard\n",
2210 				hs->pid, hs->ino, buf2);
2211 	} else if (hs->op == EXT4_MB_HISTORY_FREE) {
2212 		sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
2213 			hs->result.fe_start, hs->result.fe_len);
2214 		seq_printf(seq, "%-5u %-8u %-23s free\n",
2215 				hs->pid, hs->ino, buf2);
2216 	}
2217 	return 0;
2218 }
2219 
2220 static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
2221 {
2222 }
2223 
2224 static struct seq_operations ext4_mb_seq_history_ops = {
2225 	.start  = ext4_mb_seq_history_start,
2226 	.next   = ext4_mb_seq_history_next,
2227 	.stop   = ext4_mb_seq_history_stop,
2228 	.show   = ext4_mb_seq_history_show,
2229 };
2230 
2231 static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
2232 {
2233 	struct super_block *sb = PDE(inode)->data;
2234 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2235 	struct ext4_mb_proc_session *s;
2236 	int rc;
2237 	int size;
2238 
2239 	if (unlikely(sbi->s_mb_history == NULL))
2240 		return -ENOMEM;
2241 	s = kmalloc(sizeof(*s), GFP_KERNEL);
2242 	if (s == NULL)
2243 		return -ENOMEM;
2244 	s->sb = sb;
2245 	size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
2246 	s->history = kmalloc(size, GFP_KERNEL);
2247 	if (s->history == NULL) {
2248 		kfree(s);
2249 		return -ENOMEM;
2250 	}
2251 
2252 	spin_lock(&sbi->s_mb_history_lock);
2253 	memcpy(s->history, sbi->s_mb_history, size);
2254 	s->max = sbi->s_mb_history_max;
2255 	s->start = sbi->s_mb_history_cur % s->max;
2256 	spin_unlock(&sbi->s_mb_history_lock);
2257 
2258 	rc = seq_open(file, &ext4_mb_seq_history_ops);
2259 	if (rc == 0) {
2260 		struct seq_file *m = (struct seq_file *)file->private_data;
2261 		m->private = s;
2262 	} else {
2263 		kfree(s->history);
2264 		kfree(s);
2265 	}
2266 	return rc;
2268 }
2269 
2270 static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2271 {
2272 	struct seq_file *seq = (struct seq_file *)file->private_data;
2273 	struct ext4_mb_proc_session *s = seq->private;
2274 	kfree(s->history);
2275 	kfree(s);
2276 	return seq_release(inode, file);
2277 }
2278 
2279 static ssize_t ext4_mb_seq_history_write(struct file *file,
2280 				const char __user *buffer,
2281 				size_t count, loff_t *ppos)
2282 {
2283 	struct seq_file *seq = (struct seq_file *)file->private_data;
2284 	struct ext4_mb_proc_session *s = seq->private;
2285 	struct super_block *sb = s->sb;
2286 	char str[32];
2287 	int value;
2288 
2289 	if (count >= sizeof(str)) {
2290 		printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2291 				"mb_history", (int)sizeof(str));
2292 		return -EOVERFLOW;
2293 	}
2294 
2295 	if (copy_from_user(str, buffer, count))
2296 		return -EFAULT;
2297 	str[count] = '\0';	/* simple_strtol() expects a NUL-terminated string */

2298 	value = simple_strtol(str, NULL, 0);
2299 	if (value < 0)
2300 		return -ERANGE;
2301 	EXT4_SB(sb)->s_mb_history_filter = value;
2302 
2303 	return count;
2304 }
2305 
2306 static struct file_operations ext4_mb_seq_history_fops = {
2307 	.owner		= THIS_MODULE,
2308 	.open		= ext4_mb_seq_history_open,
2309 	.read		= seq_read,
2310 	.write		= ext4_mb_seq_history_write,
2311 	.llseek		= seq_lseek,
2312 	.release	= ext4_mb_seq_history_release,
2313 };
2314 
2315 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2316 {
2317 	struct super_block *sb = seq->private;
2318 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2319 	ext4_group_t group;
2320 
2321 	if (*pos < 0 || *pos >= sbi->s_groups_count)
2322 		return NULL;
2323 
2324 	group = *pos + 1;
2325 	return (void *) ((unsigned long) group);
2326 }
2327 
2328 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2329 {
2330 	struct super_block *sb = seq->private;
2331 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2332 	ext4_group_t group;
2333 
2334 	++*pos;
2335 	if (*pos < 0 || *pos >= sbi->s_groups_count)
2336 		return NULL;
2337 	group = *pos + 1;
2338 	return (void *) ((unsigned long) group);
2339 }
2340 
2341 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2342 {
2343 	struct super_block *sb = seq->private;
2344 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2345 	int i;
2346 	int err;
2347 	struct ext4_buddy e4b;
2348 	struct sg {
2349 		struct ext4_group_info info;
2350 		unsigned short counters[16];
2351 	} sg;
2352 
2353 	group--;
2354 	if (group == 0)
2355 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2356 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2357 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2358 			   "group", "free", "frags", "first",
2359 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2360 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2361 
2362 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2363 		sizeof(struct ext4_group_info);
2364 	err = ext4_mb_load_buddy(sb, group, &e4b);
2365 	if (err) {
2366 		seq_printf(seq, "#%-5u: I/O error\n", group);
2367 		return 0;
2368 	}
2369 	ext4_lock_group(sb, group);
2370 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2371 	ext4_unlock_group(sb, group);
2372 	ext4_mb_release_desc(&e4b);
2373 
2374 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2375 			sg.info.bb_fragments, sg.info.bb_first_free);
2376 	for (i = 0; i <= 13; i++)
2377 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2378 				sg.info.bb_counters[i] : 0);
2379 	seq_printf(seq, " ]\n");
2380 
2381 	return 0;
2382 }
2383 
2384 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2385 {
2386 }
2387 
2388 static struct seq_operations ext4_mb_seq_groups_ops = {
2389 	.start  = ext4_mb_seq_groups_start,
2390 	.next   = ext4_mb_seq_groups_next,
2391 	.stop   = ext4_mb_seq_groups_stop,
2392 	.show   = ext4_mb_seq_groups_show,
2393 };
2394 
2395 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2396 {
2397 	struct super_block *sb = PDE(inode)->data;
2398 	int rc;
2399 
2400 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2401 	if (rc == 0) {
2402 		struct seq_file *m = (struct seq_file *)file->private_data;
2403 		m->private = sb;
2404 	}
2405 	return rc;
2407 }
2408 
2409 static struct file_operations ext4_mb_seq_groups_fops = {
2410 	.owner		= THIS_MODULE,
2411 	.open		= ext4_mb_seq_groups_open,
2412 	.read		= seq_read,
2413 	.llseek		= seq_lseek,
2414 	.release	= seq_release,
2415 };
2416 
2417 static void ext4_mb_history_release(struct super_block *sb)
2418 {
2419 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2420 
2421 	if (sbi->s_proc != NULL) {
2422 		remove_proc_entry("mb_groups", sbi->s_proc);
2423 		remove_proc_entry("mb_history", sbi->s_proc);
2424 	}
2425 	kfree(sbi->s_mb_history);
2426 }
2427 
2428 static void ext4_mb_history_init(struct super_block *sb)
2429 {
2430 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2431 	int i;
2432 
2433 	if (sbi->s_proc != NULL) {
2434 		proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
2435 				 &ext4_mb_seq_history_fops, sb);
2436 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2437 				 &ext4_mb_seq_groups_fops, sb);
2438 	}
2439 
2440 	sbi->s_mb_history_max = 1000;
2441 	sbi->s_mb_history_cur = 0;
2442 	spin_lock_init(&sbi->s_mb_history_lock);
2443 	i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2444 	sbi->s_mb_history = kzalloc(i, GFP_KERNEL);
2445 	/* if we can't allocate history, then we simply won't use it */
2446 }
2447 
2448 static noinline_for_stack void
2449 ext4_mb_store_history(struct ext4_allocation_context *ac)
2450 {
2451 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2452 	struct ext4_mb_history h;
2453 
2454 	if (unlikely(sbi->s_mb_history == NULL))
2455 		return;
2456 
2457 	if (!(ac->ac_op & sbi->s_mb_history_filter))
2458 		return;
2459 
2460 	h.op = ac->ac_op;
2461 	h.pid = current->pid;
2462 	h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2463 	h.orig = ac->ac_o_ex;
2464 	h.result = ac->ac_b_ex;
2465 	h.flags = ac->ac_flags;
2466 	h.found = ac->ac_found;
2467 	h.groups = ac->ac_groups_scanned;
2468 	h.cr = ac->ac_criteria;
2469 	h.tail = ac->ac_tail;
2470 	h.buddy = ac->ac_buddy;
2471 	h.merged = 0;
2472 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2473 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2474 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2475 			h.merged = 1;
2476 		h.goal = ac->ac_g_ex;
2477 		h.result = ac->ac_f_ex;
2478 	}
2479 
2480 	spin_lock(&sbi->s_mb_history_lock);
2481 	memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2482 	if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2483 		sbi->s_mb_history_cur = 0;
2484 	spin_unlock(&sbi->s_mb_history_lock);
2485 }
2486 
2487 #else
2488 #define ext4_mb_history_release(sb)
2489 #define ext4_mb_history_init(sb)
2490 #endif
2491 
2492 
2493 /* Create and initialize ext4_group_info data for the given group. */
2494 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2495 			  struct ext4_group_desc *desc)
2496 {
2497 	int i, len;
2498 	int metalen = 0;
2499 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2500 	struct ext4_group_info **meta_group_info;
2501 
2502 	/*
2503 	 * First check if this group is the first of a group-descriptor block.
2504 	 * If so, we have to allocate a new table of pointers
2505 	 * to ext4_group_info structures
2506 	 */
2507 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2508 		metalen = sizeof(*meta_group_info) <<
2509 			EXT4_DESC_PER_BLOCK_BITS(sb);
2510 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2511 		if (meta_group_info == NULL) {
2512 			printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2513 			       "buddy group\n");
2514 			goto exit_meta_group_info;
2515 		}
2516 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2517 			meta_group_info;
2518 	}
2519 
2520 	/*
2521 	 * calculate needed size. if you change bb_counters size,
2522 	 * don't forget about ext4_mb_generate_buddy()
2523 	 */
2524 	len = offsetof(typeof(**meta_group_info),
2525 		       bb_counters[sb->s_blocksize_bits + 2]);
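	/*
	 * e.g. with a 4k block size (s_blocksize_bits = 12) this sizes
	 * bb_counters[] for 14 entries, covering buddy orders 0..13.
	 */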
2526 
2527 	meta_group_info =
2528 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2529 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2530 
2531 	meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2532 	if (meta_group_info[i] == NULL) {
2533 		printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2534 		goto exit_group_info;
2535 	}
2536 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2537 		&(meta_group_info[i]->bb_state));
2538 
2539 	/*
2540 	 * initialize bb_free to be able to skip
2541 	 * empty groups without initialization
2542 	 */
2543 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2544 		meta_group_info[i]->bb_free =
2545 			ext4_free_blocks_after_init(sb, group, desc);
2546 	} else {
2547 		meta_group_info[i]->bb_free =
2548 			ext4_free_blks_count(sb, desc);
2549 	}
2550 
2551 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2552 	init_rwsem(&meta_group_info[i]->alloc_sem);
2553 	meta_group_info[i]->bb_free_root.rb_node = NULL;
2554 
2555 #ifdef DOUBLE_CHECK
2556 	{
2557 		struct buffer_head *bh;
2558 		meta_group_info[i]->bb_bitmap =
2559 			kmalloc(sb->s_blocksize, GFP_KERNEL);
2560 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2561 		bh = ext4_read_block_bitmap(sb, group);
2562 		BUG_ON(bh == NULL);
2563 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2564 			sb->s_blocksize);
2565 		put_bh(bh);
2566 	}
2567 #endif
2568 
2569 	return 0;
2570 
2571 exit_group_info:
2572 	/* If a meta_group_info table has been allocated, release it now */
2573 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2574 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2575 exit_meta_group_info:
2576 	return -ENOMEM;
2577 } /* ext4_mb_add_groupinfo */
2578 
2579 /*
2580  * Update an existing group.
2581  * This function is used for online resize
2582  */
2583 void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
2584 {
2585 	grp->bb_free += add;
2586 }
2587 
2588 static int ext4_mb_init_backend(struct super_block *sb)
2589 {
2590 	ext4_group_t i;
2591 	int metalen;
2592 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2593 	struct ext4_super_block *es = sbi->s_es;
2594 	int num_meta_group_infos;
2595 	int num_meta_group_infos_max;
2596 	int array_size;
2597 	struct ext4_group_info **meta_group_info;
2598 	struct ext4_group_desc *desc;
2599 
2600 	/* This is the number of blocks used by GDT */
2601 	num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) -
2602 				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2603 
2604 	/*
2605 	 * This is the total number of blocks used by GDT including
2606 	 * the number of reserved blocks for GDT.
2607 	 * The s_group_info array is allocated with this value
2608 	 * to allow a clean online resize without complex
2609 	 * pointer manipulation.
2610 	 * The drawback is the unused memory when no resize
2611 	 * occurs, but it is very low in terms of pages
2612 	 * (see comments below).
2613 	 * Need to handle this properly when META_BG resizing is allowed
2614 	 */
2615 	num_meta_group_infos_max = num_meta_group_infos +
2616 				le16_to_cpu(es->s_reserved_gdt_blocks);
2617 
2618 	/*
2619 	 * array_size is the size of s_group_info array. We round it
2620 	 * to the next power of two because this approximation is done
2621 	 * internally by kmalloc so we can have some more memory
2622 	 * for free here (e.g. may be used for META_BG resize).
2623 	 */
2624 	array_size = 1;
2625 	while (array_size < sizeof(*sbi->s_group_info) *
2626 	       num_meta_group_infos_max)
2627 		array_size = array_size << 1;
2628 	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2629 	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2630 	 * So a two level scheme suffices for now. */
2631 	sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
2632 	if (sbi->s_group_info == NULL) {
2633 		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2634 		return -ENOMEM;
2635 	}
2636 	sbi->s_buddy_cache = new_inode(sb);
2637 	if (sbi->s_buddy_cache == NULL) {
2638 		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2639 		goto err_freesgi;
2640 	}
2641 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2642 
2643 	metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2644 	for (i = 0; i < num_meta_group_infos; i++) {
2645 		if ((i + 1) == num_meta_group_infos)
2646 			metalen = sizeof(*meta_group_info) *
2647 				(sbi->s_groups_count -
2648 					(i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2649 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2650 		if (meta_group_info == NULL) {
2651 			printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2652 			       "buddy group\n");
2653 			goto err_freemeta;
2654 		}
2655 		sbi->s_group_info[i] = meta_group_info;
2656 	}
2657 
2658 	for (i = 0; i < sbi->s_groups_count; i++) {
2659 		desc = ext4_get_group_desc(sb, i, NULL);
2660 		if (desc == NULL) {
2661 			printk(KERN_ERR
2662 				"EXT4-fs: can't read descriptor %u\n", i);
2663 			goto err_freebuddy;
2664 		}
2665 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2666 			goto err_freebuddy;
2667 	}
2668 
2669 	return 0;
2670 
2671 err_freebuddy:
2672 	while (i-- > 0)
2673 		kfree(ext4_get_group_info(sb, i));
2674 	i = num_meta_group_infos;
2675 err_freemeta:
2676 	while (i-- > 0)
2677 		kfree(sbi->s_group_info[i]);
2678 	iput(sbi->s_buddy_cache);
2679 err_freesgi:
2680 	kfree(sbi->s_group_info);
2681 	return -ENOMEM;
2682 }
2683 
2684 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2685 {
2686 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2687 	unsigned i, j;
2688 	unsigned offset;
2689 	unsigned max;
2690 	int ret;
2691 
2692 	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2693 
2694 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2695 	if (sbi->s_mb_offsets == NULL)
2696 		return -ENOMEM;
2698 
2699 	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
2700 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2701 	if (sbi->s_mb_maxs == NULL) {
2702 		kfree(sbi->s_mb_offsets);
2703 		return -ENOMEM;
2704 	}
2705 
2706 	/* order 0 is regular bitmap */
2707 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2708 	sbi->s_mb_offsets[0] = 0;
2709 
2710 	i = 1;
2711 	offset = 0;
2712 	max = sb->s_blocksize << 2;
2713 	do {
2714 		sbi->s_mb_offsets[i] = offset;
2715 		sbi->s_mb_maxs[i] = max;
2716 		offset += 1 << (sb->s_blocksize_bits - i);
2717 		max = max >> 1;
2718 		i++;
2719 	} while (i <= sb->s_blocksize_bits + 1);
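	/*
	 * Illustration for 4k blocks (s_blocksize_bits = 12): order 0 is
	 * the on-disk bitmap with 32768 bits; within the buddy block,
	 * order 1 starts at byte 0 with 16384 bits, order 2 at byte 2048
	 * with 8192 bits, and so on down to order 13 at byte 4095 with
	 * 4 bits (one per 8192-block chunk of the 32768-block group).
	 */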
2720 
2721 	/* init file for buddy data */
2722 	ret = ext4_mb_init_backend(sb);
2723 	if (ret != 0) {
2724 		kfree(sbi->s_mb_offsets);
2725 		kfree(sbi->s_mb_maxs);
2726 		return ret;
2727 	}
2728 
2729 	spin_lock_init(&sbi->s_md_lock);
2730 	spin_lock_init(&sbi->s_bal_lock);
2731 
2732 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2733 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2734 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2735 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2736 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2737 	sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2738 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2739 
2740 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2741 	if (sbi->s_locality_groups == NULL) {
2742 		kfree(sbi->s_mb_offsets);
2743 		kfree(sbi->s_mb_maxs);
2744 		return -ENOMEM;
2745 	}
2746 	for_each_possible_cpu(i) {
2747 		struct ext4_locality_group *lg;
2748 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2749 		mutex_init(&lg->lg_mutex);
2750 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2751 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2752 		spin_lock_init(&lg->lg_prealloc_lock);
2753 	}
2754 
2755 	ext4_mb_history_init(sb);
2756 
2757 	if (sbi->s_journal)
2758 		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2759 
2760 	printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2761 	return 0;
2762 }
2763 
2764 /* needs to be called with the ext4 group lock held (ext4_lock_group) */
2765 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2766 {
2767 	struct ext4_prealloc_space *pa;
2768 	struct list_head *cur, *tmp;
2769 	int count = 0;
2770 
2771 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2772 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2773 		list_del(&pa->pa_group_list);
2774 		count++;
2775 		kmem_cache_free(ext4_pspace_cachep, pa);
2776 	}
2777 	if (count)
2778 		mb_debug("mballoc: freed %u PAs\n", count);
2780 }
2781 
2782 int ext4_mb_release(struct super_block *sb)
2783 {
2784 	ext4_group_t i;
2785 	int num_meta_group_infos;
2786 	struct ext4_group_info *grinfo;
2787 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2788 
2789 	if (sbi->s_group_info) {
2790 		for (i = 0; i < sbi->s_groups_count; i++) {
2791 			grinfo = ext4_get_group_info(sb, i);
2792 #ifdef DOUBLE_CHECK
2793 			kfree(grinfo->bb_bitmap);
2794 #endif
2795 			ext4_lock_group(sb, i);
2796 			ext4_mb_cleanup_pa(grinfo);
2797 			ext4_unlock_group(sb, i);
2798 			kfree(grinfo);
2799 		}
2800 		num_meta_group_infos = (sbi->s_groups_count +
2801 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2802 			EXT4_DESC_PER_BLOCK_BITS(sb);
2803 		for (i = 0; i < num_meta_group_infos; i++)
2804 			kfree(sbi->s_group_info[i]);
2805 		kfree(sbi->s_group_info);
2806 	}
2807 	kfree(sbi->s_mb_offsets);
2808 	kfree(sbi->s_mb_maxs);
2809 	if (sbi->s_buddy_cache)
2810 		iput(sbi->s_buddy_cache);
2811 	if (sbi->s_mb_stats) {
2812 		printk(KERN_INFO
2813 		       "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2814 				atomic_read(&sbi->s_bal_allocated),
2815 				atomic_read(&sbi->s_bal_reqs),
2816 				atomic_read(&sbi->s_bal_success));
2817 		printk(KERN_INFO
2818 		      "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2819 				"%u 2^N hits, %u breaks, %u lost\n",
2820 				atomic_read(&sbi->s_bal_ex_scanned),
2821 				atomic_read(&sbi->s_bal_goals),
2822 				atomic_read(&sbi->s_bal_2orders),
2823 				atomic_read(&sbi->s_bal_breaks),
2824 				atomic_read(&sbi->s_mb_lost_chunks));
2825 		printk(KERN_INFO
2826 		       "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2827 				sbi->s_mb_buddies_generated++,
2828 				sbi->s_mb_generation_time);
2829 		printk(KERN_INFO
2830 		       "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2831 				atomic_read(&sbi->s_mb_preallocated),
2832 				atomic_read(&sbi->s_mb_discarded));
2833 	}
2834 
2835 	free_percpu(sbi->s_locality_groups);
2836 	ext4_mb_history_release(sb);
2837 
2838 	return 0;
2839 }
2840 
2841 /*
2842  * This function is called by the jbd2 layer once the commit has finished,
2843  * so we know we can free the blocks that were released with that commit.
2844  */
2845 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2846 {
2847 	struct super_block *sb = journal->j_private;
2848 	struct ext4_buddy e4b;
2849 	struct ext4_group_info *db;
2850 	int err, count = 0, count2 = 0;
2851 	struct ext4_free_data *entry;
2852 	ext4_fsblk_t discard_block;
2853 	struct list_head *l, *ltmp;
2854 
2855 	list_for_each_safe(l, ltmp, &txn->t_private_list) {
2856 		entry = list_entry(l, struct ext4_free_data, list);
2857 
2858 		mb_debug("gonna free %u blocks in group %u (0x%p):",
2859 			 entry->count, entry->group, entry);
2860 
2861 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2862 		/* we expect to find existing buddy because it's pinned */
2863 		BUG_ON(err != 0);
2864 
2865 		db = e4b.bd_info;
2866 		/* there are blocks to put in buddy to make them really free */
2867 		count += entry->count;
2868 		count2++;
2869 		ext4_lock_group(sb, entry->group);
2870 		/* Take it out of per group rb tree */
2871 		rb_erase(&entry->node, &(db->bb_free_root));
2872 		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2873 
2874 		if (!db->bb_free_root.rb_node) {
2875 			/* No more items in the per group rb tree
2876 			 * balance refcounts from ext4_mb_free_metadata()
2877 			 */
2878 			page_cache_release(e4b.bd_buddy_page);
2879 			page_cache_release(e4b.bd_bitmap_page);
2880 		}
2881 		ext4_unlock_group(sb, entry->group);
2882 		discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
2883 			+ entry->start_blk
2884 			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
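		/*
		 * e.g. with 32768 blocks per group and s_first_data_block
		 * = 0, start_blk 100 of group 3 is absolute block 98404.
		 */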
2885 		trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u",
2886 			   sb->s_id, (unsigned long long) discard_block,
2887 			   entry->count);
2888 		sb_issue_discard(sb, discard_block, entry->count);
2889 
2890 		kmem_cache_free(ext4_free_ext_cachep, entry);
2891 		ext4_mb_release_desc(&e4b);
2892 	}
2893 
2894 	mb_debug("freed %u blocks in %u structures\n", count, count2);
2895 }
2896 
2897 int __init init_ext4_mballoc(void)
2898 {
2899 	ext4_pspace_cachep =
2900 		kmem_cache_create("ext4_prealloc_space",
2901 				     sizeof(struct ext4_prealloc_space),
2902 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2903 	if (ext4_pspace_cachep == NULL)
2904 		return -ENOMEM;
2905 
2906 	ext4_ac_cachep =
2907 		kmem_cache_create("ext4_alloc_context",
2908 				     sizeof(struct ext4_allocation_context),
2909 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2910 	if (ext4_ac_cachep == NULL) {
2911 		kmem_cache_destroy(ext4_pspace_cachep);
2912 		return -ENOMEM;
2913 	}
2914 
2915 	ext4_free_ext_cachep =
2916 		kmem_cache_create("ext4_free_block_extents",
2917 				     sizeof(struct ext4_free_data),
2918 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2919 	if (ext4_free_ext_cachep == NULL) {
2920 		kmem_cache_destroy(ext4_pspace_cachep);
2921 		kmem_cache_destroy(ext4_ac_cachep);
2922 		return -ENOMEM;
2923 	}
2924 	return 0;
2925 }
2926 
2927 void exit_ext4_mballoc(void)
2928 {
2929 	/* XXX: synchronize_rcu(); */
2930 	kmem_cache_destroy(ext4_pspace_cachep);
2931 	kmem_cache_destroy(ext4_ac_cachep);
2932 	kmem_cache_destroy(ext4_free_ext_cachep);
2933 }
2934 
2935 
2936 /*
2937  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
2938  * Returns 0 on success or an error code
2939  */
2940 static noinline_for_stack int
2941 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2942 				handle_t *handle, unsigned int reserv_blks)
2943 {
2944 	struct buffer_head *bitmap_bh = NULL;
2945 	struct ext4_super_block *es;
2946 	struct ext4_group_desc *gdp;
2947 	struct buffer_head *gdp_bh;
2948 	struct ext4_sb_info *sbi;
2949 	struct super_block *sb;
2950 	ext4_fsblk_t block;
2951 	int err, len;
2952 
2953 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2954 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2955 
2956 	sb = ac->ac_sb;
2957 	sbi = EXT4_SB(sb);
2958 	es = sbi->s_es;

2961 	err = -EIO;
2962 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2963 	if (!bitmap_bh)
2964 		goto out_err;
2965 
2966 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2967 	if (err)
2968 		goto out_err;
2969 
2970 	err = -EIO;
2971 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2972 	if (!gdp)
2973 		goto out_err;
2974 
2975 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2976 			ext4_free_blks_count(sb, gdp));
2977 
2978 	err = ext4_journal_get_write_access(handle, gdp_bh);
2979 	if (err)
2980 		goto out_err;
2981 
2982 	block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
2983 		+ ac->ac_b_ex.fe_start
2984 		+ le32_to_cpu(es->s_first_data_block);
2985 
2986 	len = ac->ac_b_ex.fe_len;
2987 	if (in_range(ext4_block_bitmap(sb, gdp), block, len) ||
2988 	    in_range(ext4_inode_bitmap(sb, gdp), block, len) ||
2989 	    in_range(block, ext4_inode_table(sb, gdp),
2990 		     EXT4_SB(sb)->s_itb_per_group) ||
2991 	    in_range(block + len - 1, ext4_inode_table(sb, gdp),
2992 		     EXT4_SB(sb)->s_itb_per_group)) {
2993 		ext4_error(sb, __func__,
2994 			   "Allocating block %llu in system zone of %d group\n",
2995 			   block, ac->ac_b_ex.fe_group);
2996 		/* File system was mounted not to panic on error;
2997 		 * fix the bitmap and repeat the block allocation.
2998 		 * We leak some of the blocks here.
2999 		 */
3000 		mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
3001 				bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3002 				ac->ac_b_ex.fe_len);
3003 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3004 		if (!err)
3005 			err = -EAGAIN;
3006 		goto out_err;
3007 	}
3008 #ifdef AGGRESSIVE_CHECK
3009 	{
3010 		int i;
3011 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3012 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3013 						bitmap_bh->b_data));
3014 		}
3015 	}
3016 #endif
3017 	spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3018 	mb_set_bits(NULL, bitmap_bh->b_data,
3019 				ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
3020 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
3021 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3022 		ext4_free_blks_set(sb, gdp,
3023 					ext4_free_blocks_after_init(sb,
3024 					ac->ac_b_ex.fe_group, gdp));
3025 	}
3026 	len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
3027 	ext4_free_blks_set(sb, gdp, len);
3028 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
3029 	spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3030 	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
3031 	/*
3032 	 * Now reduce the dirty block count as well; it should not go negative
3033 	 */
3034 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3035 		/* release all the reserved blocks if non delalloc */
3036 		percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
3037 	else {
3038 		percpu_counter_sub(&sbi->s_dirtyblocks_counter,
3039 						ac->ac_b_ex.fe_len);
3040 		/* convert reserved quota blocks to real quota blocks */
3041 		vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
3042 	}
3043 
3044 	if (sbi->s_log_groups_per_flex) {
3045 		ext4_group_t flex_group = ext4_flex_group(sbi,
3046 							  ac->ac_b_ex.fe_group);
3047 		atomic_sub(ac->ac_b_ex.fe_len,
3048 			   &sbi->s_flex_groups[flex_group].free_blocks);
3049 	}
3050 
3051 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3052 	if (err)
3053 		goto out_err;
3054 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3055 
3056 out_err:
3057 	sb->s_dirt = 1;
3058 	brelse(bitmap_bh);
3059 	return err;
3060 }
3061 
3062 /*
3063  * here we normalize the request for a locality group.
3064  * Group requests are normalized to s_stripe size if that is set via mount
3065  * option. If not we set it to s_mb_group_prealloc which can be configured via
3066  * /sys/fs/ext4/<partition>/mb_group_prealloc
3067  *
3068  * XXX: should we try to preallocate more than the group has now?
3069  */
3070 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3071 {
3072 	struct super_block *sb = ac->ac_sb;
3073 	struct ext4_locality_group *lg = ac->ac_lg;
3074 
3075 	BUG_ON(lg == NULL);
3076 	if (EXT4_SB(sb)->s_stripe)
3077 		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
3078 	else
3079 		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3080 	mb_debug("#%u: goal %u blocks for locality group\n",
3081 		current->pid, ac->ac_g_ex.fe_len);
3082 }
3083 
3084 /*
3085  * Normalization means making request better in terms of
3086  * size and alignment
3087  */
3088 static noinline_for_stack void
3089 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3090 				struct ext4_allocation_request *ar)
3091 {
3092 	int bsbits, max;
3093 	ext4_lblk_t end;
3094 	loff_t size, orig_size, start_off;
3095 	ext4_lblk_t start, orig_start;
3096 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3097 	struct ext4_prealloc_space *pa;
3098 
3099 	/* only normalize data requests; metadata requests
3100 	   do not need preallocation */
3101 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3102 		return;
3103 
3104 	/* sometimes the caller wants exact blocks */
3105 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3106 		return;
3107 
3108 	/* caller may indicate that preallocation isn't
3109 	 * required (it's a tail, for example) */
3110 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3111 		return;
3112 
3113 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3114 		ext4_mb_normalize_group_request(ac);
3115 		return;
3116 	}
3117 
3118 	bsbits = ac->ac_sb->s_blocksize_bits;
3119 
3120 	/* first, let's learn the actual file size,
3121 	 * assuming the current request is allocated */
3122 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3123 	size = size << bsbits;
3124 	if (size < i_size_read(ac->ac_inode))
3125 		size = i_size_read(ac->ac_inode);
3126 
3127 	/* max size of free chunks */
3128 	max = 2 << bsbits;
3129 
3130 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
3131 		(req <= (size) || max <= (chunk_size))
3132 
3133 	/* first, try to predict filesize */
3134 	/* XXX: should this table be tunable? */
3135 	start_off = 0;
3136 	if (size <= 16 * 1024) {
3137 		size = 16 * 1024;
3138 	} else if (size <= 32 * 1024) {
3139 		size = 32 * 1024;
3140 	} else if (size <= 64 * 1024) {
3141 		size = 64 * 1024;
3142 	} else if (size <= 128 * 1024) {
3143 		size = 128 * 1024;
3144 	} else if (size <= 256 * 1024) {
3145 		size = 256 * 1024;
3146 	} else if (size <= 512 * 1024) {
3147 		size = 512 * 1024;
3148 	} else if (size <= 1024 * 1024) {
3149 		size = 1024 * 1024;
3150 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3151 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3152 						(21 - bsbits)) << 21;
3153 		size = 2 * 1024 * 1024;
3154 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3155 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3156 							(22 - bsbits)) << 22;
3157 		size = 4 * 1024 * 1024;
3158 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3159 					(8<<20)>>bsbits, max, 8 * 1024)) {
3160 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3161 							(23 - bsbits)) << 23;
3162 		size = 8 * 1024 * 1024;
3163 	} else {
3164 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3165 		size	  = ac->ac_o_ex.fe_len << bsbits;
3166 	}
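	/*
	 * Example with illustrative sizes: a request that leaves the file
	 * at ~200k bytes falls in the 256k bucket above; an 80k logical
	 * offset in a ~3MB file lands in the 2MB bucket, so (with 4k
	 * blocks) start_off is aligned down to a 2MB boundary and the
	 * goal size becomes 2MB.
	 */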
3167 	orig_size = size = size >> bsbits;
3168 	orig_start = start = start_off >> bsbits;
3169 
3170 	/* don't cover already allocated blocks in selected range */
3171 	if (ar->pleft && start <= ar->lleft) {
3172 		size -= ar->lleft + 1 - start;
3173 		start = ar->lleft + 1;
3174 	}
3175 	if (ar->pright && start + size - 1 >= ar->lright)
3176 		size -= start + size - ar->lright;
3177 
3178 	end = start + size;
3179 
3180 	/* check we don't cross already preallocated blocks */
3181 	rcu_read_lock();
3182 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3183 		ext4_lblk_t pa_end;
3184 
3185 		if (pa->pa_deleted)
3186 			continue;
3187 		spin_lock(&pa->pa_lock);
3188 		if (pa->pa_deleted) {
3189 			spin_unlock(&pa->pa_lock);
3190 			continue;
3191 		}
3192 
3193 		pa_end = pa->pa_lstart + pa->pa_len;
3194 
3195 		/* PA must not overlap original request */
3196 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3197 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3198 
3199 		/* skip PAs that the normalized request doesn't overlap with */
3200 		if (pa->pa_lstart >= end) {
3201 			spin_unlock(&pa->pa_lock);
3202 			continue;
3203 		}
3204 		if (pa_end <= start) {
3205 			spin_unlock(&pa->pa_lock);
3206 			continue;
3207 		}
3208 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3209 
3210 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3211 			BUG_ON(pa_end < start);
3212 			start = pa_end;
3213 		}
3214 
3215 		if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3216 			BUG_ON(pa->pa_lstart > end);
3217 			end = pa->pa_lstart;
3218 		}
3219 		spin_unlock(&pa->pa_lock);
3220 	}
3221 	rcu_read_unlock();
3222 	size = end - start;
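	/*
	 * Example: a normalized window [0, 512) with an existing PA at
	 * logical [128, 160) and the original request at block 200 is
	 * trimmed on the left: start becomes 160 and size 352.
	 */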
3223 
3224 	/* XXX: extra loop to check we really don't overlap preallocations */
3225 	rcu_read_lock();
3226 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3227 		ext4_lblk_t pa_end;
3228 		spin_lock(&pa->pa_lock);
3229 		if (pa->pa_deleted == 0) {
3230 			pa_end = pa->pa_lstart + pa->pa_len;
3231 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3232 		}
3233 		spin_unlock(&pa->pa_lock);
3234 	}
3235 	rcu_read_unlock();
3236 
3237 	if (start + size <= ac->ac_o_ex.fe_logical &&
3238 			start > ac->ac_o_ex.fe_logical) {
3239 		printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3240 			(unsigned long) start, (unsigned long) size,
3241 			(unsigned long) ac->ac_o_ex.fe_logical);
3242 	}
3243 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3244 			start > ac->ac_o_ex.fe_logical);
3245 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3246 
3247 	/* now prepare goal request */
3248 
3249 	/* XXX: is it better to align blocks WRT logical
3250 	 * placement or to satisfy a big request as is */
3251 	ac->ac_g_ex.fe_logical = start;
3252 	ac->ac_g_ex.fe_len = size;
3253 
3254 	/* define goal start in order to merge */
3255 	if (ar->pright && (ar->lright == (start + size))) {
3256 		/* merge to the right */
3257 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3258 						&ac->ac_f_ex.fe_group,
3259 						&ac->ac_f_ex.fe_start);
3260 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3261 	}
3262 	if (ar->pleft && (ar->lleft + 1 == start)) {
3263 		/* merge to the left */
3264 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3265 						&ac->ac_f_ex.fe_group,
3266 						&ac->ac_f_ex.fe_start);
3267 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3268 	}
3269 
3270 	mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3271 		(unsigned) orig_size, (unsigned) start);
3272 }
3273 
3274 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3275 {
3276 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3277 
3278 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3279 		atomic_inc(&sbi->s_bal_reqs);
3280 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3281 		if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3282 			atomic_inc(&sbi->s_bal_success);
3283 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3284 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3285 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3286 			atomic_inc(&sbi->s_bal_goals);
3287 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3288 			atomic_inc(&sbi->s_bal_breaks);
3289 	}
3290 
3291 	ext4_mb_store_history(ac);
3292 }
3293 
3294 /*
3295  * use blocks preallocated to inode
3296  */
3297 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3298 				struct ext4_prealloc_space *pa)
3299 {
3300 	ext4_fsblk_t start;
3301 	ext4_fsblk_t end;
3302 	int len;
3303 
3304 	/* found preallocated blocks, use them */
3305 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3306 	end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3307 	len = end - start;
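	/*
	 * Worked example with illustrative numbers: a PA with
	 * pa_lstart = 100, pa_pstart = 5000, pa_len = 16 serving a
	 * request of 8 blocks at logical 104 yields start = 5004,
	 * end = min(5016, 5012) = 5012, len = 8.
	 */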
3308 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3309 					&ac->ac_b_ex.fe_start);
3310 	ac->ac_b_ex.fe_len = len;
3311 	ac->ac_status = AC_STATUS_FOUND;
3312 	ac->ac_pa = pa;
3313 
3314 	BUG_ON(start < pa->pa_pstart);
3315 	BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3316 	BUG_ON(pa->pa_free < len);
3317 	pa->pa_free -= len;
3318 
3319 	mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
3320 }
3321 
3322 /*
3323  * use blocks preallocated to locality group
3324  */
3325 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3326 				struct ext4_prealloc_space *pa)
3327 {
3328 	unsigned int len = ac->ac_o_ex.fe_len;
3329 
3330 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3331 					&ac->ac_b_ex.fe_group,
3332 					&ac->ac_b_ex.fe_start);
3333 	ac->ac_b_ex.fe_len = len;
3334 	ac->ac_status = AC_STATUS_FOUND;
3335 	ac->ac_pa = pa;
3336 
3337 	/* we don't correct pa_pstart or pa_len here to avoid
3338 	 * possible races when the group is being loaded concurrently;
3339 	 * instead we correct the pa later, after blocks are marked
3340 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3341 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3342 	 */
3343 	mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3344 }
3345 
3346 /*
3347  * Return the prealloc space that has the minimal distance
3348  * from the goal block. @cpa is the prealloc
3349  * space with the currently known minimal distance
3350  * from the goal block.
3351  */
3352 static struct ext4_prealloc_space *
3353 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3354 			struct ext4_prealloc_space *pa,
3355 			struct ext4_prealloc_space *cpa)
3356 {
3357 	ext4_fsblk_t cur_distance, new_distance;
3358 
3359 	if (cpa == NULL) {
3360 		atomic_inc(&pa->pa_count);
3361 		return pa;
3362 	}
3363 	cur_distance = abs(goal_block - cpa->pa_pstart);
3364 	new_distance = abs(goal_block - pa->pa_pstart);
3365 
3366 	if (cur_distance < new_distance)
3367 		return cpa;
3368 
3369 	/* drop the previous reference */
3370 	atomic_dec(&cpa->pa_count);
3371 	atomic_inc(&pa->pa_count);
3372 	return pa;
3373 }
3374 
3375 /*
3376  * search goal blocks in preallocated space
3377  */
3378 static noinline_for_stack int
3379 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3380 {
3381 	int order, i;
3382 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3383 	struct ext4_locality_group *lg;
3384 	struct ext4_prealloc_space *pa, *cpa = NULL;
3385 	ext4_fsblk_t goal_block;
3386 
3387 	/* only data can be preallocated */
3388 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3389 		return 0;
3390 
3391 	/* first, try per-file preallocation */
3392 	rcu_read_lock();
3393 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3394 
3395 		/* all fields in this condition don't change,
3396 		 * so we can skip locking for them */
3397 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3398 			ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3399 			continue;
3400 
3401 		/* found preallocated blocks, use them */
3402 		spin_lock(&pa->pa_lock);
3403 		if (pa->pa_deleted == 0 && pa->pa_free) {
3404 			atomic_inc(&pa->pa_count);
3405 			ext4_mb_use_inode_pa(ac, pa);
3406 			spin_unlock(&pa->pa_lock);
3407 			ac->ac_criteria = 10;
3408 			rcu_read_unlock();
3409 			return 1;
3410 		}
3411 		spin_unlock(&pa->pa_lock);
3412 	}
3413 	rcu_read_unlock();
3414 
3415 	/* can we use group allocation? */
3416 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3417 		return 0;
3418 
3419 	/* inode may have no locality group for some reason */
3420 	lg = ac->ac_lg;
3421 	if (lg == NULL)
3422 		return 0;
3423 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3424 	if (order > PREALLOC_TB_SIZE - 1)
3425 		/* The max size of hash table is PREALLOC_TB_SIZE */
3426 		order = PREALLOC_TB_SIZE - 1;
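	/*
	 * e.g. a request for 12 blocks gives fls(12) - 1 = 3, so the
	 * search below starts at lg_prealloc_list[3] and walks the
	 * larger buckets from there.
	 */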
3427 
3428 	goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
3429 		     ac->ac_g_ex.fe_start +
3430 		     le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
3431 	/*
3432 	 * search for the prealloc space that has the
3433 	 * minimal distance from the goal block.
3434 	 */
3435 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3436 		rcu_read_lock();
3437 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3438 					pa_inode_list) {
3439 			spin_lock(&pa->pa_lock);
3440 			if (pa->pa_deleted == 0 &&
3441 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3442 
3443 				cpa = ext4_mb_check_group_pa(goal_block,
3444 								pa, cpa);
3445 			}
3446 			spin_unlock(&pa->pa_lock);
3447 		}
3448 		rcu_read_unlock();
3449 	}
3450 	if (cpa) {
3451 		ext4_mb_use_group_pa(ac, cpa);
3452 		ac->ac_criteria = 20;
3453 		return 1;
3454 	}
3455 	return 0;
3456 }
3457 
3458 /*
3459  * the function goes through all blocks freed in the group
3460  * but not yet committed and marks them used in the in-core bitmap.
3461  * the buddy must be generated from this bitmap.
3462  * Needs to be called with the ext4 group lock (ext4_lock_group)
3463  */
3464 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3465 						ext4_group_t group)
3466 {
3467 	struct rb_node *n;
3468 	struct ext4_group_info *grp;
3469 	struct ext4_free_data *entry;
3470 
3471 	grp = ext4_get_group_info(sb, group);
3472 	n = rb_first(&(grp->bb_free_root));
3473 
3474 	while (n) {
3475 		entry = rb_entry(n, struct ext4_free_data, node);
3476 		mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3477 				bitmap, entry->start_blk,
3478 				entry->count);
3479 		n = rb_next(n);
3480 	}
3481 	return;
3482 }
3483 
3484 /*
3485  * the function goes through all preallocations in this group and marks them
3486  * used in the in-core bitmap. the buddy must be generated from this bitmap.
3487  * Needs to be called with the ext4 group lock (ext4_lock_group)
3488  */
3489 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3490 					ext4_group_t group)
3491 {
3492 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3493 	struct ext4_prealloc_space *pa;
3494 	struct list_head *cur;
3495 	ext4_group_t groupnr;
3496 	ext4_grpblk_t start;
3497 	int preallocated = 0;
3498 	int count = 0;
3499 	int len;
3500 
3501 	/* every form of preallocation discard loads the group first,
3502 	 * so the only competing code is preallocation use.
3503 	 * we don't need any locking here.
3504 	 * note that we do NOT skip preallocations with pa_deleted set;
3505 	 * otherwise we could leave used blocks available for
3506 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3507 	 * is dropping the preallocation
3508 	 */
3509 	list_for_each(cur, &grp->bb_prealloc_list) {
3510 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3511 		spin_lock(&pa->pa_lock);
3512 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3513 					     &groupnr, &start);
3514 		len = pa->pa_len;
3515 		spin_unlock(&pa->pa_lock);
3516 		if (unlikely(len == 0))
3517 			continue;
3518 		BUG_ON(groupnr != group);
3519 		mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3520 						bitmap, start, len);
3521 		preallocated += len;
3522 		count++;
3523 	}
3524 	mb_debug("preallocated %u for group %u\n", preallocated, group);
3525 }
3526 
3527 static void ext4_mb_pa_callback(struct rcu_head *head)
3528 {
3529 	struct ext4_prealloc_space *pa;
3530 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3531 	kmem_cache_free(ext4_pspace_cachep, pa);
3532 }
3533 
3534 /*
3535  * drops a reference to preallocated space descriptor
3536  * if this was the last reference and the space is consumed
3537  */
3538 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3539 			struct super_block *sb, struct ext4_prealloc_space *pa)
3540 {
3541 	ext4_group_t grp;
3542 	ext4_fsblk_t grp_blk;
3543 
3544 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3545 		return;
3546 
3547 	/* in this short window concurrent discard can set pa_deleted */
3548 	spin_lock(&pa->pa_lock);
3549 	if (pa->pa_deleted == 1) {
3550 		spin_unlock(&pa->pa_lock);
3551 		return;
3552 	}
3553 
3554 	pa->pa_deleted = 1;
3555 	spin_unlock(&pa->pa_lock);
3556 
3557 	grp_blk = pa->pa_pstart;
3558 	/*
3559 	 * If doing group-based preallocation, pa_pstart may be in the
3560 	 * next group when pa is used up
3561 	 */
3562 	if (pa->pa_type == MB_GROUP_PA)
3563 		grp_blk--;
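	/*
	 * (ext4_mb_release_context() advances pa_pstart as a group PA is
	 * consumed, so a fully used PA may point at the first block of the
	 * next group; backing up one block keeps the lookup below in the
	 * group the PA actually lived in.)
	 */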
3564 
3565 	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3566 
3567 	/*
3568 	 * possible race:
3569 	 *
3570 	 *  P1 (buddy init)			P2 (regular allocation)
3571 	 *					find block B in PA
3572 	 *  copy on-disk bitmap to buddy
3573 	 *  					mark B in on-disk bitmap
3574 	 *					drop PA from group
3575 	 *  mark all PAs in buddy
3576 	 *
3577 	 * thus, P1 initializes buddy with B available. to prevent this
3578 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3579 	 * against that pair
3580 	 */
3581 	ext4_lock_group(sb, grp);
3582 	list_del(&pa->pa_group_list);
3583 	ext4_unlock_group(sb, grp);
3584 
3585 	spin_lock(pa->pa_obj_lock);
3586 	list_del_rcu(&pa->pa_inode_list);
3587 	spin_unlock(pa->pa_obj_lock);
3588 
3589 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3590 }
3591 
3592 /*
3593  * creates new preallocated space for given inode
3594  */
3595 static noinline_for_stack int
3596 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3597 {
3598 	struct super_block *sb = ac->ac_sb;
3599 	struct ext4_prealloc_space *pa;
3600 	struct ext4_group_info *grp;
3601 	struct ext4_inode_info *ei;
3602 
3603 	/* preallocate only when the found space is larger than requested */
3604 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3605 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3606 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3607 
3608 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3609 	if (pa == NULL)
3610 		return -ENOMEM;
3611 
3612 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3613 		int winl;
3614 		int wins;
3615 		int win;
3616 		int offs;
3617 
3618 		/* we can't allocate as much as the normalizer wants,
3619 		 * so the found space must get a proper lstart
3620 		 * to cover the original request */
3621 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3622 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3623 
3624 		/* we're limited by the original request in that
3625 		 * the logical block must be covered anyway;
3626 		 * winl is the window we can move our chunk within */
3627 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3628 
3629 		/* also, we should cover whole original request */
3630 		wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3631 
3632 		/* the smallest one defines real window */
3633 		win = min(winl, wins);
3634 
3635 		offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3636 		if (offs && offs < win)
3637 			win = offs;
3638 
3639 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3640 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3641 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3642 	}
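	/*
	 * Worked example with illustrative numbers: for an original request
	 * at logical block 10 of length 2, a goal starting at logical 0 and
	 * a found chunk of 8 blocks: winl == 10, wins == 6, offs == 10 % 8
	 * == 2, so win == 2 and the chunk is placed at logical block 8,
	 * still covering the original request at blocks 10-11.
	 */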
3643 
3644 	/* preallocation can change ac_b_ex, thus we store actually
3645 	 * allocated blocks for history */
3646 	ac->ac_f_ex = ac->ac_b_ex;
3647 
3648 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3649 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3650 	pa->pa_len = ac->ac_b_ex.fe_len;
3651 	pa->pa_free = pa->pa_len;
3652 	atomic_set(&pa->pa_count, 1);
3653 	spin_lock_init(&pa->pa_lock);
3654 	INIT_LIST_HEAD(&pa->pa_inode_list);
3655 	INIT_LIST_HEAD(&pa->pa_group_list);
3656 	pa->pa_deleted = 0;
3657 	pa->pa_type = MB_INODE_PA;
3658 
3659 	mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3660 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3661 	trace_mark(ext4_mb_new_inode_pa,
3662 		   "dev %s ino %lu pstart %llu len %u lstart %u",
3663 		   sb->s_id, ac->ac_inode->i_ino,
3664 		   pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3665 
3666 	ext4_mb_use_inode_pa(ac, pa);
3667 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3668 
3669 	ei = EXT4_I(ac->ac_inode);
3670 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3671 
3672 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3673 	pa->pa_inode = ac->ac_inode;
3674 
3675 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3676 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3677 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3678 
3679 	spin_lock(pa->pa_obj_lock);
3680 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3681 	spin_unlock(pa->pa_obj_lock);
3682 
3683 	return 0;
3684 }
3685 
3686 /*
3687  * creates new preallocated space for the locality group the inode belongs to
3688  */
3689 static noinline_for_stack int
3690 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3691 {
3692 	struct super_block *sb = ac->ac_sb;
3693 	struct ext4_locality_group *lg;
3694 	struct ext4_prealloc_space *pa;
3695 	struct ext4_group_info *grp;
3696 
3697 	/* preallocate only when the found space is larger than requested */
3698 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3699 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3700 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3701 
3702 	BUG_ON(ext4_pspace_cachep == NULL);
3703 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3704 	if (pa == NULL)
3705 		return -ENOMEM;
3706 
3707 	/* preallocation can change ac_b_ex, thus we store actually
3708 	 * allocated blocks for history */
3709 	ac->ac_f_ex = ac->ac_b_ex;
3710 
3711 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3712 	pa->pa_lstart = pa->pa_pstart;
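	/* a group PA is not tied to any particular file's logical offset,
	 * so the logical start simply mirrors the physical start */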
3713 	pa->pa_len = ac->ac_b_ex.fe_len;
3714 	pa->pa_free = pa->pa_len;
3715 	atomic_set(&pa->pa_count, 1);
3716 	spin_lock_init(&pa->pa_lock);
3717 	INIT_LIST_HEAD(&pa->pa_inode_list);
3718 	INIT_LIST_HEAD(&pa->pa_group_list);
3719 	pa->pa_deleted = 0;
3720 	pa->pa_type = MB_GROUP_PA;
3721 
3722 	mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3723 		 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3724 	trace_mark(ext4_mb_new_group_pa, "dev %s pstart %llu len %u lstart %u",
3725 		   sb->s_id, pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3726 
3727 	ext4_mb_use_group_pa(ac, pa);
3728 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3729 
3730 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3731 	lg = ac->ac_lg;
3732 	BUG_ON(lg == NULL);
3733 
3734 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3735 	pa->pa_inode = NULL;
3736 
3737 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3738 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3739 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3740 
3741 	/*
3742 	 * We will later add the new pa to the right bucket
3743 	 * after updating the pa_free in ext4_mb_release_context
3744 	 */
3745 	return 0;
3746 }
3747 
3748 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3749 {
3750 	int err;
3751 
3752 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3753 		err = ext4_mb_new_group_pa(ac);
3754 	else
3755 		err = ext4_mb_new_inode_pa(ac);
3756 	return err;
3757 }
3758 
3759 /*
3760  * finds all unused blocks in the on-disk bitmap, frees them in
3761  * the in-core bitmap and buddy.
3762  * @pa must be unlinked from inode and group lists, so that
3763  * nobody else can find/use it.
3764  * the caller MUST hold group/inode locks.
3765  * TODO: optimize the case when there are no in-core structures yet
3766  */
3767 static noinline_for_stack int
3768 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3769 			struct ext4_prealloc_space *pa,
3770 			struct ext4_allocation_context *ac)
3771 {
3772 	struct super_block *sb = e4b->bd_sb;
3773 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3774 	unsigned int end;
3775 	unsigned int next;
3776 	ext4_group_t group;
3777 	ext4_grpblk_t bit;
3778 	unsigned long long grp_blk_start;
3779 	sector_t start;
3780 	int err = 0;
3781 	int free = 0;
3782 
3783 	BUG_ON(pa->pa_deleted == 0);
3784 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3785 	grp_blk_start = pa->pa_pstart - bit;
3786 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3787 	end = bit + pa->pa_len;
3788 
3789 	if (ac) {
3790 		ac->ac_sb = sb;
3791 		ac->ac_inode = pa->pa_inode;
3792 		ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3793 	}
3794 
3795 	while (bit < end) {
3796 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3797 		if (bit >= end)
3798 			break;
3799 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3800 		start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3801 				le32_to_cpu(sbi->s_es->s_first_data_block);
3802 		mb_debug("    free preallocated %u/%u in group %u\n",
3803 				(unsigned) start, (unsigned) next - bit,
3804 				(unsigned) group);
3805 		free += next - bit;
3806 
3807 		if (ac) {
3808 			ac->ac_b_ex.fe_group = group;
3809 			ac->ac_b_ex.fe_start = bit;
3810 			ac->ac_b_ex.fe_len = next - bit;
3811 			ac->ac_b_ex.fe_logical = 0;
3812 			ext4_mb_store_history(ac);
3813 		}
3814 
3815 		trace_mark(ext4_mb_release_inode_pa,
3816 			   "dev %s ino %lu block %llu count %u",
3817 			   sb->s_id, pa->pa_inode->i_ino, grp_blk_start + bit,
3818 			   next - bit);
3819 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3820 		bit = next + 1;
3821 	}
3822 	if (free != pa->pa_free) {
3823 		printk(KERN_CRIT "pa %p: logical %lu, phys. %lu, len %lu\n",
3824 			pa, (unsigned long) pa->pa_lstart,
3825 			(unsigned long) pa->pa_pstart,
3826 			(unsigned long) pa->pa_len);
3827 		ext4_grp_locked_error(sb, group,
3828 					__func__, "free %u, pa_free %u",
3829 					free, pa->pa_free);
3830 		/*
3831 		 * pa is already deleted so we use the value obtained
3832 		 * from the bitmap and continue.
3833 		 */
3834 	}
3835 	atomic_add(free, &sbi->s_mb_discarded);
3836 
3837 	return err;
3838 }
3839 
3840 static noinline_for_stack int
3841 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3842 				struct ext4_prealloc_space *pa,
3843 				struct ext4_allocation_context *ac)
3844 {
3845 	struct super_block *sb = e4b->bd_sb;
3846 	ext4_group_t group;
3847 	ext4_grpblk_t bit;
3848 
3849 	if (ac)
3850 		ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3851 
3852 	trace_mark(ext4_mb_release_group_pa, "dev %s pstart %llu len %d",
3853 		   sb->s_id, pa->pa_pstart, pa->pa_len);
3854 	BUG_ON(pa->pa_deleted == 0);
3855 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3856 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3857 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3858 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3859 
3860 	if (ac) {
3861 		ac->ac_sb = sb;
3862 		ac->ac_inode = NULL;
3863 		ac->ac_b_ex.fe_group = group;
3864 		ac->ac_b_ex.fe_start = bit;
3865 		ac->ac_b_ex.fe_len = pa->pa_len;
3866 		ac->ac_b_ex.fe_logical = 0;
3867 		ext4_mb_store_history(ac);
3868 	}
3869 
3870 	return 0;
3871 }
3872 
3873 /*
3874  * releases all preallocations in given group
3875  *
3876  * first, we need to decide discard policy:
3877  * - when do we discard
3878  *   1) ENOSPC
3879  * - how many do we discard
3880  *   1) how many requested
3881  */
3882 static noinline_for_stack int
3883 ext4_mb_discard_group_preallocations(struct super_block *sb,
3884 					ext4_group_t group, int needed)
3885 {
3886 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3887 	struct buffer_head *bitmap_bh = NULL;
3888 	struct ext4_prealloc_space *pa, *tmp;
3889 	struct ext4_allocation_context *ac;
3890 	struct list_head list;
3891 	struct ext4_buddy e4b;
3892 	int err;
3893 	int busy = 0;
3894 	int free = 0;
3895 
3896 	mb_debug("discard preallocation for group %u\n", group);
3897 
3898 	if (list_empty(&grp->bb_prealloc_list))
3899 		return 0;
3900 
3901 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3902 	if (bitmap_bh == NULL) {
3903 		ext4_error(sb, __func__, "Error in reading block "
3904 				"bitmap for %u", group);
3905 		return 0;
3906 	}
3907 
3908 	err = ext4_mb_load_buddy(sb, group, &e4b);
3909 	if (err) {
3910 		ext4_error(sb, __func__, "Error in loading buddy "
3911 				"information for %u", group);
3912 		put_bh(bitmap_bh);
3913 		return 0;
3914 	}
3915 
3916 	if (needed == 0)
3917 		needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
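	/* needed == 0 means "discard everything": blocks-per-group + 1
	 * exceeds any possible amount of free space in a single group */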
3918 
3919 	INIT_LIST_HEAD(&list);
3920 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3921 repeat:
3922 	ext4_lock_group(sb, group);
3923 	list_for_each_entry_safe(pa, tmp,
3924 				&grp->bb_prealloc_list, pa_group_list) {
3925 		spin_lock(&pa->pa_lock);
3926 		if (atomic_read(&pa->pa_count)) {
3927 			spin_unlock(&pa->pa_lock);
3928 			busy = 1;
3929 			continue;
3930 		}
3931 		if (pa->pa_deleted) {
3932 			spin_unlock(&pa->pa_lock);
3933 			continue;
3934 		}
3935 
3936 		/* seems this one can be freed ... */
3937 		pa->pa_deleted = 1;
3938 
3939 		/* we can trust pa_free ... */
3940 		free += pa->pa_free;
3941 
3942 		spin_unlock(&pa->pa_lock);
3943 
3944 		list_del(&pa->pa_group_list);
3945 		list_add(&pa->u.pa_tmp_list, &list);
3946 	}
3947 
3948 	/* if we still need more blocks and some PAs were used, try again */
3949 	if (free < needed && busy) {
3950 		busy = 0;
3951 		ext4_unlock_group(sb, group);
3952 		/*
3953 		 * Yield the CPU here so that we don't get soft lockup
3954 		 * in non preempt case.
3955 		 */
3956 		yield();
3957 		goto repeat;
3958 	}
3959 
3960 	/* found anything to free? */
3961 	if (list_empty(&list)) {
3962 		BUG_ON(free != 0);
3963 		goto out;
3964 	}
3965 
3966 	/* now free all selected PAs */
3967 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3968 
3969 		/* remove from object (inode or locality group) */
3970 		spin_lock(pa->pa_obj_lock);
3971 		list_del_rcu(&pa->pa_inode_list);
3972 		spin_unlock(pa->pa_obj_lock);
3973 
3974 		if (pa->pa_type == MB_GROUP_PA)
3975 			ext4_mb_release_group_pa(&e4b, pa, ac);
3976 		else
3977 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3978 
3979 		list_del(&pa->u.pa_tmp_list);
3980 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3981 	}
3982 
3983 out:
3984 	ext4_unlock_group(sb, group);
3985 	if (ac)
3986 		kmem_cache_free(ext4_ac_cachep, ac);
3987 	ext4_mb_release_desc(&e4b);
3988 	put_bh(bitmap_bh);
3989 	return free;
3990 }
3991 
3992 /*
3993  * releases all non-used preallocated blocks for given inode
3994  *
3995  * It's important to discard preallocations under i_data_sem
3996  * We don't want another block to be served from the prealloc
3997  * space when we are discarding the inode prealloc space.
3998  *
3999  * FIXME!! Make sure it is valid at all the call sites
4000  */
4001 void ext4_discard_preallocations(struct inode *inode)
4002 {
4003 	struct ext4_inode_info *ei = EXT4_I(inode);
4004 	struct super_block *sb = inode->i_sb;
4005 	struct buffer_head *bitmap_bh = NULL;
4006 	struct ext4_prealloc_space *pa, *tmp;
4007 	struct ext4_allocation_context *ac;
4008 	ext4_group_t group = 0;
4009 	struct list_head list;
4010 	struct ext4_buddy e4b;
4011 	int err;
4012 
4013 	if (!S_ISREG(inode->i_mode)) {
4014 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4015 		return;
4016 	}
4017 
4018 	mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
4019 	trace_mark(ext4_discard_preallocations, "dev %s ino %lu", sb->s_id,
4020 		   inode->i_ino);
4021 
4022 	INIT_LIST_HEAD(&list);
4023 
4024 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4025 repeat:
4026 	/* first, collect all pa's in the inode */
4027 	spin_lock(&ei->i_prealloc_lock);
4028 	while (!list_empty(&ei->i_prealloc_list)) {
4029 		pa = list_entry(ei->i_prealloc_list.next,
4030 				struct ext4_prealloc_space, pa_inode_list);
4031 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4032 		spin_lock(&pa->pa_lock);
4033 		if (atomic_read(&pa->pa_count)) {
4034 			/* this shouldn't happen often - nobody should
4035 			 * use preallocation while we're discarding it */
4036 			spin_unlock(&pa->pa_lock);
4037 			spin_unlock(&ei->i_prealloc_lock);
4038 			printk(KERN_ERR "uh-oh! used pa while discarding\n");
4039 			WARN_ON(1);
4040 			schedule_timeout_uninterruptible(HZ);
4041 			goto repeat;
4042 
4043 		}
4044 		if (pa->pa_deleted == 0) {
4045 			pa->pa_deleted = 1;
4046 			spin_unlock(&pa->pa_lock);
4047 			list_del_rcu(&pa->pa_inode_list);
4048 			list_add(&pa->u.pa_tmp_list, &list);
4049 			continue;
4050 		}
4051 
4052 		/* someone is deleting pa right now */
4053 		spin_unlock(&pa->pa_lock);
4054 		spin_unlock(&ei->i_prealloc_lock);
4055 
4056 		/* we have to wait here because pa_deleted
4057 		 * doesn't mean the pa is already unlinked from
4058 		 * the list. as we might be called from
4059 		 * ->clear_inode(), the inode will get freed
4060 		 * and a concurrent thread which is unlinking
4061 		 * the pa from the inode's list may access already
4062 		 * freed memory, bad-bad-bad */
4063 
4064 		/* XXX: if this happens too often, we can
4065 		 * add a flag to force wait only in case
4066 		 * of ->clear_inode(), but not in case of
4067 		 * regular truncate */
4068 		schedule_timeout_uninterruptible(HZ);
4069 		goto repeat;
4070 	}
4071 	spin_unlock(&ei->i_prealloc_lock);
4072 
4073 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4074 		BUG_ON(pa->pa_type != MB_INODE_PA);
4075 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4076 
4077 		err = ext4_mb_load_buddy(sb, group, &e4b);
4078 		if (err) {
4079 			ext4_error(sb, __func__, "Error in loading buddy "
4080 					"information for %u", group);
4081 			continue;
4082 		}
4083 
4084 		bitmap_bh = ext4_read_block_bitmap(sb, group);
4085 		if (bitmap_bh == NULL) {
4086 			ext4_error(sb, __func__, "Error in reading block "
4087 					"bitmap for %u", group);
4088 			ext4_mb_release_desc(&e4b);
4089 			continue;
4090 		}
4091 
4092 		ext4_lock_group(sb, group);
4093 		list_del(&pa->pa_group_list);
4094 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
4095 		ext4_unlock_group(sb, group);
4096 
4097 		ext4_mb_release_desc(&e4b);
4098 		put_bh(bitmap_bh);
4099 
4100 		list_del(&pa->u.pa_tmp_list);
4101 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4102 	}
4103 	if (ac)
4104 		kmem_cache_free(ext4_ac_cachep, ac);
4105 }
4106 
4107 /*
4108  * finds all preallocated spaces and returns blocks being freed to them
4109  * if a preallocated space becomes full (no block is used from the space)
4110  * then the function frees the space in the buddy
4111  * XXX: at the moment, truncate (which is the only way to free blocks)
4112  * discards all preallocations
4113  */
4114 static void ext4_mb_return_to_preallocation(struct inode *inode,
4115 					struct ext4_buddy *e4b,
4116 					sector_t block, int count)
4117 {
4118 	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
4119 }
4120 #ifdef MB_DEBUG
4121 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4122 {
4123 	struct super_block *sb = ac->ac_sb;
4124 	ext4_group_t i;
4125 
4126 	printk(KERN_ERR "EXT4-fs: Can't allocate:"
4127 			" Allocation context details:\n");
4128 	printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
4129 			ac->ac_status, ac->ac_flags);
4130 	printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
4131 			"best %lu/%lu/%lu@%lu cr %d\n",
4132 			(unsigned long)ac->ac_o_ex.fe_group,
4133 			(unsigned long)ac->ac_o_ex.fe_start,
4134 			(unsigned long)ac->ac_o_ex.fe_len,
4135 			(unsigned long)ac->ac_o_ex.fe_logical,
4136 			(unsigned long)ac->ac_g_ex.fe_group,
4137 			(unsigned long)ac->ac_g_ex.fe_start,
4138 			(unsigned long)ac->ac_g_ex.fe_len,
4139 			(unsigned long)ac->ac_g_ex.fe_logical,
4140 			(unsigned long)ac->ac_b_ex.fe_group,
4141 			(unsigned long)ac->ac_b_ex.fe_start,
4142 			(unsigned long)ac->ac_b_ex.fe_len,
4143 			(unsigned long)ac->ac_b_ex.fe_logical,
4144 			(int)ac->ac_criteria);
4145 	printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
4146 		ac->ac_found);
4147 	printk(KERN_ERR "EXT4-fs: groups: \n");
4148 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
4149 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4150 		struct ext4_prealloc_space *pa;
4151 		ext4_grpblk_t start;
4152 		struct list_head *cur;
4153 		ext4_lock_group(sb, i);
4154 		list_for_each(cur, &grp->bb_prealloc_list) {
4155 			pa = list_entry(cur, struct ext4_prealloc_space,
4156 					pa_group_list);
4157 			spin_lock(&pa->pa_lock);
4158 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4159 						     NULL, &start);
4160 			spin_unlock(&pa->pa_lock);
4161 			printk(KERN_ERR "PA:%lu:%d:%u \n", i,
4162 							start, pa->pa_len);
4163 		}
4164 		ext4_unlock_group(sb, i);
4165 
4166 		if (grp->bb_free == 0)
4167 			continue;
4168 		printk(KERN_ERR "%lu: %d/%d \n",
4169 		       i, grp->bb_free, grp->bb_fragments);
4170 	}
4171 	printk(KERN_ERR "\n");
4172 }
4173 #else
4174 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4175 {
4176 	return;
4177 }
4178 #endif
4179 
4180 /*
4181  * We use locality group preallocation for small files. The size of the
4182  * file is determined by the current size or the resulting size after
4183  * allocation, whichever is larger
4184  *
4185  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4186  */
4187 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4188 {
4189 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4190 	int bsbits = ac->ac_sb->s_blocksize_bits;
4191 	loff_t size, isize;
4192 
4193 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4194 		return;
4195 
4196 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4197 	isize = i_size_read(ac->ac_inode) >> bsbits;
4198 	size = max(size, isize);
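	/* both operands are in units of filesystem blocks: fe_logical +
	 * fe_len is the end of the request in blocks, and i_size was
	 * shifted down by the blocksize bits above, so the comparison
	 * against s_mb_stream_request (also in blocks) is unit-consistent */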
4199 
4200 	/* don't use group allocation for large files */
4201 	if (size >= sbi->s_mb_stream_request)
4202 		return;
4203 
4204 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4205 		return;
4206 
4207 	BUG_ON(ac->ac_lg != NULL);
4208 	/*
4209 	 * locality group prealloc space are per cpu. The reason for having
4210 	 * per cpu locality group is to reduce the contention between block
4211 	 * request from multiple CPUs.
4212 	 */
4213 	ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
4214 
4215 	/* we're going to use group allocation */
4216 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4217 
4218 	/* serialize all allocations in the group */
4219 	mutex_lock(&ac->ac_lg->lg_mutex);
4220 }
4221 
4222 static noinline_for_stack int
4223 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4224 				struct ext4_allocation_request *ar)
4225 {
4226 	struct super_block *sb = ar->inode->i_sb;
4227 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4228 	struct ext4_super_block *es = sbi->s_es;
4229 	ext4_group_t group;
4230 	unsigned int len;
4231 	ext4_fsblk_t goal;
4232 	ext4_grpblk_t block;
4233 
4234 	/* we can't allocate > group size */
4235 	len = ar->len;
4236 
4237 	/* just a dirty hack to filter too big requests  */
4238 	if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4239 		len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4240 
4241 	/* start searching from the goal */
4242 	goal = ar->goal;
4243 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4244 			goal >= ext4_blocks_count(es))
4245 		goal = le32_to_cpu(es->s_first_data_block);
4246 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4247 
4248 	/* set up allocation goals */
4249 	ac->ac_b_ex.fe_logical = ar->logical;
4250 	ac->ac_b_ex.fe_group = 0;
4251 	ac->ac_b_ex.fe_start = 0;
4252 	ac->ac_b_ex.fe_len = 0;
4253 	ac->ac_status = AC_STATUS_CONTINUE;
4254 	ac->ac_groups_scanned = 0;
4255 	ac->ac_ex_scanned = 0;
4256 	ac->ac_found = 0;
4257 	ac->ac_sb = sb;
4258 	ac->ac_inode = ar->inode;
4259 	ac->ac_o_ex.fe_logical = ar->logical;
4260 	ac->ac_o_ex.fe_group = group;
4261 	ac->ac_o_ex.fe_start = block;
4262 	ac->ac_o_ex.fe_len = len;
4263 	ac->ac_g_ex.fe_logical = ar->logical;
4264 	ac->ac_g_ex.fe_group = group;
4265 	ac->ac_g_ex.fe_start = block;
4266 	ac->ac_g_ex.fe_len = len;
4267 	ac->ac_f_ex.fe_len = 0;
4268 	ac->ac_flags = ar->flags;
4269 	ac->ac_2order = 0;
4270 	ac->ac_criteria = 0;
4271 	ac->ac_pa = NULL;
4272 	ac->ac_bitmap_page = NULL;
4273 	ac->ac_buddy_page = NULL;
4274 	ac->alloc_semp = NULL;
4275 	ac->ac_lg = NULL;
4276 
4277 	/* we have to define the context: will we work with a file or a
4278 	 * locality group. this is a policy, actually */
4279 	ext4_mb_group_or_file(ac);
4280 
4281 	mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4282 			"left: %u/%u, right %u/%u to %swritable\n",
4283 			(unsigned) ar->len, (unsigned) ar->logical,
4284 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4285 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4286 			(unsigned) ar->lright, (unsigned) ar->pright,
4287 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4288 	return 0;
4289 
4290 }
4291 
4292 static noinline_for_stack void
4293 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4294 					struct ext4_locality_group *lg,
4295 					int order, int total_entries)
4296 {
4297 	ext4_group_t group = 0;
4298 	struct ext4_buddy e4b;
4299 	struct list_head discard_list;
4300 	struct ext4_prealloc_space *pa, *tmp;
4301 	struct ext4_allocation_context *ac;
4302 
4303 	mb_debug("discard locality group preallocation\n");
4304 
4305 	INIT_LIST_HEAD(&discard_list);
4306 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4307 
4308 	spin_lock(&lg->lg_prealloc_lock);
4309 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4310 						pa_inode_list) {
4311 		spin_lock(&pa->pa_lock);
4312 		if (atomic_read(&pa->pa_count)) {
4313 			/*
4314 			 * This is the pa that we just used
4315 			 * for block allocation. So don't
4316 			 * free it
4317 			 */
4318 			spin_unlock(&pa->pa_lock);
4319 			continue;
4320 		}
4321 		if (pa->pa_deleted) {
4322 			spin_unlock(&pa->pa_lock);
4323 			continue;
4324 		}
4325 		/* only lg prealloc space */
4326 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4327 
4328 		/* seems this one can be freed ... */
4329 		pa->pa_deleted = 1;
4330 		spin_unlock(&pa->pa_lock);
4331 
4332 		list_del_rcu(&pa->pa_inode_list);
4333 		list_add(&pa->u.pa_tmp_list, &discard_list);
4334 
4335 		total_entries--;
4336 		if (total_entries <= 5) {
4337 			/*
4338 			 * we want to keep only 5 entries,
4339 			 * allowing the list to grow to 8. This
4340 			 * makes sure we don't call discard
4341 			 * soon for this list.
4342 			 */
4343 			break;
4344 		}
4345 	}
4346 	spin_unlock(&lg->lg_prealloc_lock);
4347 
4348 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4349 
4350 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4351 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4352 			ext4_error(sb, __func__, "Error in loading buddy "
4353 					"information for %u", group);
4354 			continue;
4355 		}
4356 		ext4_lock_group(sb, group);
4357 		list_del(&pa->pa_group_list);
4358 		ext4_mb_release_group_pa(&e4b, pa, ac);
4359 		ext4_unlock_group(sb, group);
4360 
4361 		ext4_mb_release_desc(&e4b);
4362 		list_del(&pa->u.pa_tmp_list);
4363 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4364 	}
4365 	if (ac)
4366 		kmem_cache_free(ext4_ac_cachep, ac);
4367 }
4368 
4369 /*
4370  * We have incremented pa_count. So it cannot be freed at this
4371  * point. Also we hold lg_mutex. So no parallel allocation is
4372  * possible from this lg. That means pa_free cannot be updated.
4373  *
4374  * A parallel ext4_mb_discard_group_preallocations is possible,
4375  * which can cause the lg_prealloc_list to be updated.
4376  */
4377 
4378 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4379 {
4380 	int order, added = 0, lg_prealloc_count = 1;
4381 	struct super_block *sb = ac->ac_sb;
4382 	struct ext4_locality_group *lg = ac->ac_lg;
4383 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4384 
4385 	order = fls(pa->pa_free) - 1;
4386 	if (order > PREALLOC_TB_SIZE - 1)
4387 		/* The max size of hash table is PREALLOC_TB_SIZE */
4388 		order = PREALLOC_TB_SIZE - 1;
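	/* illustrative example: pa_free == 100 gives fls(100) == 7, so the
	 * PA lands in bucket 6, which holds PAs of 64..127 free blocks */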
4389 	/* Add the prealloc space to lg */
4390 	rcu_read_lock();
4391 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4392 						pa_inode_list) {
4393 		spin_lock(&tmp_pa->pa_lock);
4394 		if (tmp_pa->pa_deleted) {
4395 			spin_unlock(&tmp_pa->pa_lock);
4396 			continue;
4397 		}
4398 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4399 			/* insert before tmp_pa, keeping the list sorted by pa_free */
4400 			list_add_tail_rcu(&pa->pa_inode_list,
4401 						&tmp_pa->pa_inode_list);
4402 			added = 1;
4403 			/*
4404 			 * we want to count the total
4405 			 * number of entries in the list
4406 			 */
4407 		}
4408 		spin_unlock(&tmp_pa->pa_lock);
4409 		lg_prealloc_count++;
4410 	}
4411 	if (!added)
4412 		list_add_tail_rcu(&pa->pa_inode_list,
4413 					&lg->lg_prealloc_list[order]);
4414 	rcu_read_unlock();
4415 
4416 	/* Now trim the list to be not more than 8 elements */
4417 	if (lg_prealloc_count > 8) {
4418 		ext4_mb_discard_lg_preallocations(sb, lg,
4419 						order, lg_prealloc_count);
4420 		return;
4421 	}
4422 	return;
4423 }
4424 
4425 /*
4426  * release all resources we used in allocation
4427  */
4428 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4429 {
4430 	struct ext4_prealloc_space *pa = ac->ac_pa;
4431 	if (pa) {
4432 		if (pa->pa_type == MB_GROUP_PA) {
4433 			/* see comment in ext4_mb_use_group_pa() */
4434 			spin_lock(&pa->pa_lock);
4435 			pa->pa_pstart += ac->ac_b_ex.fe_len;
4436 			pa->pa_lstart += ac->ac_b_ex.fe_len;
4437 			pa->pa_free -= ac->ac_b_ex.fe_len;
4438 			pa->pa_len -= ac->ac_b_ex.fe_len;
4439 			spin_unlock(&pa->pa_lock);
4440 		}
4441 	}
4442 	if (ac->alloc_semp)
4443 		up_read(ac->alloc_semp);
4444 	if (pa) {
4445 		/*
4446 		 * We want to add the pa to the right bucket.
4447 		 * Remove it from the list and while adding
4448 		 * make sure the list to which we are adding
4449 		 * doesn't grow big.  We need to release
4450 		 * alloc_semp before calling ext4_mb_add_n_trim()
4451 		 */
4452 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4453 			spin_lock(pa->pa_obj_lock);
4454 			list_del_rcu(&pa->pa_inode_list);
4455 			spin_unlock(pa->pa_obj_lock);
4456 			ext4_mb_add_n_trim(ac);
4457 		}
4458 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4459 	}
4460 	if (ac->ac_bitmap_page)
4461 		page_cache_release(ac->ac_bitmap_page);
4462 	if (ac->ac_buddy_page)
4463 		page_cache_release(ac->ac_buddy_page);
4464 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4465 		mutex_unlock(&ac->ac_lg->lg_mutex);
4466 	ext4_mb_collect_stats(ac);
4467 	return 0;
4468 }
4469 
4470 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4471 {
4472 	ext4_group_t i;
4473 	int ret;
4474 	int freed = 0;
4475 
4476 	trace_mark(ext4_mb_discard_preallocations, "dev %s needed %d",
4477 		   sb->s_id, needed);
4478 	for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4479 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4480 		freed += ret;
4481 		needed -= ret;
4482 	}
4483 
4484 	return freed;
4485 }
4486 
4487 /*
4488  * Main entry point into mballoc to allocate blocks
4489  * it tries to use preallocation first, then falls back
4490  * to usual allocation
4491  */
4492 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4493 				 struct ext4_allocation_request *ar, int *errp)
4494 {
4495 	int freed;
4496 	struct ext4_allocation_context *ac = NULL;
4497 	struct ext4_sb_info *sbi;
4498 	struct super_block *sb;
4499 	ext4_fsblk_t block = 0;
4500 	unsigned int inquota = 0;
4501 	unsigned int reserv_blks = 0;
4502 
4503 	sb = ar->inode->i_sb;
4504 	sbi = EXT4_SB(sb);
4505 
4506 	trace_mark(ext4_request_blocks, "dev %s flags %u len %u ino %lu "
4507 		   "lblk %llu goal %llu lleft %llu lright %llu "
4508 		   "pleft %llu pright %llu ",
4509 		   sb->s_id, ar->flags, ar->len,
4510 		   ar->inode ? ar->inode->i_ino : 0,
4511 		   (unsigned long long) ar->logical,
4512 		   (unsigned long long) ar->goal,
4513 		   (unsigned long long) ar->lleft,
4514 		   (unsigned long long) ar->lright,
4515 		   (unsigned long long) ar->pleft,
4516 		   (unsigned long long) ar->pright);
4517 
4518 	/*
4519 	 * For delayed allocation, we could skip the ENOSPC and
4520 	 * EDQUOT check, as blocks and quotas have already been
4521 	 * reserved when the data was copied into the pagecache.
4522 	 */
4523 	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4524 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4525 	else {
4526 		/* Without delayed allocation we need to verify
4527 		 * there are enough free blocks to do block allocation
4528 		 * and verify allocation doesn't exceed the quota limits.
4529 		 */
4530 		while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4531 			/* let others free the space */
4532 			yield();
4533 			ar->len = ar->len >> 1;
4534 		}
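		/* e.g. a 64-block request that cannot be claimed is retried
		 * at 32, 16, 8, ... blocks until either the claim succeeds
		 * or the length reaches zero and we return ENOSPC below */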
4535 		if (!ar->len) {
4536 			*errp = -ENOSPC;
4537 			return 0;
4538 		}
4539 		reserv_blks = ar->len;
4540 		while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
4541 			ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4542 			ar->len--;
4543 		}
4544 		inquota = ar->len;
4545 		if (ar->len == 0) {
4546 			*errp = -EDQUOT;
4547 			goto out3;
4548 		}
4549 	}
4550 
4551 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4552 	if (!ac) {
4553 		ar->len = 0;
4554 		*errp = -ENOMEM;
4555 		goto out1;
4556 	}
4557 
4558 	*errp = ext4_mb_initialize_context(ac, ar);
4559 	if (*errp) {
4560 		ar->len = 0;
4561 		goto out2;
4562 	}
4563 
4564 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4565 	if (!ext4_mb_use_preallocated(ac)) {
4566 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4567 		ext4_mb_normalize_request(ac, ar);
4568 repeat:
4569 		/* allocate space in core */
4570 		ext4_mb_regular_allocator(ac);
4571 
4572 		/* as we've just preallocated more space than
4573 		 * the user originally requested, we store the allocated
4574 		 * space in a special descriptor */
4575 		if (ac->ac_status == AC_STATUS_FOUND &&
4576 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4577 			ext4_mb_new_preallocation(ac);
4578 	}
4579 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4580 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4581 		if (*errp ==  -EAGAIN) {
4582 			/*
4583 			 * drop the reference that we took
4584 			 * in ext4_mb_use_best_found
4585 			 */
4586 			ext4_mb_release_context(ac);
4587 			ac->ac_b_ex.fe_group = 0;
4588 			ac->ac_b_ex.fe_start = 0;
4589 			ac->ac_b_ex.fe_len = 0;
4590 			ac->ac_status = AC_STATUS_CONTINUE;
4591 			goto repeat;
4592 		} else if (*errp) {
4593 			ac->ac_b_ex.fe_len = 0;
4594 			ar->len = 0;
4595 			ext4_mb_show_ac(ac);
4596 		} else {
4597 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4598 			ar->len = ac->ac_b_ex.fe_len;
4599 		}
4600 	} else {
4601 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4602 		if (freed)
4603 			goto repeat;
4604 		*errp = -ENOSPC;
4605 		ac->ac_b_ex.fe_len = 0;
4606 		ar->len = 0;
4607 		ext4_mb_show_ac(ac);
4608 	}
4609 
4610 	ext4_mb_release_context(ac);
4611 
4612 out2:
4613 	kmem_cache_free(ext4_ac_cachep, ac);
4614 out1:
4615 	if (inquota && ar->len < inquota)
4616 		vfs_dq_free_block(ar->inode, inquota - ar->len);
4617 out3:
4618 	if (!ar->len) {
4619 		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4620 			/* release all the reserved blocks if non delalloc */
4621 			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
4622 						reserv_blks);
4623 	}
4624 
4625 	trace_mark(ext4_allocate_blocks,
4626 		   "dev %s block %llu flags %u len %u ino %lu "
4627 		   "logical %llu goal %llu lleft %llu lright %llu "
4628 		   "pleft %llu pright %llu ",
4629 		   sb->s_id, (unsigned long long) block,
4630 		   ar->flags, ar->len, ar->inode ? ar->inode->i_ino : 0,
4631 		   (unsigned long long) ar->logical,
4632 		   (unsigned long long) ar->goal,
4633 		   (unsigned long long) ar->lleft,
4634 		   (unsigned long long) ar->lright,
4635 		   (unsigned long long) ar->pleft,
4636 		   (unsigned long long) ar->pright);
4637 
4638 	return block;
4639 }
4640 
4641 /*
4642  * We can merge two free data extents only if the physical blocks
4643  * are contiguous, AND the extents were freed by the same transaction,
4644  * AND the blocks are associated with the same group.
4645  */
4646 static int can_merge(struct ext4_free_data *entry1,
4647 			struct ext4_free_data *entry2)
4648 {
4649 	if ((entry1->t_tid == entry2->t_tid) &&
4650 	    (entry1->group == entry2->group) &&
4651 	    ((entry1->start_blk + entry1->count) == entry2->start_blk))
4652 		return 1;
4653 	return 0;
4654 }
4655 
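/*
 * For example (illustrative numbers), an entry covering blocks 100-107
 * and one starting at block 108 merge into a single extent covering
 * blocks 100-115, provided both were freed in the same transaction and
 * belong to the same group.
 */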
4656 static noinline_for_stack int
4657 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4658 		      struct ext4_free_data *new_entry)
4659 {
4660 	ext4_grpblk_t block;
4661 	struct ext4_free_data *entry;
4662 	struct ext4_group_info *db = e4b->bd_info;
4663 	struct super_block *sb = e4b->bd_sb;
4664 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4665 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4666 	struct rb_node *parent = NULL, *new_node;
4667 
4668 	BUG_ON(!ext4_handle_valid(handle));
4669 	BUG_ON(e4b->bd_bitmap_page == NULL);
4670 	BUG_ON(e4b->bd_buddy_page == NULL);
4671 
4672 	new_node = &new_entry->node;
4673 	block = new_entry->start_blk;
4674 
4675 	if (!*n) {
4676 		/* first free block extent. We need to
4677 		 * protect the buddy cache from being freed,
4678 		 * otherwise we'll refresh it from the
4679 		 * on-disk bitmap and lose not-yet-available
4680 		 * blocks */
4681 		page_cache_get(e4b->bd_buddy_page);
4682 		page_cache_get(e4b->bd_bitmap_page);
4683 	}
4684 	while (*n) {
4685 		parent = *n;
4686 		entry = rb_entry(parent, struct ext4_free_data, node);
4687 		if (block < entry->start_blk)
4688 			n = &(*n)->rb_left;
4689 		else if (block >= (entry->start_blk + entry->count))
4690 			n = &(*n)->rb_right;
4691 		else {
4692 			ext4_grp_locked_error(sb, e4b->bd_group, __func__,
4693 					"Double free of blocks %d (%d %d)",
4694 					block, entry->start_blk, entry->count);
4695 			return 0;
4696 		}
4697 	}
4698 
4699 	rb_link_node(new_node, parent, n);
4700 	rb_insert_color(new_node, &db->bb_free_root);
4701 
4702 	/* Now see if the extent can be merged to the left and right */
4703 	node = rb_prev(new_node);
4704 	if (node) {
4705 		entry = rb_entry(node, struct ext4_free_data, node);
4706 		if (can_merge(entry, new_entry)) {
4707 			new_entry->start_blk = entry->start_blk;
4708 			new_entry->count += entry->count;
4709 			rb_erase(node, &(db->bb_free_root));
4710 			spin_lock(&sbi->s_md_lock);
4711 			list_del(&entry->list);
4712 			spin_unlock(&sbi->s_md_lock);
4713 			kmem_cache_free(ext4_free_ext_cachep, entry);
4714 		}
4715 	}
4716 
4717 	node = rb_next(new_node);
4718 	if (node) {
4719 		entry = rb_entry(node, struct ext4_free_data, node);
4720 		if (can_merge(new_entry, entry)) {
4721 			new_entry->count += entry->count;
4722 			rb_erase(node, &(db->bb_free_root));
4723 			spin_lock(&sbi->s_md_lock);
4724 			list_del(&entry->list);
4725 			spin_unlock(&sbi->s_md_lock);
4726 			kmem_cache_free(ext4_free_ext_cachep, entry);
4727 		}
4728 	}
4729 	/* Add the extent to transaction's private list */
4730 	spin_lock(&sbi->s_md_lock);
4731 	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
4732 	spin_unlock(&sbi->s_md_lock);
4733 	return 0;
4734 }
4735 
4736 /*
4737  * Main entry point into mballoc to free blocks
4738  */
4739 void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4740 			unsigned long block, unsigned long count,
4741 			int metadata, unsigned long *freed)
4742 {
4743 	struct buffer_head *bitmap_bh = NULL;
4744 	struct super_block *sb = inode->i_sb;
4745 	struct ext4_allocation_context *ac = NULL;
4746 	struct ext4_group_desc *gdp;
4747 	struct ext4_super_block *es;
4748 	unsigned int overflow;
4749 	ext4_grpblk_t bit;
4750 	struct buffer_head *gd_bh;
4751 	ext4_group_t block_group;
4752 	struct ext4_sb_info *sbi;
4753 	struct ext4_buddy e4b;
4754 	int err = 0;
4755 	int ret;
4756 
4757 	*freed = 0;
4758 
4759 	sbi = EXT4_SB(sb);
4760 	es = EXT4_SB(sb)->s_es;
4761 	if (block < le32_to_cpu(es->s_first_data_block) ||
4762 	    block + count < block ||
4763 	    block + count > ext4_blocks_count(es)) {
4764 		ext4_error(sb, __func__,
4765 			    "Freeing blocks not in datazone - "
4766 			    "block = %lu, count = %lu", block, count);
4767 		goto error_return;
4768 	}
4769 
4770 	ext4_debug("freeing block %lu\n", block);
4771 	trace_mark(ext4_free_blocks,
4772 		   "dev %s block %llu count %lu metadata %d ino %lu",
4773 		   sb->s_id, (unsigned long long) block, count, metadata,
4774 		   inode ? inode->i_ino : 0);
4775 
4776 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4777 	if (ac) {
4778 		ac->ac_op = EXT4_MB_HISTORY_FREE;
4779 		ac->ac_inode = inode;
4780 		ac->ac_sb = sb;
4781 	}
4782 
4783 do_more:
4784 	overflow = 0;
4785 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4786 
4787 	/*
4788 	 * Check to see if we are freeing blocks across a group
4789 	 * boundary.
4790 	 */
4791 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4792 		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4793 		count -= overflow;
4794 	}
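	/*
	 * Worked example with illustrative numbers: with 32768 blocks per
	 * group, freeing 16 blocks starting at bit 32760 overflows by 8;
	 * this pass frees bits 32760-32767 and the do_more loop then frees
	 * the remaining 8 blocks at the start of the next group.
	 */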
4795 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4796 	if (!bitmap_bh) {
4797 		err = -EIO;
4798 		goto error_return;
4799 	}
4800 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4801 	if (!gdp) {
4802 		err = -EIO;
4803 		goto error_return;
4804 	}
4805 
4806 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4807 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4808 	    in_range(block, ext4_inode_table(sb, gdp),
4809 		      EXT4_SB(sb)->s_itb_per_group) ||
4810 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4811 		      EXT4_SB(sb)->s_itb_per_group)) {
4812 
4813 		ext4_error(sb, __func__,
4814 			   "Freeing blocks in system zone - "
4815 			   "Block = %lu, count = %lu", block, count);
4816 		/* err = 0. ext4_std_error should be a no op */
4817 		goto error_return;
4818 	}
4819 
4820 	BUFFER_TRACE(bitmap_bh, "getting write access");
4821 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4822 	if (err)
4823 		goto error_return;
4824 
4825 	/*
4826 	 * We are about to modify some metadata.  Call the journal APIs
4827 	 * to unshare ->b_data if a currently-committing transaction is
4828 	 * using it
4829 	 */
4830 	BUFFER_TRACE(gd_bh, "get_write_access");
4831 	err = ext4_journal_get_write_access(handle, gd_bh);
4832 	if (err)
4833 		goto error_return;
4834 #ifdef AGGRESSIVE_CHECK
4835 	{
4836 		int i;
4837 		for (i = 0; i < count; i++)
4838 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4839 	}
4840 #endif
4841 	if (ac) {
4842 		ac->ac_b_ex.fe_group = block_group;
4843 		ac->ac_b_ex.fe_start = bit;
4844 		ac->ac_b_ex.fe_len = count;
4845 		ext4_mb_store_history(ac);
4846 	}
4847 
4848 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4849 	if (err)
4850 		goto error_return;
4851 	if (metadata && ext4_handle_valid(handle)) {
4852 		struct ext4_free_data *new_entry;
4853 		/*
4854 		 * blocks being freed are metadata. these blocks shouldn't
4855 		 * be used until this transaction is committed
4856 		 */
4857 		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4858 		new_entry->start_blk = bit;
4859 		new_entry->group  = block_group;
4860 		new_entry->count = count;
4861 		new_entry->t_tid = handle->h_transaction->t_tid;
4862 		ext4_lock_group(sb, block_group);
4863 		mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4864 				bit, count);
4865 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4866 		ext4_unlock_group(sb, block_group);
4867 	} else {
4868 		ext4_lock_group(sb, block_group);
4869 		/* need to update group_info->bb_free and bitmap
4870 		 * with the group lock held. generate_buddy looks at
4871 		 * them with the group lock held
4872 		 */
4873 		mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4874 				bit, count);
4875 		mb_free_blocks(inode, &e4b, bit, count);
4876 		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4877 		ext4_unlock_group(sb, block_group);
4878 	}
4879 
4880 	spin_lock(sb_bgl_lock(sbi, block_group));
4881 	ret = ext4_free_blks_count(sb, gdp) + count;
4882 	ext4_free_blks_set(sb, gdp, ret);
4883 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4884 	spin_unlock(sb_bgl_lock(sbi, block_group));
4885 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
4886 
4887 	if (sbi->s_log_groups_per_flex) {
4888 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4889 		atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
4890 	}
4891 
4892 	ext4_mb_release_desc(&e4b);
4893 
4894 	*freed += count;
4895 
4896 	/* We dirtied the bitmap block */
4897 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4898 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4899 
4900 	/* And the group descriptor block */
4901 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4902 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4903 	if (!err)
4904 		err = ret;
4905 
4906 	if (overflow && !err) {
4907 		block += count;
4908 		count = overflow;
4909 		put_bh(bitmap_bh);
4910 		goto do_more;
4911 	}
4912 	sb->s_dirt = 1;
4913 error_return:
4914 	brelse(bitmap_bh);
4915 	ext4_std_error(sb, err);
4916 	if (ac)
4917 		kmem_cache_free(ext4_ac_cachep, ac);
4918 	return;
4919 }
4920