xref: /openbmc/linux/fs/ext4/mballoc.c (revision 95e9fd10)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include "ext4_jbd2.h"
25 #include "mballoc.h"
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <trace/events/ext4.h>
29 
30 /*
31  * MUSTDO:
32  *   - test ext4_ext_search_left() and ext4_ext_search_right()
33  *   - search for metadata in few groups
34  *
35  * TODO v4:
36  *   - normalization should take into account whether file is still open
37  *   - discard preallocations if no free space left (policy?)
38  *   - don't normalize tails
39  *   - quota
40  *   - reservation for superuser
41  *
42  * TODO v3:
43  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
44  *   - track min/max extents in each group for better group selection
45  *   - mb_mark_used() may allocate chunk right after splitting buddy
46  *   - tree of groups sorted by number of free blocks
47  *   - error handling
48  */
49 
50 /*
51  * An allocation request involves a request for multiple blocks near
52  * the specified goal (block) value.
53  *
54  * During initialization phase of the allocator we decide to use the
55  * group preallocation or inode preallocation depending on the size of
56  * the file. The size of the file could be the resulting file size we
57  * would have after allocation, or the current file size, whichever
58  * is larger. If the size is less than sbi->s_mb_stream_request we
59  * select group preallocation. The default value of
60  * s_mb_stream_request is 16 blocks. This can also be tuned via
61  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
62  * terms of number of blocks.
63  *
64  * The main motivation for having small file use group preallocation is to
65  * ensure that we have small files closer together on the disk.
66  *
67  * In the first stage the allocator looks at the inode prealloc list,
68  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
69  * spaces for this particular inode. The inode prealloc space is
70  * represented as:
71  *
72  * pa_lstart -> the logical start block for this prealloc space
73  * pa_pstart -> the physical start block for this prealloc space
74  * pa_len    -> length for this prealloc space (in clusters)
75  * pa_free   ->  free space available in this prealloc space (in clusters)
76  *
77  * The inode preallocation space is used based on the _logical_ start
78  * block. We consume a particular prealloc space only if the logical file
79  * block falls within its range. This makes sure that
80  * we have contiguous physical blocks representing the file blocks.
81  *
82  * The important thing to note about inode prealloc space is that
83  * we don't modify any of the values associated with it except
84  * pa_free.
85  *
86  * If we are not able to find blocks in the inode prealloc space and if we
87  * have the group allocation flag set then we look at the locality group
88  * prealloc space. These are per-CPU prealloc lists, represented as
89  *
90  * ext4_sb_info.s_locality_groups[smp_processor_id()]
91  *
92  * The reason for having a per cpu locality group is to reduce the contention
93  * between CPUs. It is possible to get scheduled at this point.
94  *
95  * The locality group prealloc space is used based on whether we have
96  * enough free space (pa_free) within the prealloc space.
97  *
98  * If we can't allocate blocks via inode prealloc and/or locality group
99  * prealloc then we look at the buddy cache. The buddy cache is represented
100  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
101  * mapped to the buddy and bitmap information regarding different
102  * groups. The buddy information is attached to the buddy cache inode so
103  * that we can access it through the page cache. The information regarding
104  * each group is loaded via ext4_mb_load_buddy.  The information involves
105  * the block bitmap and buddy information, and is stored in the
106  * inode as:
107  *
108  *  {                        page                        }
109  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
110  *
111  *
112  * one block each for bitmap and buddy information.  So for each group we
113  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
114  * blocksize) blocks.  So it can hold information for groups_per_page
115  * groups, which is blocks_per_page/2.
116  *
117  * The buddy cache inode is not stored on disk. The inode is thrown
118  * away when the filesystem is unmounted.
119  *
120  * We look for count number of blocks in the buddy cache. If we were able
121  * to locate that many free blocks we return with additional information
122  * regarding the rest of the contiguous physical blocks available.
123  *
124  * Before allocating blocks via buddy cache we normalize the request
125  * blocks. This ensures we ask for more blocks than we need. The extra
126  * blocks that we get after allocation are added to the respective prealloc
127  * list. In case of inode preallocation we follow a list of heuristics
128  * based on file size. This can be found in ext4_mb_normalize_request. If
129  * we are doing a group prealloc we try to normalize the request to
130  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
131  * dependent on the cluster size; for non-bigalloc file systems, it is
132  * 512 blocks. This can be tuned via
133  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
134  * terms of number of blocks. If we have mounted the file system with -O
135  * stripe=<value> option the group prealloc request is normalized to the
136  * smallest multiple of the stripe value (sbi->s_stripe) which is
137  * greater than the default mb_group_prealloc.
138  *
139  * The regular allocator (using the buddy cache) supports a few tunables.
140  *
141  * /sys/fs/ext4/<partition>/mb_min_to_scan
142  * /sys/fs/ext4/<partition>/mb_max_to_scan
143  * /sys/fs/ext4/<partition>/mb_order2_req
144  *
145  * The regular allocator uses buddy scan only if the request len is a power
146  * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
147  * value of s_mb_order2_reqs can be tuned via
148  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
149  * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
150  * stripe-size units. This should result in better allocation on RAID setups. If
151  * not, we search in the specific group using bitmap for best extents. The
152  * tunables min_to_scan and max_to_scan control the behaviour here.
153  * min_to_scan indicates how long mballoc __must__ look for a best
154  * extent and max_to_scan indicates how long mballoc __can__ look for a
155  * best extent among the found extents. Searching for the blocks starts with
156  * the group specified as the goal value in allocation context via
157  * ac_g_ex. Each group is first checked based on the criteria whether it
158  * can be used for allocation. ext4_mb_good_group explains how the groups are
159  * checked.
160  *
161  * Both prealloc spaces are populated as above. So the first
162  * request will hit the buddy cache, which will result in this prealloc
163  * space getting filled. The prealloc space is then later used for
164  * subsequent requests.
165  */
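/*
 * For illustration, suppose a 4k page and a 1k block size, so that
 * blocks_per_page = 4 and groups_per_page = 2.  The bitmap and buddy
 * blocks of group G are then located in the buddy cache with the same
 * arithmetic ext4_mb_load_buddy() uses below:
 *
 *	block = G * 2;				(bitmap block; buddy is block + 1)
 *	pnum  = block / blocks_per_page;	(page index in s_buddy_cache)
 *	poff  = block % blocks_per_page;	(block offset within that page)
 *
 * e.g. group 5: block = 10, pnum = 2, poff = 2, so page 2 holds the
 * group 5 bitmap at block offset 2 and the group 5 buddy at offset 3.
 */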
166 
167 /*
168  * mballoc operates on the following data:
169  *  - on-disk bitmap
170  *  - in-core buddy (actually includes buddy and bitmap)
171  *  - preallocation descriptors (PAs)
172  *
173  * there are two types of preallocations:
174  *  - inode
175  *    assigned to a specific inode and can be used for this inode only.
176  *    it describes part of the inode's space preallocated to specific
177  *    physical blocks. any block from that preallocation can be used
178  *    independently. the descriptor just tracks the number of blocks left
179  *    unused. so, before taking some block from the descriptor, one must
180  *    make sure the corresponding logical block isn't allocated yet. this
181  *    also means that freeing any block within descriptor's range
182  *    must discard all preallocated blocks.
183  *  - locality group
184  *    assigned to a specific locality group which does not translate to a
185  *    permanent set of inodes: an inode can join and leave the group. space
186  *    from this type of preallocation can be used for any inode. thus
187  *    it's consumed from the beginning to the end.
188  *
189  * relation between them can be expressed as:
190  *    in-core buddy = on-disk bitmap + preallocation descriptors
191  *
192  * this means blocks mballoc considers used are:
193  *  - allocated blocks (persistent)
194  *  - preallocated blocks (non-persistent)
195  *
196  * consistency in mballoc world means that at any time a block is either
197  * free or used in ALL structures. notice: "any time" should not be read
198  * literally -- time is discrete and delimited by locks.
199  *
200  *  to keep it simple, we don't use block numbers, instead we count number of
201  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
202  *
203  * all operations can be expressed as:
204  *  - init buddy:			buddy = on-disk + PAs
205  *  - new PA:				buddy += N; PA = N
206  *  - use inode PA:			on-disk += N; PA -= N
207  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
208  *  - use locality group PA		on-disk += N; PA -= N
209  *  - discard locality group PA		buddy -= PA; PA = 0
210  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
211  *        is used in real operation because we can't know actual used
212  *        bits from PA, only from on-disk bitmap
213  *
214  * if we follow this strict logic, then all operations above should be atomic.
215  * given some of them can block, we'd have to use something like semaphores
216  * killing performance on high-end SMP hardware. let's try to relax it using
217  * the following knowledge:
218  *  1) if buddy is referenced, it's already initialized
219  *  2) while block is used in buddy and the buddy is referenced,
220  *     nobody can re-allocate that block
221  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
222  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
223  *     on-disk bitmap if buddy has the same bit set and/or PA covers the
224  *     corresponding block
225  *
226  * so, now we're building a concurrency table:
227  *  - init buddy vs.
228  *    - new PA
229  *      blocks for PA are allocated in the buddy, buddy must be referenced
230  *      until PA is linked to allocation group to avoid concurrent buddy init
231  *    - use inode PA
232  *      we need to make sure that either on-disk bitmap or PA has uptodate data
233  *      given (3) we care that PA-=N operation doesn't interfere with init
234  *    - discard inode PA
235  *      the simplest way would be to have buddy initialized by the discard
236  *    - use locality group PA
237  *      again PA-=N must be serialized with init
238  *    - discard locality group PA
239  *      the simplest way would be to have buddy initialized by the discard
240  *  - new PA vs.
241  *    - use inode PA
242  *      i_data_sem serializes them
243  *    - discard inode PA
244  *      discard process must wait until PA isn't used by another process
245  *    - use locality group PA
246  *      some mutex should serialize them
247  *    - discard locality group PA
248  *      discard process must wait until PA isn't used by another process
249  *  - use inode PA
250  *    - use inode PA
251  *      i_data_sem or another mutex should serialize them
252  *    - discard inode PA
253  *      discard process must wait until PA isn't used by another process
254  *    - use locality group PA
255  *      nothing wrong here -- they're different PAs covering different blocks
256  *    - discard locality group PA
257  *      discard process must wait until PA isn't used by another process
258  *
259  * now we're ready to draw a few consequences:
260  *  - while a PA is referenced, no discard is possible
261  *  - a PA is referenced until its blocks are marked in the on-disk bitmap
262  *  - PA changes only after on-disk bitmap
263  *  - discard must not compete with init. either init is done before
264  *    any discard or they're serialized somehow
265  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
266  *
267  * a special case is when we've used a PA to emptiness. no need to modify the
268  * buddy in this case, but we should care about concurrent init
269  *
270  */
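/*
 * A worked example of the accounting above, with arbitrary numbers and
 * counting blocks marked used in each structure for one group:
 *
 *  on-disk bitmap has 100 used, no PAs	-> init buddy:  buddy = 100
 *  new inode PA of 16 blocks		-> buddy = 116, PA = 16
 *  use 4 blocks from that PA		-> on-disk = 104, PA = 12, buddy = 116
 *  discard the PA			-> the 12 still-unused preallocated
 *					   blocks become free again:
 *					   buddy = 104 = on-disk, PA = 0
 */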
271 
272 /*
273  * Logic in a few words:
274  *
275  *  - allocation:
276  *    load group
277  *    find blocks
278  *    mark bits in on-disk bitmap
279  *    release group
280  *
281  *  - use preallocation:
282  *    find proper PA (per-inode or group)
283  *    load group
284  *    mark bits in on-disk bitmap
285  *    release group
286  *    release PA
287  *
288  *  - free:
289  *    load group
290  *    mark bits in on-disk bitmap
291  *    release group
292  *
293  *  - discard preallocations in group:
294  *    mark PAs deleted
295  *    move them onto local list
296  *    load on-disk bitmap
297  *    load group
298  *    remove PA from object (inode or locality group)
299  *    mark free blocks in-core
300  *
301  *  - discard inode's preallocations:
302  */
303 
304 /*
305  * Locking rules
306  *
307  * Locks:
308  *  - bitlock on a group	(group)
309  *  - object (inode/locality)	(object)
310  *  - per-pa lock		(pa)
311  *
312  * Paths:
313  *  - new pa
314  *    object
315  *    group
316  *
317  *  - find and use pa:
318  *    pa
319  *
320  *  - release consumed pa:
321  *    pa
322  *    group
323  *    object
324  *
325  *  - generate in-core bitmap:
326  *    group
327  *        pa
328  *
329  *  - discard all for given object (inode, locality group):
330  *    object
331  *        pa
332  *    group
333  *
334  *  - discard all for given group:
335  *    group
336  *        pa
337  *    group
338  *        object
339  *
340  */
341 static struct kmem_cache *ext4_pspace_cachep;
342 static struct kmem_cache *ext4_ac_cachep;
343 static struct kmem_cache *ext4_free_data_cachep;
344 
345 /* We create slab caches for groupinfo data structures based on the
346  * superblock block size.  There will be one per mounted filesystem for
347  * each unique s_blocksize_bits */
348 #define NR_GRPINFO_CACHES 8
349 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
350 
351 static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
352 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
353 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
354 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
355 };
356 
357 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
358 					ext4_group_t group);
359 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
360 						ext4_group_t group);
361 static void ext4_free_data_callback(struct super_block *sb,
362 				struct ext4_journal_cb_entry *jce, int rc);
363 
364 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
365 {
366 #if BITS_PER_LONG == 64
367 	*bit += ((unsigned long) addr & 7UL) << 3;
368 	addr = (void *) ((unsigned long) addr & ~7UL);
369 #elif BITS_PER_LONG == 32
370 	*bit += ((unsigned long) addr & 3UL) << 3;
371 	addr = (void *) ((unsigned long) addr & ~3UL);
372 #else
373 #error "how many bits you are?!"
374 #endif
375 	return addr;
376 }
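/*
 * For example, on a 64-bit machine mb_test_bit(5, base + 3) (with base
 * 8-byte aligned) is rewritten by mb_correct_addr_and_bit() as
 *
 *	bit  = 5 + ((3 & 7) << 3) = 29;
 *	addr = base;
 *
 * so ext4_test_bit() always sees an unsigned-long-aligned address; bit 29
 * of the aligned area is the same bit as bit 5 of the byte at offset 3.
 */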
377 
378 static inline int mb_test_bit(int bit, void *addr)
379 {
380 	/*
381 	 * ext4_test_bit on architectures like powerpc
382 	 * needs an unsigned long aligned address
383 	 */
384 	addr = mb_correct_addr_and_bit(&bit, addr);
385 	return ext4_test_bit(bit, addr);
386 }
387 
388 static inline void mb_set_bit(int bit, void *addr)
389 {
390 	addr = mb_correct_addr_and_bit(&bit, addr);
391 	ext4_set_bit(bit, addr);
392 }
393 
394 static inline void mb_clear_bit(int bit, void *addr)
395 {
396 	addr = mb_correct_addr_and_bit(&bit, addr);
397 	ext4_clear_bit(bit, addr);
398 }
399 
400 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
401 {
402 	int fix = 0, ret, tmpmax;
403 	addr = mb_correct_addr_and_bit(&fix, addr);
404 	tmpmax = max + fix;
405 	start += fix;
406 
407 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
408 	if (ret > max)
409 		return max;
410 	return ret;
411 }
412 
413 static inline int mb_find_next_bit(void *addr, int max, int start)
414 {
415 	int fix = 0, ret, tmpmax;
416 	addr = mb_correct_addr_and_bit(&fix, addr);
417 	tmpmax = max + fix;
418 	start += fix;
419 
420 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
421 	if (ret > max)
422 		return max;
423 	return ret;
424 }
425 
426 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
427 {
428 	char *bb;
429 
430 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
431 	BUG_ON(max == NULL);
432 
433 	if (order > e4b->bd_blkbits + 1) {
434 		*max = 0;
435 		return NULL;
436 	}
437 
438 	/* at order 0 we see each particular block */
439 	if (order == 0) {
440 		*max = 1 << (e4b->bd_blkbits + 3);
441 		return e4b->bd_bitmap;
442 	}
443 
444 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
445 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
446 
447 	return bb;
448 }
449 
450 #ifdef DOUBLE_CHECK
451 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
452 			   int first, int count)
453 {
454 	int i;
455 	struct super_block *sb = e4b->bd_sb;
456 
457 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
458 		return;
459 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
460 	for (i = 0; i < count; i++) {
461 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
462 			ext4_fsblk_t blocknr;
463 
464 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
465 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
466 			ext4_grp_locked_error(sb, e4b->bd_group,
467 					      inode ? inode->i_ino : 0,
468 					      blocknr,
469 					      "freeing block already freed "
470 					      "(bit %u)",
471 					      first + i);
472 		}
473 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
474 	}
475 }
476 
477 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
478 {
479 	int i;
480 
481 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
482 		return;
483 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
484 	for (i = 0; i < count; i++) {
485 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
486 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
487 	}
488 }
489 
490 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
491 {
492 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
493 		unsigned char *b1, *b2;
494 		int i;
495 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
496 		b2 = (unsigned char *) bitmap;
497 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
498 			if (b1[i] != b2[i]) {
499 				ext4_msg(e4b->bd_sb, KERN_ERR,
500 					 "corruption in group %u "
501 					 "at byte %u(%u): %x in copy != %x "
502 					 "on disk/prealloc",
503 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
504 				BUG();
505 			}
506 		}
507 	}
508 }
509 
510 #else
511 static inline void mb_free_blocks_double(struct inode *inode,
512 				struct ext4_buddy *e4b, int first, int count)
513 {
514 	return;
515 }
516 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
517 						int first, int count)
518 {
519 	return;
520 }
521 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
522 {
523 	return;
524 }
525 #endif
526 
527 #ifdef AGGRESSIVE_CHECK
528 
529 #define MB_CHECK_ASSERT(assert)						\
530 do {									\
531 	if (!(assert)) {						\
532 		printk(KERN_EMERG					\
533 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
534 			function, file, line, # assert);		\
535 		BUG();							\
536 	}								\
537 } while (0)
538 
539 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
540 				const char *function, int line)
541 {
542 	struct super_block *sb = e4b->bd_sb;
543 	int order = e4b->bd_blkbits + 1;
544 	int max;
545 	int max2;
546 	int i;
547 	int j;
548 	int k;
549 	int count;
550 	struct ext4_group_info *grp;
551 	int fragments = 0;
552 	int fstart;
553 	struct list_head *cur;
554 	void *buddy;
555 	void *buddy2;
556 
557 	{
558 		static int mb_check_counter;
559 		if (mb_check_counter++ % 100 != 0)
560 			return 0;
561 	}
562 
563 	while (order > 1) {
564 		buddy = mb_find_buddy(e4b, order, &max);
565 		MB_CHECK_ASSERT(buddy);
566 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
567 		MB_CHECK_ASSERT(buddy2);
568 		MB_CHECK_ASSERT(buddy != buddy2);
569 		MB_CHECK_ASSERT(max * 2 == max2);
570 
571 		count = 0;
572 		for (i = 0; i < max; i++) {
573 
574 			if (mb_test_bit(i, buddy)) {
575 				/* only single bit in buddy2 may be 1 */
576 				if (!mb_test_bit(i << 1, buddy2)) {
577 					MB_CHECK_ASSERT(
578 						mb_test_bit((i<<1)+1, buddy2));
579 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
580 					MB_CHECK_ASSERT(
581 						mb_test_bit(i << 1, buddy2));
582 				}
583 				continue;
584 			}
585 
586 			/* both bits in buddy2 must be 1 */
587 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
588 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
589 
590 			for (j = 0; j < (1 << order); j++) {
591 				k = (i * (1 << order)) + j;
592 				MB_CHECK_ASSERT(
593 					!mb_test_bit(k, e4b->bd_bitmap));
594 			}
595 			count++;
596 		}
597 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
598 		order--;
599 	}
600 
601 	fstart = -1;
602 	buddy = mb_find_buddy(e4b, 0, &max);
603 	for (i = 0; i < max; i++) {
604 		if (!mb_test_bit(i, buddy)) {
605 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
606 			if (fstart == -1) {
607 				fragments++;
608 				fstart = i;
609 			}
610 			continue;
611 		}
612 		fstart = -1;
613 		/* check used bits only */
614 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
615 			buddy2 = mb_find_buddy(e4b, j, &max2);
616 			k = i >> j;
617 			MB_CHECK_ASSERT(k < max2);
618 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
619 		}
620 	}
621 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
622 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
623 
624 	grp = ext4_get_group_info(sb, e4b->bd_group);
625 	list_for_each(cur, &grp->bb_prealloc_list) {
626 		ext4_group_t groupnr;
627 		struct ext4_prealloc_space *pa;
628 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
629 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
630 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
631 		for (i = 0; i < pa->pa_len; i++)
632 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
633 	}
634 	return 0;
635 }
636 #undef MB_CHECK_ASSERT
637 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
638 					__FILE__, __func__, __LINE__)
639 #else
640 #define mb_check_buddy(e4b)
641 #endif
642 
643 /*
644  * Divide the blocks starting at @first with length @len into
645  * smaller chunks with power-of-2 sizes.
646  * Clear the bits in the buddy bitmap which the blocks of the chunk(s) cover,
647  * then increase bb_counters[] for the corresponding chunk size.
648  */
649 static void ext4_mb_mark_free_simple(struct super_block *sb,
650 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
651 					struct ext4_group_info *grp)
652 {
653 	struct ext4_sb_info *sbi = EXT4_SB(sb);
654 	ext4_grpblk_t min;
655 	ext4_grpblk_t max;
656 	ext4_grpblk_t chunk;
657 	unsigned short border;
658 
659 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
660 
661 	border = 2 << sb->s_blocksize_bits;
662 
663 	while (len > 0) {
664 		/* find how many blocks can be covered since this position */
665 		max = ffs(first | border) - 1;
666 
667 		/* find how many blocks of power 2 we need to mark */
668 		min = fls(len) - 1;
669 
670 		if (max < min)
671 			min = max;
672 		chunk = 1 << min;
673 
674 		/* mark multiblock chunks only */
675 		grp->bb_counters[min]++;
676 		if (min > 0)
677 			mb_clear_bit(first >> min,
678 				     buddy + sbi->s_mb_offsets[min]);
679 
680 		len -= chunk;
681 		first += chunk;
682 	}
683 }
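/*
 * Worked example with arbitrary numbers: a free range of 11 clusters
 * starting at cluster 5 is split by the loop above into buddy-aligned
 * power-of-2 chunks:
 *
 *	first = 5,  len = 11:  max = ffs(5) - 1 = 0, chunk = 1  -> cluster 5
 *	first = 6,  len = 10:  max = 1 < min = 3,    chunk = 2  -> 6-7
 *	first = 8,  len = 8:   max = 3, min = 3,     chunk = 8  -> 8-15
 *
 * bb_counters[0], bb_counters[1] and bb_counters[3] each gain one; bits
 * are cleared in the order-1 and order-3 buddy bitmaps only, since order-0
 * chunks are represented by the block bitmap itself.
 */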
684 
685 /*
686  * Cache the order of the largest free extent we have available in this block
687  * group.
688  */
689 static void
690 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
691 {
692 	int i;
693 	int bits;
694 
695 	grp->bb_largest_free_order = -1; /* uninit */
696 
697 	bits = sb->s_blocksize_bits + 1;
698 	for (i = bits; i >= 0; i--) {
699 		if (grp->bb_counters[i] > 0) {
700 			grp->bb_largest_free_order = i;
701 			break;
702 		}
703 	}
704 }
705 
706 static noinline_for_stack
707 void ext4_mb_generate_buddy(struct super_block *sb,
708 				void *buddy, void *bitmap, ext4_group_t group)
709 {
710 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
711 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
712 	ext4_grpblk_t i = 0;
713 	ext4_grpblk_t first;
714 	ext4_grpblk_t len;
715 	unsigned free = 0;
716 	unsigned fragments = 0;
717 	unsigned long long period = get_cycles();
718 
719 	/* initialize buddy from bitmap which is the aggregation
720 	 * of on-disk bitmap and preallocations */
721 	i = mb_find_next_zero_bit(bitmap, max, 0);
722 	grp->bb_first_free = i;
723 	while (i < max) {
724 		fragments++;
725 		first = i;
726 		i = mb_find_next_bit(bitmap, max, i);
727 		len = i - first;
728 		free += len;
729 		if (len > 1)
730 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
731 		else
732 			grp->bb_counters[0]++;
733 		if (i < max)
734 			i = mb_find_next_zero_bit(bitmap, max, i);
735 	}
736 	grp->bb_fragments = fragments;
737 
738 	if (free != grp->bb_free) {
739 		ext4_grp_locked_error(sb, group, 0, 0,
740 				      "%u clusters in bitmap, %u in gd",
741 				      free, grp->bb_free);
742 		/*
743 		 * If we intend to continue, we consider the group descriptor
744 		 * corrupt and update bb_free using the bitmap value
745 		 */
746 		grp->bb_free = free;
747 	}
748 	mb_set_largest_free_order(sb, grp);
749 
750 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
751 
752 	period = get_cycles() - period;
753 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
754 	EXT4_SB(sb)->s_mb_buddies_generated++;
755 	EXT4_SB(sb)->s_mb_generation_time += period;
756 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
757 }
758 
759 /* The buddy information is attached to the buddy cache inode
760  * for convenience. The information regarding each group
761  * is loaded via ext4_mb_load_buddy. The information involves
762  * the block bitmap and buddy information, and is
763  * stored in the inode as
764  *
765  * {                        page                        }
766  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
767  *
768  *
769  * one block each for bitmap and buddy information.
770  * So for each group we take up 2 blocks. A page can
771  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
772  * So it can hold information for groups_per_page groups, which
773  * is blocks_per_page/2.
774  *
775  * Locking note:  This routine takes the block group lock of all groups
776  * for this page; do not hold this lock when calling this routine!
777  */
778 
779 static int ext4_mb_init_cache(struct page *page, char *incore)
780 {
781 	ext4_group_t ngroups;
782 	int blocksize;
783 	int blocks_per_page;
784 	int groups_per_page;
785 	int err = 0;
786 	int i;
787 	ext4_group_t first_group, group;
788 	int first_block;
789 	struct super_block *sb;
790 	struct buffer_head *bhs;
791 	struct buffer_head **bh = NULL;
792 	struct inode *inode;
793 	char *data;
794 	char *bitmap;
795 	struct ext4_group_info *grinfo;
796 
797 	mb_debug(1, "init page %lu\n", page->index);
798 
799 	inode = page->mapping->host;
800 	sb = inode->i_sb;
801 	ngroups = ext4_get_groups_count(sb);
802 	blocksize = 1 << inode->i_blkbits;
803 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
804 
805 	groups_per_page = blocks_per_page >> 1;
806 	if (groups_per_page == 0)
807 		groups_per_page = 1;
808 
809 	/* allocate buffer_heads to read bitmaps */
810 	if (groups_per_page > 1) {
811 		i = sizeof(struct buffer_head *) * groups_per_page;
812 		bh = kzalloc(i, GFP_NOFS);
813 		if (bh == NULL) {
814 			err = -ENOMEM;
815 			goto out;
816 		}
817 	} else
818 		bh = &bhs;
819 
820 	first_group = page->index * blocks_per_page / 2;
821 
822 	/* read all groups the page covers into the cache */
823 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
824 		if (group >= ngroups)
825 			break;
826 
827 		grinfo = ext4_get_group_info(sb, group);
828 		/*
829 		 * If page is uptodate then we came here after online resize
830 		 * which added some new uninitialized group info structs, so
831 		 * we must skip all initialized uptodate buddies on the page,
832 		 * which may be currently in use by an allocating task.
833 		 */
834 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
835 			bh[i] = NULL;
836 			continue;
837 		}
838 		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
839 			err = -ENOMEM;
840 			goto out;
841 		}
842 		mb_debug(1, "read bitmap for group %u\n", group);
843 	}
844 
845 	/* wait for I/O completion */
846 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
847 		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
848 			err = -EIO;
849 			goto out;
850 		}
851 	}
852 
853 	first_block = page->index * blocks_per_page;
854 	for (i = 0; i < blocks_per_page; i++) {
855 		int group;
856 
857 		group = (first_block + i) >> 1;
858 		if (group >= ngroups)
859 			break;
860 
861 		if (!bh[group - first_group])
862 			/* skip initialized uptodate buddy */
863 			continue;
864 
865 		/*
866 		 * data carries information regarding this
867 		 * particular group in the format specified
868 		 * above
869 		 *
870 		 */
871 		data = page_address(page) + (i * blocksize);
872 		bitmap = bh[group - first_group]->b_data;
873 
874 		/*
875 		 * We place the buddy block and bitmap block
876 		 * close together
877 		 */
878 		if ((first_block + i) & 1) {
879 			/* this is block of buddy */
880 			BUG_ON(incore == NULL);
881 			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
882 				group, page->index, i * blocksize);
883 			trace_ext4_mb_buddy_bitmap_load(sb, group);
884 			grinfo = ext4_get_group_info(sb, group);
885 			grinfo->bb_fragments = 0;
886 			memset(grinfo->bb_counters, 0,
887 			       sizeof(*grinfo->bb_counters) *
888 				(sb->s_blocksize_bits+2));
889 			/*
890 			 * incore got set to the group block bitmap below
891 			 */
892 			ext4_lock_group(sb, group);
893 			/* init the buddy */
894 			memset(data, 0xff, blocksize);
895 			ext4_mb_generate_buddy(sb, data, incore, group);
896 			ext4_unlock_group(sb, group);
897 			incore = NULL;
898 		} else {
899 			/* this is block of bitmap */
900 			BUG_ON(incore != NULL);
901 			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
902 				group, page->index, i * blocksize);
903 			trace_ext4_mb_bitmap_load(sb, group);
904 
905 			/* see comments in ext4_mb_put_pa() */
906 			ext4_lock_group(sb, group);
907 			memcpy(data, bitmap, blocksize);
908 
909 			/* mark all preallocated blks used in in-core bitmap */
910 			ext4_mb_generate_from_pa(sb, data, group);
911 			ext4_mb_generate_from_freelist(sb, data, group);
912 			ext4_unlock_group(sb, group);
913 
914 			/* set incore so that the buddy information can be
915 			 * generated using this
916 			 */
917 			incore = data;
918 		}
919 	}
920 	SetPageUptodate(page);
921 
922 out:
923 	if (bh) {
924 		for (i = 0; i < groups_per_page; i++)
925 			brelse(bh[i]);
926 		if (bh != &bhs)
927 			kfree(bh);
928 	}
929 	return err;
930 }
931 
932 /*
933  * Lock the buddy and bitmap pages. This makes sure another parallel init_group
934  * on the same buddy page doesn't happen while holding the buddy page lock.
935  * Locked buddy and bitmap pages are returned in the e4b struct. If buddy and
936  * bitmap are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
937  */
938 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
939 		ext4_group_t group, struct ext4_buddy *e4b)
940 {
941 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
942 	int block, pnum, poff;
943 	int blocks_per_page;
944 	struct page *page;
945 
946 	e4b->bd_buddy_page = NULL;
947 	e4b->bd_bitmap_page = NULL;
948 
949 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
950 	/*
951 	 * the buddy cache inode stores the block bitmap
952 	 * and buddy information in consecutive blocks.
953 	 * So for each group we need two blocks.
954 	 */
955 	block = group * 2;
956 	pnum = block / blocks_per_page;
957 	poff = block % blocks_per_page;
958 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
959 	if (!page)
960 		return -EIO;
961 	BUG_ON(page->mapping != inode->i_mapping);
962 	e4b->bd_bitmap_page = page;
963 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
964 
965 	if (blocks_per_page >= 2) {
966 		/* buddy and bitmap are on the same page */
967 		return 0;
968 	}
969 
970 	block++;
971 	pnum = block / blocks_per_page;
972 	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
973 	if (!page)
974 		return -EIO;
975 	BUG_ON(page->mapping != inode->i_mapping);
976 	e4b->bd_buddy_page = page;
977 	return 0;
978 }
979 
980 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
981 {
982 	if (e4b->bd_bitmap_page) {
983 		unlock_page(e4b->bd_bitmap_page);
984 		page_cache_release(e4b->bd_bitmap_page);
985 	}
986 	if (e4b->bd_buddy_page) {
987 		unlock_page(e4b->bd_buddy_page);
988 		page_cache_release(e4b->bd_buddy_page);
989 	}
990 }
991 
992 /*
993  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
994  * block group lock of all groups for this page; do not hold the BG lock when
995  * calling this routine!
996  */
997 static noinline_for_stack
998 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
999 {
1000 
1001 	struct ext4_group_info *this_grp;
1002 	struct ext4_buddy e4b;
1003 	struct page *page;
1004 	int ret = 0;
1005 
1006 	mb_debug(1, "init group %u\n", group);
1007 	this_grp = ext4_get_group_info(sb, group);
1008 	/*
1009 	 * This ensures that we don't reinit the buddy cache
1010 	 * page which maps to the group from which we are already
1011 	 * allocating. If we are looking at the buddy cache we would
1012 	 * have taken a reference using ext4_mb_load_buddy and that
1013 	 * would have pinned the buddy page to the page cache.
1014 	 */
1015 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
1016 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1017 		/*
1018 		 * somebody initialized the group
1019 		 * return without doing anything
1020 		 */
1021 		goto err;
1022 	}
1023 
1024 	page = e4b.bd_bitmap_page;
1025 	ret = ext4_mb_init_cache(page, NULL);
1026 	if (ret)
1027 		goto err;
1028 	if (!PageUptodate(page)) {
1029 		ret = -EIO;
1030 		goto err;
1031 	}
1032 	mark_page_accessed(page);
1033 
1034 	if (e4b.bd_buddy_page == NULL) {
1035 		/*
1036 		 * If both the bitmap and buddy are in
1037 		 * the same page we don't need to force
1038 		 * init the buddy
1039 		 */
1040 		ret = 0;
1041 		goto err;
1042 	}
1043 	/* init buddy cache */
1044 	page = e4b.bd_buddy_page;
1045 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
1046 	if (ret)
1047 		goto err;
1048 	if (!PageUptodate(page)) {
1049 		ret = -EIO;
1050 		goto err;
1051 	}
1052 	mark_page_accessed(page);
1053 err:
1054 	ext4_mb_put_buddy_page_lock(&e4b);
1055 	return ret;
1056 }
1057 
1058 /*
1059  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1060  * block group lock of all groups for this page; do not hold the BG lock when
1061  * calling this routine!
1062  */
1063 static noinline_for_stack int
1064 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1065 					struct ext4_buddy *e4b)
1066 {
1067 	int blocks_per_page;
1068 	int block;
1069 	int pnum;
1070 	int poff;
1071 	struct page *page;
1072 	int ret;
1073 	struct ext4_group_info *grp;
1074 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1075 	struct inode *inode = sbi->s_buddy_cache;
1076 
1077 	mb_debug(1, "load group %u\n", group);
1078 
1079 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1080 	grp = ext4_get_group_info(sb, group);
1081 
1082 	e4b->bd_blkbits = sb->s_blocksize_bits;
1083 	e4b->bd_info = grp;
1084 	e4b->bd_sb = sb;
1085 	e4b->bd_group = group;
1086 	e4b->bd_buddy_page = NULL;
1087 	e4b->bd_bitmap_page = NULL;
1088 
1089 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1090 		/*
1091 		 * we need full data about the group
1092 		 * to make a good selection
1093 		 */
1094 		ret = ext4_mb_init_group(sb, group);
1095 		if (ret)
1096 			return ret;
1097 	}
1098 
1099 	/*
1100 	 * the buddy cache inode stores the block bitmap
1101 	 * and buddy information in consecutive blocks.
1102 	 * So for each group we need two blocks.
1103 	 */
1104 	block = group * 2;
1105 	pnum = block / blocks_per_page;
1106 	poff = block % blocks_per_page;
1107 
1108 	/* we could use find_or_create_page(), but it locks the page,
1109 	 * which we'd like to avoid in the fast path ... */
1110 	page = find_get_page(inode->i_mapping, pnum);
1111 	if (page == NULL || !PageUptodate(page)) {
1112 		if (page)
1113 			/*
1114 			 * drop the page reference and try
1115 			 * to get the page with the lock held. If the
1116 			 * page is not uptodate, that implies
1117 			 * somebody just created the page but
1118 			 * has yet to initialize it. So
1119 			 * wait for it to be initialized.
1120 			 */
1121 			page_cache_release(page);
1122 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1123 		if (page) {
1124 			BUG_ON(page->mapping != inode->i_mapping);
1125 			if (!PageUptodate(page)) {
1126 				ret = ext4_mb_init_cache(page, NULL);
1127 				if (ret) {
1128 					unlock_page(page);
1129 					goto err;
1130 				}
1131 				mb_cmp_bitmaps(e4b, page_address(page) +
1132 					       (poff * sb->s_blocksize));
1133 			}
1134 			unlock_page(page);
1135 		}
1136 	}
1137 	if (page == NULL || !PageUptodate(page)) {
1138 		ret = -EIO;
1139 		goto err;
1140 	}
1141 	e4b->bd_bitmap_page = page;
1142 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1143 	mark_page_accessed(page);
1144 
1145 	block++;
1146 	pnum = block / blocks_per_page;
1147 	poff = block % blocks_per_page;
1148 
1149 	page = find_get_page(inode->i_mapping, pnum);
1150 	if (page == NULL || !PageUptodate(page)) {
1151 		if (page)
1152 			page_cache_release(page);
1153 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1154 		if (page) {
1155 			BUG_ON(page->mapping != inode->i_mapping);
1156 			if (!PageUptodate(page)) {
1157 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
1158 				if (ret) {
1159 					unlock_page(page);
1160 					goto err;
1161 				}
1162 			}
1163 			unlock_page(page);
1164 		}
1165 	}
1166 	if (page == NULL || !PageUptodate(page)) {
1167 		ret = -EIO;
1168 		goto err;
1169 	}
1170 	e4b->bd_buddy_page = page;
1171 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1172 	mark_page_accessed(page);
1173 
1174 	BUG_ON(e4b->bd_bitmap_page == NULL);
1175 	BUG_ON(e4b->bd_buddy_page == NULL);
1176 
1177 	return 0;
1178 
1179 err:
1180 	if (page)
1181 		page_cache_release(page);
1182 	if (e4b->bd_bitmap_page)
1183 		page_cache_release(e4b->bd_bitmap_page);
1184 	if (e4b->bd_buddy_page)
1185 		page_cache_release(e4b->bd_buddy_page);
1186 	e4b->bd_buddy = NULL;
1187 	e4b->bd_bitmap = NULL;
1188 	return ret;
1189 }
1190 
1191 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1192 {
1193 	if (e4b->bd_bitmap_page)
1194 		page_cache_release(e4b->bd_bitmap_page);
1195 	if (e4b->bd_buddy_page)
1196 		page_cache_release(e4b->bd_buddy_page);
1197 }
1198 
1199 
1200 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1201 {
1202 	int order = 1;
1203 	void *bb;
1204 
1205 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1206 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1207 
1208 	bb = e4b->bd_buddy;
1209 	while (order <= e4b->bd_blkbits + 1) {
1210 		block = block >> 1;
1211 		if (!mb_test_bit(block, bb)) {
1212 			/* this block is part of buddy of order 'order' */
1213 			return order;
1214 		}
1215 		bb += 1 << (e4b->bd_blkbits - order);
1216 		order++;
1217 	}
1218 	return 0;
1219 }
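/*
 * For example, if clusters 16-23 form a single free order-3 chunk, only the
 * order-3 buddy bit of that chunk is clear (see ext4_mb_mark_free_simple()).
 * mb_find_order_for_block(e4b, 20) then walks the buddy bitmaps upwards:
 *
 *	order 1, index 20 >> 1 = 10: bit set   -> keep going
 *	order 2, index 20 >> 2 = 5:  bit set   -> keep going
 *	order 3, index 20 >> 3 = 2:  bit clear -> return 3
 */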
1220 
1221 static void mb_clear_bits(void *bm, int cur, int len)
1222 {
1223 	__u32 *addr;
1224 
1225 	len = cur + len;
1226 	while (cur < len) {
1227 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1228 			/* fast path: clear whole word at once */
1229 			addr = bm + (cur >> 3);
1230 			*addr = 0;
1231 			cur += 32;
1232 			continue;
1233 		}
1234 		mb_clear_bit(cur, bm);
1235 		cur++;
1236 	}
1237 }
1238 
1239 void ext4_set_bits(void *bm, int cur, int len)
1240 {
1241 	__u32 *addr;
1242 
1243 	len = cur + len;
1244 	while (cur < len) {
1245 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1246 			/* fast path: set whole word at once */
1247 			addr = bm + (cur >> 3);
1248 			*addr = 0xffffffff;
1249 			cur += 32;
1250 			continue;
1251 		}
1252 		mb_set_bit(cur, bm);
1253 		cur++;
1254 	}
1255 }
1256 
1257 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1258 			  int first, int count)
1259 {
1260 	int block = 0;
1261 	int max = 0;
1262 	int order;
1263 	void *buddy;
1264 	void *buddy2;
1265 	struct super_block *sb = e4b->bd_sb;
1266 
1267 	BUG_ON(first + count > (sb->s_blocksize << 3));
1268 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1269 	mb_check_buddy(e4b);
1270 	mb_free_blocks_double(inode, e4b, first, count);
1271 
1272 	e4b->bd_info->bb_free += count;
1273 	if (first < e4b->bd_info->bb_first_free)
1274 		e4b->bd_info->bb_first_free = first;
1275 
1276 	/* let's maintain fragments counter */
1277 	if (first != 0)
1278 		block = !mb_test_bit(first - 1, e4b->bd_bitmap);
1279 	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1280 		max = !mb_test_bit(first + count, e4b->bd_bitmap);
1281 	if (block && max)
1282 		e4b->bd_info->bb_fragments--;
1283 	else if (!block && !max)
1284 		e4b->bd_info->bb_fragments++;
1285 
1286 	/* let's maintain buddy itself */
1287 	while (count-- > 0) {
1288 		block = first++;
1289 		order = 0;
1290 
1291 		if (!mb_test_bit(block, e4b->bd_bitmap)) {
1292 			ext4_fsblk_t blocknr;
1293 
1294 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1295 			blocknr += EXT4_C2B(EXT4_SB(sb), block);
1296 			ext4_grp_locked_error(sb, e4b->bd_group,
1297 					      inode ? inode->i_ino : 0,
1298 					      blocknr,
1299 					      "freeing already freed block "
1300 					      "(bit %u)", block);
1301 		}
1302 		mb_clear_bit(block, e4b->bd_bitmap);
1303 		e4b->bd_info->bb_counters[order]++;
1304 
1305 		/* start of the buddy */
1306 		buddy = mb_find_buddy(e4b, order, &max);
1307 
1308 		do {
1309 			block &= ~1UL;
1310 			if (mb_test_bit(block, buddy) ||
1311 					mb_test_bit(block + 1, buddy))
1312 				break;
1313 
1314 			/* both the buddies are free, try to coalesce them */
1315 			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1316 
1317 			if (!buddy2)
1318 				break;
1319 
1320 			if (order > 0) {
1321 				/* for special purposes, we don't set
1322 				 * free bits in bitmap */
1323 				mb_set_bit(block, buddy);
1324 				mb_set_bit(block + 1, buddy);
1325 			}
1326 			e4b->bd_info->bb_counters[order]--;
1327 			e4b->bd_info->bb_counters[order]--;
1328 
1329 			block = block >> 1;
1330 			order++;
1331 			e4b->bd_info->bb_counters[order]++;
1332 
1333 			mb_clear_bit(block, buddy2);
1334 			buddy = buddy2;
1335 		} while (1);
1336 	}
1337 	mb_set_largest_free_order(sb, e4b->bd_info);
1338 	mb_check_buddy(e4b);
1339 }
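/*
 * Coalescing example with arbitrary numbers: freeing cluster 7 while
 * cluster 6 is already free.  The loop above clears bit 7 in the block
 * bitmap, counts it as an order-0 chunk, then sees that bits 6 and 7 are
 * both clear: it removes the two order-0 chunks from bb_counters[0],
 * clears the order-1 bit covering the pair (6,7) and bumps bb_counters[1].
 * If the pair (4,5) were also free as an order-1 chunk, the merge would
 * continue to order 2, and so on up the buddy.
 */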
1340 
1341 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1342 				int needed, struct ext4_free_extent *ex)
1343 {
1344 	int next = block;
1345 	int max;
1346 	void *buddy;
1347 
1348 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1349 	BUG_ON(ex == NULL);
1350 
1351 	buddy = mb_find_buddy(e4b, order, &max);
1352 	BUG_ON(buddy == NULL);
1353 	BUG_ON(block >= max);
1354 	if (mb_test_bit(block, buddy)) {
1355 		ex->fe_len = 0;
1356 		ex->fe_start = 0;
1357 		ex->fe_group = 0;
1358 		return 0;
1359 	}
1360 
1361 	/* FIXME drop order completely? */
1362 	if (likely(order == 0)) {
1363 		/* find actual order */
1364 		order = mb_find_order_for_block(e4b, block);
1365 		block = block >> order;
1366 	}
1367 
1368 	ex->fe_len = 1 << order;
1369 	ex->fe_start = block << order;
1370 	ex->fe_group = e4b->bd_group;
1371 
1372 	/* calc difference from given start */
1373 	next = next - ex->fe_start;
1374 	ex->fe_len -= next;
1375 	ex->fe_start += next;
1376 
1377 	while (needed > ex->fe_len &&
1378 	       (buddy = mb_find_buddy(e4b, order, &max))) {
1379 
1380 		if (block + 1 >= max)
1381 			break;
1382 
1383 		next = (block + 1) * (1 << order);
1384 		if (mb_test_bit(next, e4b->bd_bitmap))
1385 			break;
1386 
1387 		order = mb_find_order_for_block(e4b, next);
1388 
1389 		block = next >> order;
1390 		ex->fe_len += 1 << order;
1391 	}
1392 
1393 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1394 	return ex->fe_len;
1395 }
1396 
1397 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1398 {
1399 	int ord;
1400 	int mlen = 0;
1401 	int max = 0;
1402 	int cur;
1403 	int start = ex->fe_start;
1404 	int len = ex->fe_len;
1405 	unsigned ret = 0;
1406 	int len0 = len;
1407 	void *buddy;
1408 
1409 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1410 	BUG_ON(e4b->bd_group != ex->fe_group);
1411 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1412 	mb_check_buddy(e4b);
1413 	mb_mark_used_double(e4b, start, len);
1414 
1415 	e4b->bd_info->bb_free -= len;
1416 	if (e4b->bd_info->bb_first_free == start)
1417 		e4b->bd_info->bb_first_free += len;
1418 
1419 	/* let's maintain fragments counter */
1420 	if (start != 0)
1421 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1422 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1423 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1424 	if (mlen && max)
1425 		e4b->bd_info->bb_fragments++;
1426 	else if (!mlen && !max)
1427 		e4b->bd_info->bb_fragments--;
1428 
1429 	/* let's maintain buddy itself */
1430 	while (len) {
1431 		ord = mb_find_order_for_block(e4b, start);
1432 
1433 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1434 			/* the whole chunk may be allocated at once! */
1435 			mlen = 1 << ord;
1436 			buddy = mb_find_buddy(e4b, ord, &max);
1437 			BUG_ON((start >> ord) >= max);
1438 			mb_set_bit(start >> ord, buddy);
1439 			e4b->bd_info->bb_counters[ord]--;
1440 			start += mlen;
1441 			len -= mlen;
1442 			BUG_ON(len < 0);
1443 			continue;
1444 		}
1445 
1446 		/* store for history */
1447 		if (ret == 0)
1448 			ret = len | (ord << 16);
1449 
1450 		/* we have to split large buddy */
1451 		BUG_ON(ord <= 0);
1452 		buddy = mb_find_buddy(e4b, ord, &max);
1453 		mb_set_bit(start >> ord, buddy);
1454 		e4b->bd_info->bb_counters[ord]--;
1455 
1456 		ord--;
1457 		cur = (start >> ord) & ~1U;
1458 		buddy = mb_find_buddy(e4b, ord, &max);
1459 		mb_clear_bit(cur, buddy);
1460 		mb_clear_bit(cur + 1, buddy);
1461 		e4b->bd_info->bb_counters[ord]++;
1462 		e4b->bd_info->bb_counters[ord]++;
1463 	}
1464 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1465 
1466 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1467 	mb_check_buddy(e4b);
1468 
1469 	return ret;
1470 }
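/*
 * Splitting example with arbitrary numbers: allocating 3 clusters starting
 * at cluster 8 out of a free order-3 chunk covering 8-15.  The loop above
 * repeatedly splits the chunk: 8-15 into 8-11 and 12-15, 8-11 into 8-9 and
 * 10-11, takes 8-9 whole, then splits 10-11 and takes 10.  Afterwards 11
 * remains as an order-0 free chunk and 12-15 as an order-2 free chunk, so
 * bb_counters[3] drops by one while bb_counters[2] and bb_counters[0] each
 * gain one; finally ext4_set_bits() marks clusters 8-10 used in the block
 * bitmap.
 */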
1471 
1472 /*
1473  * Must be called under group lock!
1474  */
1475 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1476 					struct ext4_buddy *e4b)
1477 {
1478 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1479 	int ret;
1480 
1481 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1482 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1483 
1484 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1485 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1486 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1487 
1488 	/* preallocation can change ac_b_ex, thus we store actually
1489 	 * allocated blocks for history */
1490 	ac->ac_f_ex = ac->ac_b_ex;
1491 
1492 	ac->ac_status = AC_STATUS_FOUND;
1493 	ac->ac_tail = ret & 0xffff;
1494 	ac->ac_buddy = ret >> 16;
1495 
1496 	/*
1497 	 * take the page reference. We want the page to be pinned
1498 	 * so that we don't get an ext4_mb_init_cache() call for this
1499 	 * group until we update the bitmap. That would mean we
1500 	 * double allocate blocks. The reference is dropped
1501 	 * in ext4_mb_release_context
1502 	 */
1503 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1504 	get_page(ac->ac_bitmap_page);
1505 	ac->ac_buddy_page = e4b->bd_buddy_page;
1506 	get_page(ac->ac_buddy_page);
1507 	/* store last allocated for subsequent stream allocation */
1508 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1509 		spin_lock(&sbi->s_md_lock);
1510 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1511 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1512 		spin_unlock(&sbi->s_md_lock);
1513 	}
1514 }
1515 
1516 /*
1517  * regular allocator, for general purposes allocation
1518  */
1519 
1520 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1521 					struct ext4_buddy *e4b,
1522 					int finish_group)
1523 {
1524 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1525 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1526 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1527 	struct ext4_free_extent ex;
1528 	int max;
1529 
1530 	if (ac->ac_status == AC_STATUS_FOUND)
1531 		return;
1532 	/*
1533 	 * We don't want to scan for a whole year
1534 	 */
1535 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1536 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1537 		ac->ac_status = AC_STATUS_BREAK;
1538 		return;
1539 	}
1540 
1541 	/*
1542 	 * Haven't found good chunk so far, let's continue
1543 	 */
1544 	if (bex->fe_len < gex->fe_len)
1545 		return;
1546 
1547 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1548 			&& bex->fe_group == e4b->bd_group) {
1549 		/* recheck chunk's availability - we don't know
1550 		 * when it was found (within this lock-unlock
1551 		 * period or not) */
1552 		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1553 		if (max >= gex->fe_len) {
1554 			ext4_mb_use_best_found(ac, e4b);
1555 			return;
1556 		}
1557 	}
1558 }
1559 
1560 /*
1561  * The routine checks whether the found extent is good enough. If it is,
1562  * then the extent gets marked used and a flag is set in the context
1563  * to stop scanning. Otherwise, the extent is compared with the
1564  * previously found extent and if the new one is better, then it's stored
1565  * in the context. Later, the best found extent will be used, if
1566  * mballoc can't find a good enough extent.
1567  *
1568  * FIXME: real allocation policy is to be designed yet!
1569  */
1570 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1571 					struct ext4_free_extent *ex,
1572 					struct ext4_buddy *e4b)
1573 {
1574 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1575 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1576 
1577 	BUG_ON(ex->fe_len <= 0);
1578 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1579 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1580 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1581 
1582 	ac->ac_found++;
1583 
1584 	/*
1585 	 * The special case - take what you catch first
1586 	 */
1587 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1588 		*bex = *ex;
1589 		ext4_mb_use_best_found(ac, e4b);
1590 		return;
1591 	}
1592 
1593 	/*
1594 	 * Let's check whether the chunk is good enough
1595 	 */
1596 	if (ex->fe_len == gex->fe_len) {
1597 		*bex = *ex;
1598 		ext4_mb_use_best_found(ac, e4b);
1599 		return;
1600 	}
1601 
1602 	/*
1603 	 * If this is first found extent, just store it in the context
1604 	 */
1605 	if (bex->fe_len == 0) {
1606 		*bex = *ex;
1607 		return;
1608 	}
1609 
1610 	/*
1611 	 * If new found extent is better, store it in the context
1612 	 */
1613 	if (bex->fe_len < gex->fe_len) {
1614 		/* if the request isn't satisfied, any found extent
1615 		 * larger than previous best one is better */
1616 		if (ex->fe_len > bex->fe_len)
1617 			*bex = *ex;
1618 	} else if (ex->fe_len > gex->fe_len) {
1619 		/* if the request is satisfied, then we try to find
1620 		 * an extent that still satisfies the request, but is
1621 		 * smaller than the previous one */
1622 		if (ex->fe_len < bex->fe_len)
1623 			*bex = *ex;
1624 	}
1625 
1626 	ext4_mb_check_limits(ac, e4b, 0);
1627 }
1628 
1629 static noinline_for_stack
1630 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1631 					struct ext4_buddy *e4b)
1632 {
1633 	struct ext4_free_extent ex = ac->ac_b_ex;
1634 	ext4_group_t group = ex.fe_group;
1635 	int max;
1636 	int err;
1637 
1638 	BUG_ON(ex.fe_len <= 0);
1639 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1640 	if (err)
1641 		return err;
1642 
1643 	ext4_lock_group(ac->ac_sb, group);
1644 	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1645 
1646 	if (max > 0) {
1647 		ac->ac_b_ex = ex;
1648 		ext4_mb_use_best_found(ac, e4b);
1649 	}
1650 
1651 	ext4_unlock_group(ac->ac_sb, group);
1652 	ext4_mb_unload_buddy(e4b);
1653 
1654 	return 0;
1655 }
1656 
1657 static noinline_for_stack
1658 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1659 				struct ext4_buddy *e4b)
1660 {
1661 	ext4_group_t group = ac->ac_g_ex.fe_group;
1662 	int max;
1663 	int err;
1664 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1665 	struct ext4_free_extent ex;
1666 
1667 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1668 		return 0;
1669 
1670 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1671 	if (err)
1672 		return err;
1673 
1674 	ext4_lock_group(ac->ac_sb, group);
1675 	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1676 			     ac->ac_g_ex.fe_len, &ex);
1677 
1678 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1679 		ext4_fsblk_t start;
1680 
1681 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1682 			ex.fe_start;
1683 		/* use do_div to get remainder (would be 64-bit modulo) */
1684 		if (do_div(start, sbi->s_stripe) == 0) {
1685 			ac->ac_found++;
1686 			ac->ac_b_ex = ex;
1687 			ext4_mb_use_best_found(ac, e4b);
1688 		}
1689 	} else if (max >= ac->ac_g_ex.fe_len) {
1690 		BUG_ON(ex.fe_len <= 0);
1691 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1692 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1693 		ac->ac_found++;
1694 		ac->ac_b_ex = ex;
1695 		ext4_mb_use_best_found(ac, e4b);
1696 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1697 		/* Sometimes, caller may want to merge even small
1698 		 * number of blocks to an existing extent */
1699 		BUG_ON(ex.fe_len <= 0);
1700 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1701 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1702 		ac->ac_found++;
1703 		ac->ac_b_ex = ex;
1704 		ext4_mb_use_best_found(ac, e4b);
1705 	}
1706 	ext4_unlock_group(ac->ac_sb, group);
1707 	ext4_mb_unload_buddy(e4b);
1708 
1709 	return 0;
1710 }
1711 
1712 /*
1713  * The routine scans buddy structures (not the bitmap!) from the given order
1714  * up to the max order and tries to find a big enough chunk to satisfy the request
1715  */
1716 static noinline_for_stack
1717 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1718 					struct ext4_buddy *e4b)
1719 {
1720 	struct super_block *sb = ac->ac_sb;
1721 	struct ext4_group_info *grp = e4b->bd_info;
1722 	void *buddy;
1723 	int i;
1724 	int k;
1725 	int max;
1726 
1727 	BUG_ON(ac->ac_2order <= 0);
1728 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1729 		if (grp->bb_counters[i] == 0)
1730 			continue;
1731 
1732 		buddy = mb_find_buddy(e4b, i, &max);
1733 		BUG_ON(buddy == NULL);
1734 
1735 		k = mb_find_next_zero_bit(buddy, max, 0);
1736 		BUG_ON(k >= max);
1737 
1738 		ac->ac_found++;
1739 
1740 		ac->ac_b_ex.fe_len = 1 << i;
1741 		ac->ac_b_ex.fe_start = k << i;
1742 		ac->ac_b_ex.fe_group = e4b->bd_group;
1743 
1744 		ext4_mb_use_best_found(ac, e4b);
1745 
1746 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1747 
1748 		if (EXT4_SB(sb)->s_mb_stats)
1749 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1750 
1751 		break;
1752 	}
1753 }
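/*
 * Editor-added illustrative sketch: translating a free slot found in
 * the order-i buddy bitmap into the extent set up above.  Bit k of
 * that bitmap stands for a chunk of 2^i clusters starting at cluster
 * k << i within the group, which is exactly how fe_start and fe_len
 * are derived.  The struct and helper below are hypothetical.
 */
struct example_extent {
	unsigned int start;	/* first cluster within the group */
	unsigned int len;	/* length in clusters */
};

static inline struct example_extent example_buddy_slot_to_extent(int order, int k)
{
	struct example_extent ex = {
		.start = (unsigned int)k << order,
		.len = 1U << order,
	};
	return ex;
}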
1754 
1755 /*
1756  * The routine scans the group and measures all found extents.
1757  * In order to optimize scanning, the caller must pass the number of
1758  * free blocks in the group, so the routine knows the upper limit.
1759  */
1760 static noinline_for_stack
1761 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1762 					struct ext4_buddy *e4b)
1763 {
1764 	struct super_block *sb = ac->ac_sb;
1765 	void *bitmap = e4b->bd_bitmap;
1766 	struct ext4_free_extent ex;
1767 	int i;
1768 	int free;
1769 
1770 	free = e4b->bd_info->bb_free;
1771 	BUG_ON(free <= 0);
1772 
1773 	i = e4b->bd_info->bb_first_free;
1774 
1775 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1776 		i = mb_find_next_zero_bit(bitmap,
1777 						EXT4_CLUSTERS_PER_GROUP(sb), i);
1778 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
1779 			/*
1780 			 * If we have a corrupt bitmap, we won't find any
1781 			 * free blocks even though group info says we
1782 			 * have free blocks
1783 			 */
1784 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1785 					"%d free clusters as per "
1786 					"group info. But bitmap says 0",
1787 					free);
1788 			break;
1789 		}
1790 
1791 		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1792 		BUG_ON(ex.fe_len <= 0);
1793 		if (free < ex.fe_len) {
1794 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
1795 					"%d free clusters as per "
1796 					"group info. But got %d blocks",
1797 					free, ex.fe_len);
1798 			/*
1799 			 * The number of free blocks differs. This mostly
1800 			 * indicates that the bitmap is corrupt. So exit
1801 			 * without claiming the space.
1802 			 */
1803 			break;
1804 		}
1805 
1806 		ext4_mb_measure_extent(ac, &ex, e4b);
1807 
1808 		i += ex.fe_len;
1809 		free -= ex.fe_len;
1810 	}
1811 
1812 	ext4_mb_check_limits(ac, e4b, 1);
1813 }
1814 
1815 /*
1816  * This is a special case for storage like RAID5:
1817  * we try to find stripe-aligned chunks for stripe-size-multiple requests
1818  */
1819 static noinline_for_stack
1820 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1821 				 struct ext4_buddy *e4b)
1822 {
1823 	struct super_block *sb = ac->ac_sb;
1824 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1825 	void *bitmap = e4b->bd_bitmap;
1826 	struct ext4_free_extent ex;
1827 	ext4_fsblk_t first_group_block;
1828 	ext4_fsblk_t a;
1829 	ext4_grpblk_t i;
1830 	int max;
1831 
1832 	BUG_ON(sbi->s_stripe == 0);
1833 
1834 	/* find first stripe-aligned block in group */
1835 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1836 
1837 	a = first_group_block + sbi->s_stripe - 1;
1838 	do_div(a, sbi->s_stripe);
1839 	i = (a * sbi->s_stripe) - first_group_block;
1840 
1841 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
1842 		if (!mb_test_bit(i, bitmap)) {
1843 			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1844 			if (max >= sbi->s_stripe) {
1845 				ac->ac_found++;
1846 				ac->ac_b_ex = ex;
1847 				ext4_mb_use_best_found(ac, e4b);
1848 				break;
1849 			}
1850 		}
1851 		i += sbi->s_stripe;
1852 	}
1853 }
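/*
 * Editor-added illustrative sketch: how ext4_mb_scan_aligned() above
 * picks its starting offset.  The group's first absolute block is
 * rounded up to the next multiple of s_stripe and converted back to a
 * group-relative offset; the scan then advances one stripe at a time.
 * The helper below is hypothetical, assumes stripe != 0, and uses
 * plain C arithmetic in place of do_div().
 */
static inline unsigned int example_first_aligned_offset(unsigned long long first_group_block,
							unsigned int stripe)
{
	/* round up to the next stripe boundary ... */
	unsigned long long a = first_group_block + stripe - 1;

	a -= a % stripe;
	/* ... and express it relative to the start of the group */
	return (unsigned int)(a - first_group_block);
}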
1854 
1855 /* This is now called BEFORE we load the buddy bitmap. */
1856 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1857 				ext4_group_t group, int cr)
1858 {
1859 	unsigned free, fragments;
1860 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1861 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1862 
1863 	BUG_ON(cr < 0 || cr >= 4);
1864 
1865 	/* We only do this if the grp has never been initialized */
1866 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1867 		int ret = ext4_mb_init_group(ac->ac_sb, group);
1868 		if (ret)
1869 			return 0;
1870 	}
1871 
1872 	free = grp->bb_free;
1873 	fragments = grp->bb_fragments;
1874 	if (free == 0)
1875 		return 0;
1876 	if (fragments == 0)
1877 		return 0;
1878 
1879 	switch (cr) {
1880 	case 0:
1881 		BUG_ON(ac->ac_2order == 0);
1882 
1883 		if (grp->bb_largest_free_order < ac->ac_2order)
1884 			return 0;
1885 
1886 		/* Avoid using the first bg of a flexgroup for data files */
1887 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1888 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1889 		    ((group % flex_size) == 0))
1890 			return 0;
1891 
1892 		return 1;
1893 	case 1:
1894 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1895 			return 1;
1896 		break;
1897 	case 2:
1898 		if (free >= ac->ac_g_ex.fe_len)
1899 			return 1;
1900 		break;
1901 	case 3:
1902 		return 1;
1903 	default:
1904 		BUG();
1905 	}
1906 
1907 	return 0;
1908 }
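/*
 * Editor-added illustrative sketch: the cr (criteria) ladder applied
 * by ext4_mb_good_group() above, from strictest to loosest, ignoring
 * the flex_bg special case for cr == 0.  cr 0 demands a buddy chunk of
 * at least the requested order, cr 1 wants the average free extent to
 * be big enough, cr 2 only needs enough free clusters in total, and
 * cr 3 accepts any group with free space.  Hypothetical helper.
 */
static inline int example_group_acceptable(int cr, unsigned int free,
					   unsigned int fragments,
					   int largest_free_order,
					   int wanted_order,
					   unsigned int wanted_len)
{
	if (free == 0 || fragments == 0)
		return 0;

	switch (cr) {
	case 0:
		return largest_free_order >= wanted_order;
	case 1:
		return (free / fragments) >= wanted_len;
	case 2:
		return free >= wanted_len;
	case 3:
		return 1;	/* anything with free space */
	default:
		return 0;
	}
}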
1909 
1910 static noinline_for_stack int
1911 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1912 {
1913 	ext4_group_t ngroups, group, i;
1914 	int cr;
1915 	int err = 0;
1916 	struct ext4_sb_info *sbi;
1917 	struct super_block *sb;
1918 	struct ext4_buddy e4b;
1919 
1920 	sb = ac->ac_sb;
1921 	sbi = EXT4_SB(sb);
1922 	ngroups = ext4_get_groups_count(sb);
1923 	/* non-extent files are limited to low blocks/groups */
1924 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1925 		ngroups = sbi->s_blockfile_groups;
1926 
1927 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1928 
1929 	/* first, try the goal */
1930 	err = ext4_mb_find_by_goal(ac, &e4b);
1931 	if (err || ac->ac_status == AC_STATUS_FOUND)
1932 		goto out;
1933 
1934 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1935 		goto out;
1936 
1937 	/*
1938 	 * ac->ac_2order is set only if the fe_len is a power of 2;
1939 	 * if ac_2order is set we also set the criteria to 0 so that we
1940 	 * try exact allocation using the buddy.
1941 	 */
1942 	i = fls(ac->ac_g_ex.fe_len);
1943 	ac->ac_2order = 0;
1944 	/*
1945 	 * We search using buddy data only if the order of the request
1946 	 * is greater than or equal to sbi->s_mb_order2_reqs.
1947 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
1948 	 */
1949 	if (i >= sbi->s_mb_order2_reqs) {
1950 		/*
1951 		 * This should tell if fe_len is exactly a power of 2
1952 		 */
1953 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1954 			ac->ac_2order = i - 1;
1955 	}
1956 
1957 	/* if stream allocation is enabled, use global goal */
1958 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1959 		/* TBD: may be hot point */
1960 		spin_lock(&sbi->s_md_lock);
1961 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1962 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1963 		spin_unlock(&sbi->s_md_lock);
1964 	}
1965 
1966 	/* Let's just scan groups to find more or less suitable blocks */
1967 	cr = ac->ac_2order ? 0 : 1;
1968 	/*
1969 	 * cr == 0 try to get exact allocation,
1970 	 * cr == 3  try to get anything
1971 	 */
1972 repeat:
1973 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1974 		ac->ac_criteria = cr;
1975 		/*
1976 		 * search for the right group, starting
1977 		 * from the goal value specified
1978 		 */
1979 		group = ac->ac_g_ex.fe_group;
1980 
1981 		for (i = 0; i < ngroups; group++, i++) {
1982 			if (group == ngroups)
1983 				group = 0;
1984 
1985 			/* This now checks without needing the buddy page */
1986 			if (!ext4_mb_good_group(ac, group, cr))
1987 				continue;
1988 
1989 			err = ext4_mb_load_buddy(sb, group, &e4b);
1990 			if (err)
1991 				goto out;
1992 
1993 			ext4_lock_group(sb, group);
1994 
1995 			/*
1996 			 * We need to check again after locking the
1997 			 * block group
1998 			 */
1999 			if (!ext4_mb_good_group(ac, group, cr)) {
2000 				ext4_unlock_group(sb, group);
2001 				ext4_mb_unload_buddy(&e4b);
2002 				continue;
2003 			}
2004 
2005 			ac->ac_groups_scanned++;
2006 			if (cr == 0)
2007 				ext4_mb_simple_scan_group(ac, &e4b);
2008 			else if (cr == 1 && sbi->s_stripe &&
2009 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2010 				ext4_mb_scan_aligned(ac, &e4b);
2011 			else
2012 				ext4_mb_complex_scan_group(ac, &e4b);
2013 
2014 			ext4_unlock_group(sb, group);
2015 			ext4_mb_unload_buddy(&e4b);
2016 
2017 			if (ac->ac_status != AC_STATUS_CONTINUE)
2018 				break;
2019 		}
2020 	}
2021 
2022 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2023 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2024 		/*
2025 		 * We've been searching too long. Let's try to allocate
2026 		 * the best chunk we've found so far
2027 		 */
2028 
2029 		ext4_mb_try_best_found(ac, &e4b);
2030 		if (ac->ac_status != AC_STATUS_FOUND) {
2031 			/*
2032 			 * Someone more lucky has already allocated it.
2033 			 * The only thing we can do is just take first
2034 			 * found block(s)
2035 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2036 			 */
2037 			ac->ac_b_ex.fe_group = 0;
2038 			ac->ac_b_ex.fe_start = 0;
2039 			ac->ac_b_ex.fe_len = 0;
2040 			ac->ac_status = AC_STATUS_CONTINUE;
2041 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2042 			cr = 3;
2043 			atomic_inc(&sbi->s_mb_lost_chunks);
2044 			goto repeat;
2045 		}
2046 	}
2047 out:
2048 	return err;
2049 }
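/*
 * Editor-added illustrative sketch: the power-of-two test used when
 * setting ac_2order in ext4_mb_regular_allocator() above.  fls()
 * returns the (1-based) index of the highest set bit; if clearing that
 * bit leaves nothing, the length is an exact power of two and the
 * request order is fls(len) - 1.  The threshold against
 * s_mb_order2_reqs is omitted here.  Hypothetical helper.
 */
static inline int example_request_order(unsigned int len)
{
	int order = 0;
	unsigned int n = len;

	if (len == 0 || (len & (len - 1)) != 0)
		return 0;	/* not a power of two: scan normally */

	while (n > 1) {		/* open-coded fls(len) - 1 */
		n >>= 1;
		order++;
	}
	return order;
}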
2050 
2051 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2052 {
2053 	struct super_block *sb = seq->private;
2054 	ext4_group_t group;
2055 
2056 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2057 		return NULL;
2058 	group = *pos + 1;
2059 	return (void *) ((unsigned long) group);
2060 }
2061 
2062 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2063 {
2064 	struct super_block *sb = seq->private;
2065 	ext4_group_t group;
2066 
2067 	++*pos;
2068 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2069 		return NULL;
2070 	group = *pos + 1;
2071 	return (void *) ((unsigned long) group);
2072 }
2073 
2074 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2075 {
2076 	struct super_block *sb = seq->private;
2077 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2078 	int i;
2079 	int err, buddy_loaded = 0;
2080 	struct ext4_buddy e4b;
2081 	struct ext4_group_info *grinfo;
2082 	struct sg {
2083 		struct ext4_group_info info;
2084 		ext4_grpblk_t counters[16];
2085 	} sg;
2086 
2087 	group--;
2088 	if (group == 0)
2089 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2090 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2091 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2092 			   "group", "free", "frags", "first",
2093 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2094 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2095 
2096 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2097 		sizeof(struct ext4_group_info);
2098 	grinfo = ext4_get_group_info(sb, group);
2099 	/* Load the group info in memory only if not already loaded. */
2100 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2101 		err = ext4_mb_load_buddy(sb, group, &e4b);
2102 		if (err) {
2103 			seq_printf(seq, "#%-5u: I/O error\n", group);
2104 			return 0;
2105 		}
2106 		buddy_loaded = 1;
2107 	}
2108 
2109 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2110 
2111 	if (buddy_loaded)
2112 		ext4_mb_unload_buddy(&e4b);
2113 
2114 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2115 			sg.info.bb_fragments, sg.info.bb_first_free);
2116 	for (i = 0; i <= 13; i++)
2117 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2118 				sg.info.bb_counters[i] : 0);
2119 	seq_printf(seq, " ]\n");
2120 
2121 	return 0;
2122 }
2123 
2124 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2125 {
2126 }
2127 
2128 static const struct seq_operations ext4_mb_seq_groups_ops = {
2129 	.start  = ext4_mb_seq_groups_start,
2130 	.next   = ext4_mb_seq_groups_next,
2131 	.stop   = ext4_mb_seq_groups_stop,
2132 	.show   = ext4_mb_seq_groups_show,
2133 };
2134 
2135 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2136 {
2137 	struct super_block *sb = PDE(inode)->data;
2138 	int rc;
2139 
2140 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2141 	if (rc == 0) {
2142 		struct seq_file *m = file->private_data;
2143 		m->private = sb;
2144 	}
2145 	return rc;
2146 
2147 }
2148 
2149 static const struct file_operations ext4_mb_seq_groups_fops = {
2150 	.owner		= THIS_MODULE,
2151 	.open		= ext4_mb_seq_groups_open,
2152 	.read		= seq_read,
2153 	.llseek		= seq_lseek,
2154 	.release	= seq_release,
2155 };
2156 
2157 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2158 {
2159 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2160 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2161 
2162 	BUG_ON(!cachep);
2163 	return cachep;
2164 }
2165 
2166 /* Create and initialize ext4_group_info data for the given group. */
2167 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2168 			  struct ext4_group_desc *desc)
2169 {
2170 	int i;
2171 	int metalen = 0;
2172 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2173 	struct ext4_group_info **meta_group_info;
2174 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2175 
2176 	/*
2177 	 * First check if this group is the first of a descriptor block.
2178 	 * If so, we have to allocate a new table of pointers
2179 	 * to ext4_group_info structures
2180 	 */
2181 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2182 		metalen = sizeof(*meta_group_info) <<
2183 			EXT4_DESC_PER_BLOCK_BITS(sb);
2184 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2185 		if (meta_group_info == NULL) {
2186 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2187 				 "for a buddy group");
2188 			goto exit_meta_group_info;
2189 		}
2190 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2191 			meta_group_info;
2192 	}
2193 
2194 	meta_group_info =
2195 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2196 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2197 
2198 	meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
2199 	if (meta_group_info[i] == NULL) {
2200 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2201 		goto exit_group_info;
2202 	}
2203 	memset(meta_group_info[i], 0, kmem_cache_size(cachep));
2204 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2205 		&(meta_group_info[i]->bb_state));
2206 
2207 	/*
2208 	 * initialize bb_free to be able to skip
2209 	 * empty groups without initialization
2210 	 */
2211 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2212 		meta_group_info[i]->bb_free =
2213 			ext4_free_clusters_after_init(sb, group, desc);
2214 	} else {
2215 		meta_group_info[i]->bb_free =
2216 			ext4_free_group_clusters(sb, desc);
2217 	}
2218 
2219 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2220 	init_rwsem(&meta_group_info[i]->alloc_sem);
2221 	meta_group_info[i]->bb_free_root = RB_ROOT;
2222 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2223 
2224 #ifdef DOUBLE_CHECK
2225 	{
2226 		struct buffer_head *bh;
2227 		meta_group_info[i]->bb_bitmap =
2228 			kmalloc(sb->s_blocksize, GFP_KERNEL);
2229 		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2230 		bh = ext4_read_block_bitmap(sb, group);
2231 		BUG_ON(bh == NULL);
2232 		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2233 			sb->s_blocksize);
2234 		put_bh(bh);
2235 	}
2236 #endif
2237 
2238 	return 0;
2239 
2240 exit_group_info:
2241 	/* If a meta_group_info table has been allocated, release it now */
2242 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2243 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2244 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2245 	}
2246 exit_meta_group_info:
2247 	return -ENOMEM;
2248 } /* ext4_mb_add_groupinfo */
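/*
 * Editor-added illustrative sketch: the two-level lookup used by
 * ext4_mb_add_groupinfo() above.  s_group_info is an array of pointer
 * tables, one table per group-descriptor block; a group number splits
 * into a table index (high bits) and a slot within that table (low
 * bits).  Hypothetical helper.
 */
static inline void example_group_info_index(unsigned int group,
					    unsigned int desc_per_block_bits,
					    unsigned int *table, unsigned int *slot)
{
	*table = group >> desc_per_block_bits;
	*slot = group & ((1U << desc_per_block_bits) - 1);
}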
2249 
2250 static int ext4_mb_init_backend(struct super_block *sb)
2251 {
2252 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2253 	ext4_group_t i;
2254 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2255 	struct ext4_super_block *es = sbi->s_es;
2256 	int num_meta_group_infos;
2257 	int num_meta_group_infos_max;
2258 	int array_size;
2259 	struct ext4_group_desc *desc;
2260 	struct kmem_cache *cachep;
2261 
2262 	/* This is the number of blocks used by GDT */
2263 	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
2264 				1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2265 
2266 	/*
2267 	 * This is the total number of blocks used by GDT including
2268 	 * the number of reserved blocks for GDT.
2269 	 * The s_group_info array is allocated with this value
2270 	 * to allow a clean online resize without complex
2271 	 * pointer manipulation.
2272 	 * The drawback is the memory left unused when no resize
2273 	 * occurs, but it is very low in terms of pages
2274 	 * (see the comments below)
2275 	 * Need to handle this properly when META_BG resizing is allowed
2276 	 */
2277 	num_meta_group_infos_max = num_meta_group_infos +
2278 				le16_to_cpu(es->s_reserved_gdt_blocks);
2279 
2280 	/*
2281 	 * array_size is the size of s_group_info array. We round it
2282 	 * to the next power of two because this approximation is done
2283 	 * internally by kmalloc so we can have some more memory
2284 	 * for free here (e.g. may be used for META_BG resize).
2285 	 */
2286 	array_size = 1;
2287 	while (array_size < sizeof(*sbi->s_group_info) *
2288 	       num_meta_group_infos_max)
2289 		array_size = array_size << 1;
2290 	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2291 	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2292 	 * So a two level scheme suffices for now. */
2293 	sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
2294 	if (sbi->s_group_info == NULL) {
2295 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2296 		return -ENOMEM;
2297 	}
2298 	sbi->s_buddy_cache = new_inode(sb);
2299 	if (sbi->s_buddy_cache == NULL) {
2300 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2301 		goto err_freesgi;
2302 	}
2303 	/* To avoid potentially colliding with a valid on-disk inode number,
2304 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2305 	 * not in the inode hash, so it should never be found by iget(), but
2306 	 * this will avoid confusion if it ever shows up during debugging. */
2307 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2308 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2309 	for (i = 0; i < ngroups; i++) {
2310 		desc = ext4_get_group_desc(sb, i, NULL);
2311 		if (desc == NULL) {
2312 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2313 			goto err_freebuddy;
2314 		}
2315 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2316 			goto err_freebuddy;
2317 	}
2318 
2319 	return 0;
2320 
2321 err_freebuddy:
2322 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2323 	while (i-- > 0)
2324 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
2325 	i = num_meta_group_infos;
2326 	while (i-- > 0)
2327 		kfree(sbi->s_group_info[i]);
2328 	iput(sbi->s_buddy_cache);
2329 err_freesgi:
2330 	ext4_kvfree(sbi->s_group_info);
2331 	return -ENOMEM;
2332 }
2333 
2334 static void ext4_groupinfo_destroy_slabs(void)
2335 {
2336 	int i;
2337 
2338 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2339 		if (ext4_groupinfo_caches[i])
2340 			kmem_cache_destroy(ext4_groupinfo_caches[i]);
2341 		ext4_groupinfo_caches[i] = NULL;
2342 	}
2343 }
2344 
2345 static int ext4_groupinfo_create_slab(size_t size)
2346 {
2347 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2348 	int slab_size;
2349 	int blocksize_bits = order_base_2(size);
2350 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2351 	struct kmem_cache *cachep;
2352 
2353 	if (cache_index >= NR_GRPINFO_CACHES)
2354 		return -EINVAL;
2355 
2356 	if (unlikely(cache_index < 0))
2357 		cache_index = 0;
2358 
2359 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2360 	if (ext4_groupinfo_caches[cache_index]) {
2361 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2362 		return 0;	/* Already created */
2363 	}
2364 
2365 	slab_size = offsetof(struct ext4_group_info,
2366 				bb_counters[blocksize_bits + 2]);
2367 
2368 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2369 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2370 					NULL);
2371 
2372 	ext4_groupinfo_caches[cache_index] = cachep;
2373 
2374 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2375 	if (!cachep) {
2376 		printk(KERN_EMERG
2377 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2378 		return -ENOMEM;
2379 	}
2380 
2381 	return 0;
2382 }
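/*
 * Editor-added illustrative sketch: why slab_size is computed with
 * offsetof() in ext4_groupinfo_create_slab() above.  The bb_counters[]
 * array is a flexible tail holding one counter per buddy order; a
 * filesystem with blocksize_bits b needs b + 2 counters (orders
 * 0 .. b+1), so the object size is the offset of the element just past
 * the last counter.  The struct and helper below are hypothetical.
 */
struct example_group_info {
	unsigned int free;
	unsigned int counters[];	/* one entry per buddy order */
};

static inline unsigned long example_groupinfo_size(int blocksize_bits)
{
	return offsetof(struct example_group_info,
			counters[blocksize_bits + 2]);
}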
2383 
2384 int ext4_mb_init(struct super_block *sb)
2385 {
2386 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2387 	unsigned i, j;
2388 	unsigned offset;
2389 	unsigned max;
2390 	int ret;
2391 
2392 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2393 
2394 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2395 	if (sbi->s_mb_offsets == NULL) {
2396 		ret = -ENOMEM;
2397 		goto out;
2398 	}
2399 
2400 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2401 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2402 	if (sbi->s_mb_maxs == NULL) {
2403 		ret = -ENOMEM;
2404 		goto out;
2405 	}
2406 
2407 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2408 	if (ret < 0)
2409 		goto out;
2410 
2411 	/* order 0 is regular bitmap */
2412 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2413 	sbi->s_mb_offsets[0] = 0;
2414 
2415 	i = 1;
2416 	offset = 0;
2417 	max = sb->s_blocksize << 2;
2418 	do {
2419 		sbi->s_mb_offsets[i] = offset;
2420 		sbi->s_mb_maxs[i] = max;
2421 		offset += 1 << (sb->s_blocksize_bits - i);
2422 		max = max >> 1;
2423 		i++;
2424 	} while (i <= sb->s_blocksize_bits + 1);
2425 
2426 	spin_lock_init(&sbi->s_md_lock);
2427 	spin_lock_init(&sbi->s_bal_lock);
2428 
2429 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2430 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2431 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2432 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2433 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2434 	/*
2435 	 * The default group preallocation is 512, which for 4k block
2436 	 * sizes translates to 2 megabytes.  However for bigalloc file
2437 	 * systems, this is probably too big (i.e, if the cluster size
2438 	 * is 1 megabyte, then group preallocation size becomes half a
2439 	 * gigabyte!).  As a default, we will keep a two megabyte
2440 	 * group prealloc size for cluster sizes up to 64k, and after
2441 	 * that, we will force a minimum group preallocation size of
2442 	 * 32 clusters.  This translates to 8 megs when the cluster
2443 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2444 	 * which seems reasonable as a default.
2445 	 */
2446 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2447 				       sbi->s_cluster_bits, 32);
2448 	/*
2449 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2450 	 * to the lowest multiple of s_stripe which is bigger than
2451 	 * the s_mb_group_prealloc as determined above. We want
2452 	 * the preallocation size to be an exact multiple of the
2453 	 * RAID stripe size so that preallocations don't fragment
2454 	 * the stripes.
2455 	 */
2456 	if (sbi->s_stripe > 1) {
2457 		sbi->s_mb_group_prealloc = roundup(
2458 			sbi->s_mb_group_prealloc, sbi->s_stripe);
2459 	}
2460 
2461 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2462 	if (sbi->s_locality_groups == NULL) {
2463 		ret = -ENOMEM;
2464 		goto out_free_groupinfo_slab;
2465 	}
2466 	for_each_possible_cpu(i) {
2467 		struct ext4_locality_group *lg;
2468 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
2469 		mutex_init(&lg->lg_mutex);
2470 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
2471 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2472 		spin_lock_init(&lg->lg_prealloc_lock);
2473 	}
2474 
2475 	/* init file for buddy data */
2476 	ret = ext4_mb_init_backend(sb);
2477 	if (ret != 0)
2478 		goto out_free_locality_groups;
2479 
2480 	if (sbi->s_proc)
2481 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2482 				 &ext4_mb_seq_groups_fops, sb);
2483 
2484 	return 0;
2485 
2486 out_free_locality_groups:
2487 	free_percpu(sbi->s_locality_groups);
2488 	sbi->s_locality_groups = NULL;
2489 out_free_groupinfo_slab:
2490 	ext4_groupinfo_destroy_slabs();
2491 out:
2492 	kfree(sbi->s_mb_offsets);
2493 	sbi->s_mb_offsets = NULL;
2494 	kfree(sbi->s_mb_maxs);
2495 	sbi->s_mb_maxs = NULL;
2496 	return ret;
2497 }
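/*
 * Editor-added illustrative note: what the loop in ext4_mb_init()
 * above builds for a 4k block size (s_blocksize_bits == 12).  Order 0
 * is the regular block bitmap; each higher-order buddy bitmap is half
 * the size of the previous one and is stored at the given byte offset
 * within the buddy block:
 *
 *   order   byte offset   size (bits)
 *     0         -            32768   (the block bitmap itself)
 *     1         0            16384
 *     2      2048             8192
 *     3      3072             4096
 *    ...
 *    12      4094                8
 *    13      4095                4
 */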
2498 
2499 /* needs to be called with the ext4 group lock held */
2500 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2501 {
2502 	struct ext4_prealloc_space *pa;
2503 	struct list_head *cur, *tmp;
2504 	int count = 0;
2505 
2506 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2507 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2508 		list_del(&pa->pa_group_list);
2509 		count++;
2510 		kmem_cache_free(ext4_pspace_cachep, pa);
2511 	}
2512 	if (count)
2513 		mb_debug(1, "mballoc: %u PAs left\n", count);
2514 
2515 }
2516 
2517 int ext4_mb_release(struct super_block *sb)
2518 {
2519 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2520 	ext4_group_t i;
2521 	int num_meta_group_infos;
2522 	struct ext4_group_info *grinfo;
2523 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2524 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2525 
2526 	if (sbi->s_proc)
2527 		remove_proc_entry("mb_groups", sbi->s_proc);
2528 
2529 	if (sbi->s_group_info) {
2530 		for (i = 0; i < ngroups; i++) {
2531 			grinfo = ext4_get_group_info(sb, i);
2532 #ifdef DOUBLE_CHECK
2533 			kfree(grinfo->bb_bitmap);
2534 #endif
2535 			ext4_lock_group(sb, i);
2536 			ext4_mb_cleanup_pa(grinfo);
2537 			ext4_unlock_group(sb, i);
2538 			kmem_cache_free(cachep, grinfo);
2539 		}
2540 		num_meta_group_infos = (ngroups +
2541 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2542 			EXT4_DESC_PER_BLOCK_BITS(sb);
2543 		for (i = 0; i < num_meta_group_infos; i++)
2544 			kfree(sbi->s_group_info[i]);
2545 		ext4_kvfree(sbi->s_group_info);
2546 	}
2547 	kfree(sbi->s_mb_offsets);
2548 	kfree(sbi->s_mb_maxs);
2549 	if (sbi->s_buddy_cache)
2550 		iput(sbi->s_buddy_cache);
2551 	if (sbi->s_mb_stats) {
2552 		ext4_msg(sb, KERN_INFO,
2553 		       "mballoc: %u blocks %u reqs (%u success)",
2554 				atomic_read(&sbi->s_bal_allocated),
2555 				atomic_read(&sbi->s_bal_reqs),
2556 				atomic_read(&sbi->s_bal_success));
2557 		ext4_msg(sb, KERN_INFO,
2558 		      "mballoc: %u extents scanned, %u goal hits, "
2559 				"%u 2^N hits, %u breaks, %u lost",
2560 				atomic_read(&sbi->s_bal_ex_scanned),
2561 				atomic_read(&sbi->s_bal_goals),
2562 				atomic_read(&sbi->s_bal_2orders),
2563 				atomic_read(&sbi->s_bal_breaks),
2564 				atomic_read(&sbi->s_mb_lost_chunks));
2565 		ext4_msg(sb, KERN_INFO,
2566 		       "mballoc: %lu generated and it took %Lu",
2567 				sbi->s_mb_buddies_generated,
2568 				sbi->s_mb_generation_time);
2569 		ext4_msg(sb, KERN_INFO,
2570 		       "mballoc: %u preallocated, %u discarded",
2571 				atomic_read(&sbi->s_mb_preallocated),
2572 				atomic_read(&sbi->s_mb_discarded));
2573 	}
2574 
2575 	free_percpu(sbi->s_locality_groups);
2576 
2577 	return 0;
2578 }
2579 
2580 static inline int ext4_issue_discard(struct super_block *sb,
2581 		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
2582 {
2583 	ext4_fsblk_t discard_block;
2584 
2585 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2586 			 ext4_group_first_block_no(sb, block_group));
2587 	count = EXT4_C2B(EXT4_SB(sb), count);
2588 	trace_ext4_discard_blocks(sb,
2589 			(unsigned long long) discard_block, count);
2590 	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2591 }
2592 
2593 /*
2594  * This function is called by the jbd2 layer once the commit has finished,
2595  * so we know we can free the blocks that were released with that commit.
2596  */
2597 static void ext4_free_data_callback(struct super_block *sb,
2598 				    struct ext4_journal_cb_entry *jce,
2599 				    int rc)
2600 {
2601 	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
2602 	struct ext4_buddy e4b;
2603 	struct ext4_group_info *db;
2604 	int err, count = 0, count2 = 0;
2605 
2606 	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2607 		 entry->efd_count, entry->efd_group, entry);
2608 
2609 	if (test_opt(sb, DISCARD))
2610 		ext4_issue_discard(sb, entry->efd_group,
2611 				   entry->efd_start_cluster, entry->efd_count);
2612 
2613 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2614 	/* we expect to find existing buddy because it's pinned */
2615 	BUG_ON(err != 0);
2616 
2617 
2618 	db = e4b.bd_info;
2619 	/* there are blocks to put in buddy to make them really free */
2620 	count += entry->efd_count;
2621 	count2++;
2622 	ext4_lock_group(sb, entry->efd_group);
2623 	/* Take it out of per group rb tree */
2624 	rb_erase(&entry->efd_node, &(db->bb_free_root));
2625 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
2626 
2627 	/*
2628 	 * Clear the trimmed flag for the group so that the next
2629 	 * ext4_trim_fs can trim it.
2630 	 * If the volume is mounted with -o discard, online discard
2631 	 * is supported and the free blocks will be trimmed online.
2632 	 */
2633 	if (!test_opt(sb, DISCARD))
2634 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
2635 
2636 	if (!db->bb_free_root.rb_node) {
2637 		/* No more items in the per group rb tree
2638 		 * balance refcounts from ext4_mb_free_metadata()
2639 		 */
2640 		page_cache_release(e4b.bd_buddy_page);
2641 		page_cache_release(e4b.bd_bitmap_page);
2642 	}
2643 	ext4_unlock_group(sb, entry->efd_group);
2644 	kmem_cache_free(ext4_free_data_cachep, entry);
2645 	ext4_mb_unload_buddy(&e4b);
2646 
2647 	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
2648 }
2649 
2650 #ifdef CONFIG_EXT4_DEBUG
2651 u8 mb_enable_debug __read_mostly;
2652 
2653 static struct dentry *debugfs_dir;
2654 static struct dentry *debugfs_debug;
2655 
2656 static void __init ext4_create_debugfs_entry(void)
2657 {
2658 	debugfs_dir = debugfs_create_dir("ext4", NULL);
2659 	if (debugfs_dir)
2660 		debugfs_debug = debugfs_create_u8("mballoc-debug",
2661 						  S_IRUGO | S_IWUSR,
2662 						  debugfs_dir,
2663 						  &mb_enable_debug);
2664 }
2665 
2666 static void ext4_remove_debugfs_entry(void)
2667 {
2668 	debugfs_remove(debugfs_debug);
2669 	debugfs_remove(debugfs_dir);
2670 }
2671 
2672 #else
2673 
2674 static void __init ext4_create_debugfs_entry(void)
2675 {
2676 }
2677 
2678 static void ext4_remove_debugfs_entry(void)
2679 {
2680 }
2681 
2682 #endif
2683 
2684 int __init ext4_init_mballoc(void)
2685 {
2686 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2687 					SLAB_RECLAIM_ACCOUNT);
2688 	if (ext4_pspace_cachep == NULL)
2689 		return -ENOMEM;
2690 
2691 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2692 				    SLAB_RECLAIM_ACCOUNT);
2693 	if (ext4_ac_cachep == NULL) {
2694 		kmem_cache_destroy(ext4_pspace_cachep);
2695 		return -ENOMEM;
2696 	}
2697 
2698 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2699 					   SLAB_RECLAIM_ACCOUNT);
2700 	if (ext4_free_data_cachep == NULL) {
2701 		kmem_cache_destroy(ext4_pspace_cachep);
2702 		kmem_cache_destroy(ext4_ac_cachep);
2703 		return -ENOMEM;
2704 	}
2705 	ext4_create_debugfs_entry();
2706 	return 0;
2707 }
2708 
2709 void ext4_exit_mballoc(void)
2710 {
2711 	/*
2712 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2713 	 * before destroying the slab cache.
2714 	 */
2715 	rcu_barrier();
2716 	kmem_cache_destroy(ext4_pspace_cachep);
2717 	kmem_cache_destroy(ext4_ac_cachep);
2718 	kmem_cache_destroy(ext4_free_data_cachep);
2719 	ext4_groupinfo_destroy_slabs();
2720 	ext4_remove_debugfs_entry();
2721 }
2722 
2723 
2724 /*
2725  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
2726  * Returns 0 if success or error code
2727  */
2728 static noinline_for_stack int
2729 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2730 				handle_t *handle, unsigned int reserv_clstrs)
2731 {
2732 	struct buffer_head *bitmap_bh = NULL;
2733 	struct ext4_group_desc *gdp;
2734 	struct buffer_head *gdp_bh;
2735 	struct ext4_sb_info *sbi;
2736 	struct super_block *sb;
2737 	ext4_fsblk_t block;
2738 	int err, len;
2739 
2740 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2741 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
2742 
2743 	sb = ac->ac_sb;
2744 	sbi = EXT4_SB(sb);
2745 
2746 	err = -EIO;
2747 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2748 	if (!bitmap_bh)
2749 		goto out_err;
2750 
2751 	err = ext4_journal_get_write_access(handle, bitmap_bh);
2752 	if (err)
2753 		goto out_err;
2754 
2755 	err = -EIO;
2756 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2757 	if (!gdp)
2758 		goto out_err;
2759 
2760 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2761 			ext4_free_group_clusters(sb, gdp));
2762 
2763 	err = ext4_journal_get_write_access(handle, gdp_bh);
2764 	if (err)
2765 		goto out_err;
2766 
2767 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
2768 
2769 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
2770 	if (!ext4_data_block_valid(sbi, block, len)) {
2771 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
2772 			   "fs metadata", block, block+len);
2773 		/* The file system was mounted not to panic on errors.
2774 		 * Fix the bitmap and repeat the block allocation.
2775 		 * We leak some of the blocks here.
2776 		 */
2777 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2778 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2779 			      ac->ac_b_ex.fe_len);
2780 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2781 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2782 		if (!err)
2783 			err = -EAGAIN;
2784 		goto out_err;
2785 	}
2786 
2787 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
2788 #ifdef AGGRESSIVE_CHECK
2789 	{
2790 		int i;
2791 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2792 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2793 						bitmap_bh->b_data));
2794 		}
2795 	}
2796 #endif
2797 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2798 		      ac->ac_b_ex.fe_len);
2799 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2800 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2801 		ext4_free_group_clusters_set(sb, gdp,
2802 					     ext4_free_clusters_after_init(sb,
2803 						ac->ac_b_ex.fe_group, gdp));
2804 	}
2805 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2806 	ext4_free_group_clusters_set(sb, gdp, len);
2807 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
2808 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
2809 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2810 
2811 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2812 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
2813 	/*
2814 	 * Now reduce the dirty block count also. Should not go negative
2815 	 */
2816 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2817 		/* release all the reserved blocks if non delalloc */
2818 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2819 				   reserv_clstrs);
2820 
2821 	if (sbi->s_log_groups_per_flex) {
2822 		ext4_group_t flex_group = ext4_flex_group(sbi,
2823 							  ac->ac_b_ex.fe_group);
2824 		atomic_sub(ac->ac_b_ex.fe_len,
2825 			   &sbi->s_flex_groups[flex_group].free_clusters);
2826 	}
2827 
2828 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2829 	if (err)
2830 		goto out_err;
2831 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2832 
2833 out_err:
2834 	brelse(bitmap_bh);
2835 	return err;
2836 }
2837 
2838 /*
2839  * here we normalize the request for a locality group
2840  * Group requests are normalized to s_mb_group_prealloc, which is rounded
2841  * up to a multiple of s_stripe if a stripe size is set via the mount option.
2842  * s_mb_group_prealloc can be configured via
2843  * /sys/fs/ext4/<partition>/mb_group_prealloc
2844  *
2845  * XXX: should we try to preallocate more than the group has now?
2846  */
2847 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2848 {
2849 	struct super_block *sb = ac->ac_sb;
2850 	struct ext4_locality_group *lg = ac->ac_lg;
2851 
2852 	BUG_ON(lg == NULL);
2853 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2854 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
2855 		current->pid, ac->ac_g_ex.fe_len);
2856 }
2857 
2858 /*
2859  * Normalization means making request better in terms of
2860  * size and alignment
2861  */
2862 static noinline_for_stack void
2863 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2864 				struct ext4_allocation_request *ar)
2865 {
2866 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2867 	int bsbits, max;
2868 	ext4_lblk_t end;
2869 	loff_t size, start_off;
2870 	loff_t orig_size __maybe_unused;
2871 	ext4_lblk_t start;
2872 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2873 	struct ext4_prealloc_space *pa;
2874 
2875 	/* only normalize data requests; metadata requests
2876 	   do not need preallocation */
2877 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2878 		return;
2879 
2880 	/* sometimes the caller may want exact blocks */
2881 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2882 		return;
2883 
2884 	/* caller may indicate that preallocation isn't
2885 	 * required (it's a tail, for example) */
2886 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2887 		return;
2888 
2889 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2890 		ext4_mb_normalize_group_request(ac);
2891 		return ;
2892 	}
2893 
2894 	bsbits = ac->ac_sb->s_blocksize_bits;
2895 
2896 	/* first, determine the actual file size
2897 	 * assuming the current request is allocated */
2898 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
2899 	size = size << bsbits;
2900 	if (size < i_size_read(ac->ac_inode))
2901 		size = i_size_read(ac->ac_inode);
2902 	orig_size = size;
2903 
2904 	/* max size of free chunks */
2905 	max = 2 << bsbits;
2906 
2907 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
2908 		(req <= (size) || max <= (chunk_size))
2909 
2910 	/* first, try to predict filesize */
2911 	/* XXX: should this table be tunable? */
2912 	start_off = 0;
2913 	if (size <= 16 * 1024) {
2914 		size = 16 * 1024;
2915 	} else if (size <= 32 * 1024) {
2916 		size = 32 * 1024;
2917 	} else if (size <= 64 * 1024) {
2918 		size = 64 * 1024;
2919 	} else if (size <= 128 * 1024) {
2920 		size = 128 * 1024;
2921 	} else if (size <= 256 * 1024) {
2922 		size = 256 * 1024;
2923 	} else if (size <= 512 * 1024) {
2924 		size = 512 * 1024;
2925 	} else if (size <= 1024 * 1024) {
2926 		size = 1024 * 1024;
2927 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
2928 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2929 						(21 - bsbits)) << 21;
2930 		size = 2 * 1024 * 1024;
2931 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
2932 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2933 							(22 - bsbits)) << 22;
2934 		size = 4 * 1024 * 1024;
2935 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
2936 					(8<<20)>>bsbits, max, 8 * 1024)) {
2937 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2938 							(23 - bsbits)) << 23;
2939 		size = 8 * 1024 * 1024;
2940 	} else {
2941 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2942 		size	  = ac->ac_o_ex.fe_len << bsbits;
2943 	}
2944 	size = size >> bsbits;
2945 	start = start_off >> bsbits;
2946 
2947 	/* don't cover already allocated blocks in selected range */
2948 	if (ar->pleft && start <= ar->lleft) {
2949 		size -= ar->lleft + 1 - start;
2950 		start = ar->lleft + 1;
2951 	}
2952 	if (ar->pright && start + size - 1 >= ar->lright)
2953 		size -= start + size - ar->lright;
2954 
2955 	end = start + size;
2956 
2957 	/* check we don't cross already preallocated blocks */
2958 	rcu_read_lock();
2959 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
2960 		ext4_lblk_t pa_end;
2961 
2962 		if (pa->pa_deleted)
2963 			continue;
2964 		spin_lock(&pa->pa_lock);
2965 		if (pa->pa_deleted) {
2966 			spin_unlock(&pa->pa_lock);
2967 			continue;
2968 		}
2969 
2970 		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
2971 						  pa->pa_len);
2972 
2973 		/* PA must not overlap original request */
2974 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2975 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
2976 
2977 		/* skip PAs this normalized request doesn't overlap with */
2978 		if (pa->pa_lstart >= end || pa_end <= start) {
2979 			spin_unlock(&pa->pa_lock);
2980 			continue;
2981 		}
2982 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2983 
2984 		/* adjust start or end to be adjacent to this pa */
2985 		if (pa_end <= ac->ac_o_ex.fe_logical) {
2986 			BUG_ON(pa_end < start);
2987 			start = pa_end;
2988 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
2989 			BUG_ON(pa->pa_lstart > end);
2990 			end = pa->pa_lstart;
2991 		}
2992 		spin_unlock(&pa->pa_lock);
2993 	}
2994 	rcu_read_unlock();
2995 	size = end - start;
2996 
2997 	/* XXX: extra loop to check we really don't overlap preallocations */
2998 	rcu_read_lock();
2999 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3000 		ext4_lblk_t pa_end;
3001 
3002 		spin_lock(&pa->pa_lock);
3003 		if (pa->pa_deleted == 0) {
3004 			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3005 							  pa->pa_len);
3006 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3007 		}
3008 		spin_unlock(&pa->pa_lock);
3009 	}
3010 	rcu_read_unlock();
3011 
3012 	if (start + size <= ac->ac_o_ex.fe_logical &&
3013 			start > ac->ac_o_ex.fe_logical) {
3014 		ext4_msg(ac->ac_sb, KERN_ERR,
3015 			 "start %lu, size %lu, fe_logical %lu",
3016 			 (unsigned long) start, (unsigned long) size,
3017 			 (unsigned long) ac->ac_o_ex.fe_logical);
3018 	}
3019 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3020 			start > ac->ac_o_ex.fe_logical);
3021 	BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
3022 
3023 	/* now prepare goal request */
3024 
3025 	/* XXX: is it better to align blocks with respect to logical
3026 	 * placement or to satisfy a big request as is */
3027 	ac->ac_g_ex.fe_logical = start;
3028 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3029 
3030 	/* define goal start in order to merge */
3031 	if (ar->pright && (ar->lright == (start + size))) {
3032 		/* merge to the right */
3033 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3034 						&ac->ac_f_ex.fe_group,
3035 						&ac->ac_f_ex.fe_start);
3036 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3037 	}
3038 	if (ar->pleft && (ar->lleft + 1 == start)) {
3039 		/* merge to the left */
3040 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3041 						&ac->ac_f_ex.fe_group,
3042 						&ac->ac_f_ex.fe_start);
3043 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3044 	}
3045 
3046 	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
3047 		(unsigned) orig_size, (unsigned) start);
3048 }
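/*
 * Editor-added illustrative sketch: the large-request alignment used
 * by ext4_mb_normalize_request() above.  Predicted sizes up to 1MB are
 * rounded up to the next fixed bucket; larger requests instead align
 * the start on a 2MB/4MB/8MB boundary by truncating the logical
 * offset.  Below is the 2MB case for a given block-size shift.
 * Hypothetical helper.
 */
static inline unsigned long long example_align_start_2mb(unsigned long long logical_block,
							 int bsbits)
{
	/* byte offset of the request, truncated to a 2MB boundary */
	return (logical_block >> (21 - bsbits)) << 21;
}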
3049 
3050 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3051 {
3052 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3053 
3054 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3055 		atomic_inc(&sbi->s_bal_reqs);
3056 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3057 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3058 			atomic_inc(&sbi->s_bal_success);
3059 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3060 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3061 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3062 			atomic_inc(&sbi->s_bal_goals);
3063 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3064 			atomic_inc(&sbi->s_bal_breaks);
3065 	}
3066 
3067 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3068 		trace_ext4_mballoc_alloc(ac);
3069 	else
3070 		trace_ext4_mballoc_prealloc(ac);
3071 }
3072 
3073 /*
3074  * Called on failure; free up any blocks from the inode PA for this
3075  * context.  We don't need this for MB_GROUP_PA because we only change
3076  * pa_free in ext4_mb_release_context(), but on failure, we've already
3077  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3078  */
3079 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3080 {
3081 	struct ext4_prealloc_space *pa = ac->ac_pa;
3082 
3083 	if (pa && pa->pa_type == MB_INODE_PA)
3084 		pa->pa_free += ac->ac_b_ex.fe_len;
3085 }
3086 
3087 /*
3088  * use blocks preallocated to inode
3089  */
3090 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3091 				struct ext4_prealloc_space *pa)
3092 {
3093 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3094 	ext4_fsblk_t start;
3095 	ext4_fsblk_t end;
3096 	int len;
3097 
3098 	/* found preallocated blocks, use them */
3099 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3100 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3101 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3102 	len = EXT4_NUM_B2C(sbi, end - start);
3103 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3104 					&ac->ac_b_ex.fe_start);
3105 	ac->ac_b_ex.fe_len = len;
3106 	ac->ac_status = AC_STATUS_FOUND;
3107 	ac->ac_pa = pa;
3108 
3109 	BUG_ON(start < pa->pa_pstart);
3110 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3111 	BUG_ON(pa->pa_free < len);
3112 	pa->pa_free -= len;
3113 
3114 	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
3115 }
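/*
 * Editor-added illustrative sketch: how ext4_mb_use_inode_pa() above
 * carves the allocation out of an inode preallocation.  The physical
 * start is the PA's physical start shifted by the logical distance
 * into the PA; the end is capped by the end of the PA.  Units are
 * simplified to blocks; the helper is hypothetical.
 */
static inline void example_carve_from_pa(unsigned long long pa_pstart,
					 unsigned long long pa_lstart,
					 unsigned long long pa_blocks,
					 unsigned long long logical,
					 unsigned long long want_blocks,
					 unsigned long long *start,
					 unsigned long long *len)
{
	unsigned long long end;

	*start = pa_pstart + (logical - pa_lstart);
	end = *start + want_blocks;
	if (end > pa_pstart + pa_blocks)
		end = pa_pstart + pa_blocks;
	*len = end - *start;
}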
3116 
3117 /*
3118  * use blocks preallocated to locality group
3119  */
3120 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3121 				struct ext4_prealloc_space *pa)
3122 {
3123 	unsigned int len = ac->ac_o_ex.fe_len;
3124 
3125 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3126 					&ac->ac_b_ex.fe_group,
3127 					&ac->ac_b_ex.fe_start);
3128 	ac->ac_b_ex.fe_len = len;
3129 	ac->ac_status = AC_STATUS_FOUND;
3130 	ac->ac_pa = pa;
3131 
3132 	/* we don't correct pa_pstart or pa_plen here to avoid a
3133 	 * possible race when the group is being loaded concurrently;
3134 	 * instead we correct the pa later, after blocks are marked
3135 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3136 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3137 	 */
3138 	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3139 }
3140 
3141 /*
3142  * Return the prealloc space that has the minimal distance
3143  * from the goal block. @cpa is the prealloc
3144  * space that currently has the known minimal distance
3145  * from the goal block.
3146  */
3147 static struct ext4_prealloc_space *
3148 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3149 			struct ext4_prealloc_space *pa,
3150 			struct ext4_prealloc_space *cpa)
3151 {
3152 	ext4_fsblk_t cur_distance, new_distance;
3153 
3154 	if (cpa == NULL) {
3155 		atomic_inc(&pa->pa_count);
3156 		return pa;
3157 	}
3158 	cur_distance = abs(goal_block - cpa->pa_pstart);
3159 	new_distance = abs(goal_block - pa->pa_pstart);
3160 
3161 	if (cur_distance <= new_distance)
3162 		return cpa;
3163 
3164 	/* drop the previous reference */
3165 	atomic_dec(&cpa->pa_count);
3166 	atomic_inc(&pa->pa_count);
3167 	return pa;
3168 }
3169 
3170 /*
3171  * search goal blocks in preallocated space
3172  */
3173 static noinline_for_stack int
3174 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3175 {
3176 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3177 	int order, i;
3178 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3179 	struct ext4_locality_group *lg;
3180 	struct ext4_prealloc_space *pa, *cpa = NULL;
3181 	ext4_fsblk_t goal_block;
3182 
3183 	/* only data can be preallocated */
3184 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3185 		return 0;
3186 
3187 	/* first, try per-file preallocation */
3188 	rcu_read_lock();
3189 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3190 
3191 		/* none of the fields in this condition change,
3192 		 * so we can skip locking for them */
3193 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3194 		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3195 					       EXT4_C2B(sbi, pa->pa_len)))
3196 			continue;
3197 
3198 		/* non-extent files can't have physical blocks past 2^32 */
3199 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3200 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3201 		     EXT4_MAX_BLOCK_FILE_PHYS))
3202 			continue;
3203 
3204 		/* found preallocated blocks, use them */
3205 		spin_lock(&pa->pa_lock);
3206 		if (pa->pa_deleted == 0 && pa->pa_free) {
3207 			atomic_inc(&pa->pa_count);
3208 			ext4_mb_use_inode_pa(ac, pa);
3209 			spin_unlock(&pa->pa_lock);
3210 			ac->ac_criteria = 10;
3211 			rcu_read_unlock();
3212 			return 1;
3213 		}
3214 		spin_unlock(&pa->pa_lock);
3215 	}
3216 	rcu_read_unlock();
3217 
3218 	/* can we use group allocation? */
3219 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3220 		return 0;
3221 
3222 	/* inode may have no locality group for some reason */
3223 	lg = ac->ac_lg;
3224 	if (lg == NULL)
3225 		return 0;
3226 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3227 	if (order > PREALLOC_TB_SIZE - 1)
3228 		/* The max size of hash table is PREALLOC_TB_SIZE */
3229 		order = PREALLOC_TB_SIZE - 1;
3230 
3231 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3232 	/*
3233 	 * search for the prealloc space that is having
3234 	 * minimal distance from the goal block.
3235 	 */
3236 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3237 		rcu_read_lock();
3238 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3239 					pa_inode_list) {
3240 			spin_lock(&pa->pa_lock);
3241 			if (pa->pa_deleted == 0 &&
3242 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3243 
3244 				cpa = ext4_mb_check_group_pa(goal_block,
3245 								pa, cpa);
3246 			}
3247 			spin_unlock(&pa->pa_lock);
3248 		}
3249 		rcu_read_unlock();
3250 	}
3251 	if (cpa) {
3252 		ext4_mb_use_group_pa(ac, cpa);
3253 		ac->ac_criteria = 20;
3254 		return 1;
3255 	}
3256 	return 0;
3257 }
3258 
3259 /*
3260  * the function goes through all blocks freed in the group
3261  * but not yet committed and marks them used in the in-core bitmap.
3262  * The buddy must be generated from this bitmap.
3263  * Needs to be called with the ext4 group lock held
3264  */
3265 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3266 						ext4_group_t group)
3267 {
3268 	struct rb_node *n;
3269 	struct ext4_group_info *grp;
3270 	struct ext4_free_data *entry;
3271 
3272 	grp = ext4_get_group_info(sb, group);
3273 	n = rb_first(&(grp->bb_free_root));
3274 
3275 	while (n) {
3276 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3277 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3278 		n = rb_next(n);
3279 	}
3280 	return;
3281 }
3282 
3283 /*
3284  * the function goes through all preallocations in this group and marks them
3285  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3286  * Needs to be called with the ext4 group lock held
3287  */
3288 static noinline_for_stack
3289 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3290 					ext4_group_t group)
3291 {
3292 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3293 	struct ext4_prealloc_space *pa;
3294 	struct list_head *cur;
3295 	ext4_group_t groupnr;
3296 	ext4_grpblk_t start;
3297 	int preallocated = 0;
3298 	int len;
3299 
3300 	/* all forms of preallocation discard first load the group,
3301 	 * so the only competing code is preallocation use.
3302 	 * We don't need any locking here.
3303 	 * Note that we do NOT ignore preallocations with pa_deleted set;
3304 	 * otherwise we could leave used blocks available for
3305 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3306 	 * is dropping the preallocation
3307 	 */
3308 	list_for_each(cur, &grp->bb_prealloc_list) {
3309 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3310 		spin_lock(&pa->pa_lock);
3311 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3312 					     &groupnr, &start);
3313 		len = pa->pa_len;
3314 		spin_unlock(&pa->pa_lock);
3315 		if (unlikely(len == 0))
3316 			continue;
3317 		BUG_ON(groupnr != group);
3318 		ext4_set_bits(bitmap, start, len);
3319 		preallocated += len;
3320 	}
3321 	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
3322 }
3323 
3324 static void ext4_mb_pa_callback(struct rcu_head *head)
3325 {
3326 	struct ext4_prealloc_space *pa;
3327 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3328 	kmem_cache_free(ext4_pspace_cachep, pa);
3329 }
3330 
3331 /*
3332  * drops a reference to preallocated space descriptor
3333  * if this was the last reference and the space is consumed
3334  */
3335 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3336 			struct super_block *sb, struct ext4_prealloc_space *pa)
3337 {
3338 	ext4_group_t grp;
3339 	ext4_fsblk_t grp_blk;
3340 
3341 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3342 		return;
3343 
3344 	/* in this short window concurrent discard can set pa_deleted */
3345 	spin_lock(&pa->pa_lock);
3346 	if (pa->pa_deleted == 1) {
3347 		spin_unlock(&pa->pa_lock);
3348 		return;
3349 	}
3350 
3351 	pa->pa_deleted = 1;
3352 	spin_unlock(&pa->pa_lock);
3353 
3354 	grp_blk = pa->pa_pstart;
3355 	/*
3356 	 * If doing group-based preallocation, pa_pstart may be in the
3357 	 * next group when pa is used up
3358 	 */
3359 	if (pa->pa_type == MB_GROUP_PA)
3360 		grp_blk--;
3361 
3362 	ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
3363 
3364 	/*
3365 	 * possible race:
3366 	 *
3367 	 *  P1 (buddy init)			P2 (regular allocation)
3368 	 *					find block B in PA
3369 	 *  copy on-disk bitmap to buddy
3370 	 *  					mark B in on-disk bitmap
3371 	 *					drop PA from group
3372 	 *  mark all PAs in buddy
3373 	 *
3374 	 * thus, P1 initializes buddy with B available. to prevent this
3375 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3376 	 * against that pair
3377 	 */
3378 	ext4_lock_group(sb, grp);
3379 	list_del(&pa->pa_group_list);
3380 	ext4_unlock_group(sb, grp);
3381 
3382 	spin_lock(pa->pa_obj_lock);
3383 	list_del_rcu(&pa->pa_inode_list);
3384 	spin_unlock(pa->pa_obj_lock);
3385 
3386 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3387 }
3388 
3389 /*
3390  * creates new preallocated space for given inode
3391  */
3392 static noinline_for_stack int
3393 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3394 {
3395 	struct super_block *sb = ac->ac_sb;
3396 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3397 	struct ext4_prealloc_space *pa;
3398 	struct ext4_group_info *grp;
3399 	struct ext4_inode_info *ei;
3400 
3401 	/* preallocate only when the found space is larger than requested */
3402 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3403 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3404 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3405 
3406 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3407 	if (pa == NULL)
3408 		return -ENOMEM;
3409 
3410 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3411 		int winl;
3412 		int wins;
3413 		int win;
3414 		int offs;
3415 
3416 		/* we can't allocate as much as the normalizer wants,
3417 		 * so the found space must get a proper lstart
3418 		 * to cover the original request */
3419 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3420 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3421 
3422 		/* we're limited by the original request in that
3423 		 * the logical block must be covered in any case;
3424 		 * winl is the window we can move our chunk within */
3425 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3426 
3427 		/* also, we should cover whole original request */
3428 		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
3429 
3430 		/* the smallest one defines real window */
3431 		win = min(winl, wins);
3432 
3433 		offs = ac->ac_o_ex.fe_logical %
3434 			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3435 		if (offs && offs < win)
3436 			win = offs;
3437 
3438 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3439 			EXT4_B2C(sbi, win);
3440 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3441 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3442 	}
3443 
3444 	/* preallocation can change ac_b_ex, thus we store actually
3445 	 * allocated blocks for history */
3446 	ac->ac_f_ex = ac->ac_b_ex;
3447 
3448 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3449 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3450 	pa->pa_len = ac->ac_b_ex.fe_len;
3451 	pa->pa_free = pa->pa_len;
3452 	atomic_set(&pa->pa_count, 1);
3453 	spin_lock_init(&pa->pa_lock);
3454 	INIT_LIST_HEAD(&pa->pa_inode_list);
3455 	INIT_LIST_HEAD(&pa->pa_group_list);
3456 	pa->pa_deleted = 0;
3457 	pa->pa_type = MB_INODE_PA;
3458 
3459 	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
3460 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3461 	trace_ext4_mb_new_inode_pa(ac, pa);
3462 
3463 	ext4_mb_use_inode_pa(ac, pa);
3464 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
3465 
3466 	ei = EXT4_I(ac->ac_inode);
3467 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3468 
3469 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3470 	pa->pa_inode = ac->ac_inode;
3471 
3472 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3473 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3474 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3475 
3476 	spin_lock(pa->pa_obj_lock);
3477 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3478 	spin_unlock(pa->pa_obj_lock);
3479 
3480 	return 0;
3481 }
3482 
3483 /*
3484  * creates new preallocated space for the locality group the inode belongs to
3485  */
3486 static noinline_for_stack int
3487 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3488 {
3489 	struct super_block *sb = ac->ac_sb;
3490 	struct ext4_locality_group *lg;
3491 	struct ext4_prealloc_space *pa;
3492 	struct ext4_group_info *grp;
3493 
3494 	/* preallocate only when found space is larger than requested */
3495 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3496 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3497 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3498 
3499 	BUG_ON(ext4_pspace_cachep == NULL);
3500 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3501 	if (pa == NULL)
3502 		return -ENOMEM;
3503 
3504 	/* preallocation can change ac_b_ex, thus we store actually
3505 	 * allocated blocks for history */
3506 	ac->ac_f_ex = ac->ac_b_ex;
3507 
3508 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3509 	pa->pa_lstart = pa->pa_pstart;
3510 	pa->pa_len = ac->ac_b_ex.fe_len;
3511 	pa->pa_free = pa->pa_len;
3512 	atomic_set(&pa->pa_count, 1);
3513 	spin_lock_init(&pa->pa_lock);
3514 	INIT_LIST_HEAD(&pa->pa_inode_list);
3515 	INIT_LIST_HEAD(&pa->pa_group_list);
3516 	pa->pa_deleted = 0;
3517 	pa->pa_type = MB_GROUP_PA;
3518 
3519 	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
3520 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3521 	trace_ext4_mb_new_group_pa(ac, pa);
3522 
3523 	ext4_mb_use_group_pa(ac, pa);
3524 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3525 
3526 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3527 	lg = ac->ac_lg;
3528 	BUG_ON(lg == NULL);
3529 
3530 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3531 	pa->pa_inode = NULL;
3532 
3533 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3534 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3535 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3536 
3537 	/*
3538 	 * We will later add the new pa to the right bucket
3539 	 * after updating the pa_free in ext4_mb_release_context
3540 	 */
3541 	return 0;
3542 }
3543 
3544 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3545 {
3546 	int err;
3547 
3548 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3549 		err = ext4_mb_new_group_pa(ac);
3550 	else
3551 		err = ext4_mb_new_inode_pa(ac);
3552 	return err;
3553 }
3554 
3555 /*
3556  * finds all unused blocks in on-disk bitmap, frees them in
3557  * in-core bitmap and buddy.
3558  * @pa must be unlinked from inode and group lists, so that
3559  * nobody else can find/use it.
3560  * the caller MUST hold group/inode locks.
3561  * TODO: optimize the case when there are no in-core structures yet
3562  */
3563 static noinline_for_stack int
3564 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3565 			struct ext4_prealloc_space *pa)
3566 {
3567 	struct super_block *sb = e4b->bd_sb;
3568 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3569 	unsigned int end;
3570 	unsigned int next;
3571 	ext4_group_t group;
3572 	ext4_grpblk_t bit;
3573 	unsigned long long grp_blk_start;
3574 	int err = 0;
3575 	int free = 0;
3576 
3577 	BUG_ON(pa->pa_deleted == 0);
3578 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3579 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
3580 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3581 	end = bit + pa->pa_len;
3582 
3583 	while (bit < end) {
3584 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3585 		if (bit >= end)
3586 			break;
3587 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3588 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
3589 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3590 			 (unsigned) next - bit, (unsigned) group);
3591 		free += next - bit;
3592 
3593 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3594 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3595 						    EXT4_C2B(sbi, bit)),
3596 					       next - bit);
3597 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3598 		bit = next + 1;
3599 	}
3600 	if (free != pa->pa_free) {
3601 		ext4_msg(e4b->bd_sb, KERN_CRIT,
3602 			 "pa %p: logic %lu, phys. %lu, len %lu",
3603 			 pa, (unsigned long) pa->pa_lstart,
3604 			 (unsigned long) pa->pa_pstart,
3605 			 (unsigned long) pa->pa_len);
3606 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
3607 					free, pa->pa_free);
3608 		/*
3609 		 * pa is already deleted so we use the value obtained
3610 		 * from the bitmap and continue.
3611 		 */
3612 	}
3613 	atomic_add(free, &sbi->s_mb_discarded);
3614 
3615 	return err;
3616 }
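/*
 * Illustration of the scan above (hypothetical bitmap contents): if the
 * pa covers bits 10..19 of the group bitmap and bits 12, 13 and 17 are
 * still set (in use), the loop frees the runs [10,12), [14,17) and
 * [18,20), so free = 2 + 3 + 2 = 7, which is then compared against
 * pa_free.
 */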
3617 
3618 static noinline_for_stack int
3619 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3620 				struct ext4_prealloc_space *pa)
3621 {
3622 	struct super_block *sb = e4b->bd_sb;
3623 	ext4_group_t group;
3624 	ext4_grpblk_t bit;
3625 
3626 	trace_ext4_mb_release_group_pa(sb, pa);
3627 	BUG_ON(pa->pa_deleted == 0);
3628 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3629 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3630 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3631 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3632 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
3633 
3634 	return 0;
3635 }
3636 
3637 /*
3638  * releases all preallocations in given group
3639  *
3640  * first, we need to decide discard policy:
3641  * - when do we discard
3642  *   1) ENOSPC
3643  * - how many do we discard
3644  *   1) how many requested
3645  */
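/*
 * For example, a caller that passes needed == 0 is asking for everything
 * in the group: needed is bumped to clusters-per-group + 1 below, and the
 * return value is the number of clusters actually freed.
 */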
3646 static noinline_for_stack int
3647 ext4_mb_discard_group_preallocations(struct super_block *sb,
3648 					ext4_group_t group, int needed)
3649 {
3650 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3651 	struct buffer_head *bitmap_bh = NULL;
3652 	struct ext4_prealloc_space *pa, *tmp;
3653 	struct list_head list;
3654 	struct ext4_buddy e4b;
3655 	int err;
3656 	int busy = 0;
3657 	int free = 0;
3658 
3659 	mb_debug(1, "discard preallocation for group %u\n", group);
3660 
3661 	if (list_empty(&grp->bb_prealloc_list))
3662 		return 0;
3663 
3664 	bitmap_bh = ext4_read_block_bitmap(sb, group);
3665 	if (bitmap_bh == NULL) {
3666 		ext4_error(sb, "Error reading block bitmap for %u", group);
3667 		return 0;
3668 	}
3669 
3670 	err = ext4_mb_load_buddy(sb, group, &e4b);
3671 	if (err) {
3672 		ext4_error(sb, "Error loading buddy information for %u", group);
3673 		put_bh(bitmap_bh);
3674 		return 0;
3675 	}
3676 
3677 	if (needed == 0)
3678 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
3679 
3680 	INIT_LIST_HEAD(&list);
3681 repeat:
3682 	ext4_lock_group(sb, group);
3683 	list_for_each_entry_safe(pa, tmp,
3684 				&grp->bb_prealloc_list, pa_group_list) {
3685 		spin_lock(&pa->pa_lock);
3686 		if (atomic_read(&pa->pa_count)) {
3687 			spin_unlock(&pa->pa_lock);
3688 			busy = 1;
3689 			continue;
3690 		}
3691 		if (pa->pa_deleted) {
3692 			spin_unlock(&pa->pa_lock);
3693 			continue;
3694 		}
3695 
3696 		/* seems this one can be freed ... */
3697 		pa->pa_deleted = 1;
3698 
3699 		/* we can trust pa_free ... */
3700 		free += pa->pa_free;
3701 
3702 		spin_unlock(&pa->pa_lock);
3703 
3704 		list_del(&pa->pa_group_list);
3705 		list_add(&pa->u.pa_tmp_list, &list);
3706 	}
3707 
3708 	/* if we still need more blocks and some PAs were used, try again */
3709 	if (free < needed && busy) {
3710 		busy = 0;
3711 		ext4_unlock_group(sb, group);
3712 		/*
3713 		 * Yield the CPU here so that we don't get soft lockup
3714 		 * in non preempt case.
3715 		 */
3716 		yield();
3717 		goto repeat;
3718 	}
3719 
3720 	/* found anything to free? */
3721 	if (list_empty(&list)) {
3722 		BUG_ON(free != 0);
3723 		goto out;
3724 	}
3725 
3726 	/* now free all selected PAs */
3727 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3728 
3729 		/* remove from object (inode or locality group) */
3730 		spin_lock(pa->pa_obj_lock);
3731 		list_del_rcu(&pa->pa_inode_list);
3732 		spin_unlock(pa->pa_obj_lock);
3733 
3734 		if (pa->pa_type == MB_GROUP_PA)
3735 			ext4_mb_release_group_pa(&e4b, pa);
3736 		else
3737 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3738 
3739 		list_del(&pa->u.pa_tmp_list);
3740 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3741 	}
3742 
3743 out:
3744 	ext4_unlock_group(sb, group);
3745 	ext4_mb_unload_buddy(&e4b);
3746 	put_bh(bitmap_bh);
3747 	return free;
3748 }
3749 
3750 /*
3751  * releases all unused preallocated blocks for the given inode
3752  *
3753  * It's important to discard preallocations under i_data_sem
3754  * We don't want another block to be served from the prealloc
3755  * space when we are discarding the inode prealloc space.
3756  *
3757  * FIXME!! Make sure it is valid at all the call sites
3758  */
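/*
 * Sketch of the expected caller pattern (illustrative only, not a call
 * site in this file):
 *
 *	down_write(&EXT4_I(inode)->i_data_sem);
 *	ext4_discard_preallocations(inode);
 *	... truncate or release the inode's blocks ...
 *	up_write(&EXT4_I(inode)->i_data_sem);
 */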
3759 void ext4_discard_preallocations(struct inode *inode)
3760 {
3761 	struct ext4_inode_info *ei = EXT4_I(inode);
3762 	struct super_block *sb = inode->i_sb;
3763 	struct buffer_head *bitmap_bh = NULL;
3764 	struct ext4_prealloc_space *pa, *tmp;
3765 	ext4_group_t group = 0;
3766 	struct list_head list;
3767 	struct ext4_buddy e4b;
3768 	int err;
3769 
3770 	if (!S_ISREG(inode->i_mode)) {
3771 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3772 		return;
3773 	}
3774 
3775 	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
3776 	trace_ext4_discard_preallocations(inode);
3777 
3778 	INIT_LIST_HEAD(&list);
3779 
3780 repeat:
3781 	/* first, collect all pa's in the inode */
3782 	spin_lock(&ei->i_prealloc_lock);
3783 	while (!list_empty(&ei->i_prealloc_list)) {
3784 		pa = list_entry(ei->i_prealloc_list.next,
3785 				struct ext4_prealloc_space, pa_inode_list);
3786 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3787 		spin_lock(&pa->pa_lock);
3788 		if (atomic_read(&pa->pa_count)) {
3789 			/* this shouldn't happen often - nobody should
3790 			 * use preallocation while we're discarding it */
3791 			spin_unlock(&pa->pa_lock);
3792 			spin_unlock(&ei->i_prealloc_lock);
3793 			ext4_msg(sb, KERN_ERR,
3794 				 "uh-oh! used pa while discarding");
3795 			WARN_ON(1);
3796 			schedule_timeout_uninterruptible(HZ);
3797 			goto repeat;
3798 
3799 		}
3800 		if (pa->pa_deleted == 0) {
3801 			pa->pa_deleted = 1;
3802 			spin_unlock(&pa->pa_lock);
3803 			list_del_rcu(&pa->pa_inode_list);
3804 			list_add(&pa->u.pa_tmp_list, &list);
3805 			continue;
3806 		}
3807 
3808 		/* someone is deleting pa right now */
3809 		spin_unlock(&pa->pa_lock);
3810 		spin_unlock(&ei->i_prealloc_lock);
3811 
3812 		/* we have to wait here because pa_deleted
3813 		 * doesn't mean pa is already unlinked from
3814 		 * the list. since we might be called from
3815 		 * ->clear_inode(), the inode will get freed
3816 		 * and a concurrent thread unlinking pa from
3817 		 * the inode's list may access already freed
3818 		 * memory, bad-bad-bad */
3819 
3820 		/* XXX: if this happens too often, we can
3821 		 * add a flag to force wait only in case
3822 		 * of ->clear_inode(), but not in case of
3823 		 * regular truncate */
3824 		schedule_timeout_uninterruptible(HZ);
3825 		goto repeat;
3826 	}
3827 	spin_unlock(&ei->i_prealloc_lock);
3828 
3829 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3830 		BUG_ON(pa->pa_type != MB_INODE_PA);
3831 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3832 
3833 		err = ext4_mb_load_buddy(sb, group, &e4b);
3834 		if (err) {
3835 			ext4_error(sb, "Error loading buddy information for %u",
3836 					group);
3837 			continue;
3838 		}
3839 
3840 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3841 		if (bitmap_bh == NULL) {
3842 			ext4_error(sb, "Error reading block bitmap for %u",
3843 					group);
3844 			ext4_mb_unload_buddy(&e4b);
3845 			continue;
3846 		}
3847 
3848 		ext4_lock_group(sb, group);
3849 		list_del(&pa->pa_group_list);
3850 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3851 		ext4_unlock_group(sb, group);
3852 
3853 		ext4_mb_unload_buddy(&e4b);
3854 		put_bh(bitmap_bh);
3855 
3856 		list_del(&pa->u.pa_tmp_list);
3857 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3858 	}
3859 }
3860 
3861 #ifdef CONFIG_EXT4_DEBUG
3862 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3863 {
3864 	struct super_block *sb = ac->ac_sb;
3865 	ext4_group_t ngroups, i;
3866 
3867 	if (!mb_enable_debug ||
3868 	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
3869 		return;
3870 
3871 	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
3872 			" Allocation context details:");
3873 	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
3874 			ac->ac_status, ac->ac_flags);
3875 	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
3876 		 	"goal %lu/%lu/%lu@%lu, "
3877 			"best %lu/%lu/%lu@%lu cr %d",
3878 			(unsigned long)ac->ac_o_ex.fe_group,
3879 			(unsigned long)ac->ac_o_ex.fe_start,
3880 			(unsigned long)ac->ac_o_ex.fe_len,
3881 			(unsigned long)ac->ac_o_ex.fe_logical,
3882 			(unsigned long)ac->ac_g_ex.fe_group,
3883 			(unsigned long)ac->ac_g_ex.fe_start,
3884 			(unsigned long)ac->ac_g_ex.fe_len,
3885 			(unsigned long)ac->ac_g_ex.fe_logical,
3886 			(unsigned long)ac->ac_b_ex.fe_group,
3887 			(unsigned long)ac->ac_b_ex.fe_start,
3888 			(unsigned long)ac->ac_b_ex.fe_len,
3889 			(unsigned long)ac->ac_b_ex.fe_logical,
3890 			(int)ac->ac_criteria);
3891 	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
3892 		 ac->ac_ex_scanned, ac->ac_found);
3893 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
3894 	ngroups = ext4_get_groups_count(sb);
3895 	for (i = 0; i < ngroups; i++) {
3896 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3897 		struct ext4_prealloc_space *pa;
3898 		ext4_grpblk_t start;
3899 		struct list_head *cur;
3900 		ext4_lock_group(sb, i);
3901 		list_for_each(cur, &grp->bb_prealloc_list) {
3902 			pa = list_entry(cur, struct ext4_prealloc_space,
3903 					pa_group_list);
3904 			spin_lock(&pa->pa_lock);
3905 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3906 						     NULL, &start);
3907 			spin_unlock(&pa->pa_lock);
3908 			printk(KERN_ERR "PA:%u:%d:%u \n", i,
3909 			       start, pa->pa_len);
3910 		}
3911 		ext4_unlock_group(sb, i);
3912 
3913 		if (grp->bb_free == 0)
3914 			continue;
3915 		printk(KERN_ERR "%u: %d/%d \n",
3916 		       i, grp->bb_free, grp->bb_fragments);
3917 	}
3918 	printk(KERN_ERR "\n");
3919 }
3920 #else
3921 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3922 {
3923 	return;
3924 }
3925 #endif
3926 
3927 /*
3928  * We use locality group preallocation for small files. The size of the
3929  * file is determined by the current size or the resulting size after
3930  * allocation, whichever is larger.
3931  *
3932  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
3933  */
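/*
 * For example (hypothetical device name), raising the threshold to 32
 * blocks so that files of up to 32 blocks use the locality group:
 *
 *	echo 32 > /sys/fs/ext4/sda1/mb_stream_req
 *
 * Setting /sys/fs/ext4/<partition>/mb_group_prealloc to 0 disables group
 * preallocation entirely, which is what the s_mb_group_prealloc check
 * below implements.
 */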
3934 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3935 {
3936 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3937 	int bsbits = ac->ac_sb->s_blocksize_bits;
3938 	loff_t size, isize;
3939 
3940 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3941 		return;
3942 
3943 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3944 		return;
3945 
3946 	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
3947 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3948 		>> bsbits;
3949 
3950 	if ((size == isize) &&
3951 	    !ext4_fs_is_busy(sbi) &&
3952 	    (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3953 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3954 		return;
3955 	}
3956 
3957 	if (sbi->s_mb_group_prealloc <= 0) {
3958 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3959 		return;
3960 	}
3961 
3962 	/* don't use group allocation for large files */
3963 	size = max(size, isize);
3964 	if (size > sbi->s_mb_stream_request) {
3965 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3966 		return;
3967 	}
3968 
3969 	BUG_ON(ac->ac_lg != NULL);
3970 	/*
3971 	 * locality group prealloc space is per-CPU. The reason for having
3972 	 * a per-CPU locality group is to reduce contention between block
3973 	 * requests from multiple CPUs.
3974 	 */
3975 	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3976 
3977 	/* we're going to use group allocation */
3978 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3979 
3980 	/* serialize all allocations in the group */
3981 	mutex_lock(&ac->ac_lg->lg_mutex);
3982 }
3983 
3984 static noinline_for_stack int
3985 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
3986 				struct ext4_allocation_request *ar)
3987 {
3988 	struct super_block *sb = ar->inode->i_sb;
3989 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3990 	struct ext4_super_block *es = sbi->s_es;
3991 	ext4_group_t group;
3992 	unsigned int len;
3993 	ext4_fsblk_t goal;
3994 	ext4_grpblk_t block;
3995 
3996 	/* we can't allocate > group size */
3997 	len = ar->len;
3998 
3999 	/* just a dirty hack to filter too big requests  */
4000 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
4001 		len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
4002 
4003 	/* start searching from the goal */
4004 	goal = ar->goal;
4005 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4006 			goal >= ext4_blocks_count(es))
4007 		goal = le32_to_cpu(es->s_first_data_block);
4008 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4009 
4010 	/* set up allocation goals */
4011 	memset(ac, 0, sizeof(struct ext4_allocation_context));
4012 	ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
4013 	ac->ac_status = AC_STATUS_CONTINUE;
4014 	ac->ac_sb = sb;
4015 	ac->ac_inode = ar->inode;
4016 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4017 	ac->ac_o_ex.fe_group = group;
4018 	ac->ac_o_ex.fe_start = block;
4019 	ac->ac_o_ex.fe_len = len;
4020 	ac->ac_g_ex = ac->ac_o_ex;
4021 	ac->ac_flags = ar->flags;
4022 
4023 	/* we have to define the context: will we work with a file or
4024 	 * a locality group? this is a policy, actually */
4025 	ext4_mb_group_or_file(ac);
4026 
4027 	mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4028 			"left: %u/%u, right %u/%u to %swritable\n",
4029 			(unsigned) ar->len, (unsigned) ar->logical,
4030 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4031 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4032 			(unsigned) ar->lright, (unsigned) ar->pright,
4033 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4034 	return 0;
4035 
4036 }
4037 
4038 static noinline_for_stack void
4039 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4040 					struct ext4_locality_group *lg,
4041 					int order, int total_entries)
4042 {
4043 	ext4_group_t group = 0;
4044 	struct ext4_buddy e4b;
4045 	struct list_head discard_list;
4046 	struct ext4_prealloc_space *pa, *tmp;
4047 
4048 	mb_debug(1, "discard locality group preallocation\n");
4049 
4050 	INIT_LIST_HEAD(&discard_list);
4051 
4052 	spin_lock(&lg->lg_prealloc_lock);
4053 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4054 						pa_inode_list) {
4055 		spin_lock(&pa->pa_lock);
4056 		if (atomic_read(&pa->pa_count)) {
4057 			/*
4058 			 * This is the pa that we just used
4059 			 * for block allocation. So don't
4060 			 * free that
4061 			 */
4062 			spin_unlock(&pa->pa_lock);
4063 			continue;
4064 		}
4065 		if (pa->pa_deleted) {
4066 			spin_unlock(&pa->pa_lock);
4067 			continue;
4068 		}
4069 		/* only lg prealloc space */
4070 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4071 
4072 		/* seems this one can be freed ... */
4073 		pa->pa_deleted = 1;
4074 		spin_unlock(&pa->pa_lock);
4075 
4076 		list_del_rcu(&pa->pa_inode_list);
4077 		list_add(&pa->u.pa_tmp_list, &discard_list);
4078 
4079 		total_entries--;
4080 		if (total_entries <= 5) {
4081 			/*
4082 			 * we want to keep only 5 entries,
4083 			 * allowing the list to grow to 8. This
4084 			 * makes sure we don't call discard
4085 			 * again soon for this list.
4086 			 */
4087 			break;
4088 		}
4089 	}
4090 	spin_unlock(&lg->lg_prealloc_lock);
4091 
4092 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4093 
4094 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4095 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
4096 			ext4_error(sb, "Error loading buddy information for %u",
4097 					group);
4098 			continue;
4099 		}
4100 		ext4_lock_group(sb, group);
4101 		list_del(&pa->pa_group_list);
4102 		ext4_mb_release_group_pa(&e4b, pa);
4103 		ext4_unlock_group(sb, group);
4104 
4105 		ext4_mb_unload_buddy(&e4b);
4106 		list_del(&pa->u.pa_tmp_list);
4107 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4108 	}
4109 }
4110 
4111 /*
4112  * We have incremented pa_count. So it cannot be freed at this
4113  * point. Also we hold lg_mutex. So no parallel allocation is
4114  * possible from this lg. That means pa_free cannot be updated.
4115  *
4116  * A parallel ext4_mb_discard_group_preallocations is possible,
4117  * however, which can cause the lg_prealloc_list to be updated.
4118  */
4119 
4120 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4121 {
4122 	int order, added = 0, lg_prealloc_count = 1;
4123 	struct super_block *sb = ac->ac_sb;
4124 	struct ext4_locality_group *lg = ac->ac_lg;
4125 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4126 
4127 	order = fls(pa->pa_free) - 1;
4128 	if (order > PREALLOC_TB_SIZE - 1)
4129 		/* The max size of hash table is PREALLOC_TB_SIZE */
4130 		order = PREALLOC_TB_SIZE - 1;
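	/*
	 * e.g. (hypothetical) a pa with pa_free = 300 clusters gives
	 * fls(300) - 1 = 8, so it is filed under lg_prealloc_list[8]
	 * (provided PREALLOC_TB_SIZE is larger than 8).
	 */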
4131 	/* Add the prealloc space to lg */
4132 	rcu_read_lock();
4133 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4134 						pa_inode_list) {
4135 		spin_lock(&tmp_pa->pa_lock);
4136 		if (tmp_pa->pa_deleted) {
4137 			spin_unlock(&tmp_pa->pa_lock);
4138 			continue;
4139 		}
4140 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4141 			/* Add to the tail of the previous entry */
4142 			list_add_tail_rcu(&pa->pa_inode_list,
4143 						&tmp_pa->pa_inode_list);
4144 			added = 1;
4145 			/*
4146 			 * we want to count the total
4147 			 * number of entries in the list
4148 			 */
4149 		}
4150 		spin_unlock(&tmp_pa->pa_lock);
4151 		lg_prealloc_count++;
4152 	}
4153 	if (!added)
4154 		list_add_tail_rcu(&pa->pa_inode_list,
4155 					&lg->lg_prealloc_list[order]);
4156 	rcu_read_unlock();
4157 
4158 	/* Now trim the list to be not more than 8 elements */
4159 	if (lg_prealloc_count > 8) {
4160 		ext4_mb_discard_lg_preallocations(sb, lg,
4161 						order, lg_prealloc_count);
4162 		return;
4163 	}
4164 	return ;
4165 }
4166 
4167 /*
4168  * release all the resources we used in the allocation
4169  */
4170 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4171 {
4172 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4173 	struct ext4_prealloc_space *pa = ac->ac_pa;
4174 	if (pa) {
4175 		if (pa->pa_type == MB_GROUP_PA) {
4176 			/* see comment in ext4_mb_use_group_pa() */
4177 			spin_lock(&pa->pa_lock);
4178 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4179 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4180 			pa->pa_free -= ac->ac_b_ex.fe_len;
4181 			pa->pa_len -= ac->ac_b_ex.fe_len;
4182 			spin_unlock(&pa->pa_lock);
4183 		}
4184 	}
4185 	if (pa) {
4186 		/*
4187 		 * We want to add the pa to the right bucket.
4188 		 * Remove it from the list and while adding
4189 		 * make sure the list to which we are adding
4190 		 * doesn't grow big.
4191 		 */
4192 		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4193 			spin_lock(pa->pa_obj_lock);
4194 			list_del_rcu(&pa->pa_inode_list);
4195 			spin_unlock(pa->pa_obj_lock);
4196 			ext4_mb_add_n_trim(ac);
4197 		}
4198 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
4199 	}
4200 	if (ac->ac_bitmap_page)
4201 		page_cache_release(ac->ac_bitmap_page);
4202 	if (ac->ac_buddy_page)
4203 		page_cache_release(ac->ac_buddy_page);
4204 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4205 		mutex_unlock(&ac->ac_lg->lg_mutex);
4206 	ext4_mb_collect_stats(ac);
4207 	return 0;
4208 }
4209 
4210 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4211 {
4212 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4213 	int ret;
4214 	int freed = 0;
4215 
4216 	trace_ext4_mb_discard_preallocations(sb, needed);
4217 	for (i = 0; i < ngroups && needed > 0; i++) {
4218 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4219 		freed += ret;
4220 		needed -= ret;
4221 	}
4222 
4223 	return freed;
4224 }
4225 
4226 /*
4227  * Main entry point into mballoc to allocate blocks
4228  * it tries to use preallocation first, then falls back
4229  * to usual allocation
4230  */
4231 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4232 				struct ext4_allocation_request *ar, int *errp)
4233 {
4234 	int freed;
4235 	struct ext4_allocation_context *ac = NULL;
4236 	struct ext4_sb_info *sbi;
4237 	struct super_block *sb;
4238 	ext4_fsblk_t block = 0;
4239 	unsigned int inquota = 0;
4240 	unsigned int reserv_clstrs = 0;
4241 
4242 	sb = ar->inode->i_sb;
4243 	sbi = EXT4_SB(sb);
4244 
4245 	trace_ext4_request_blocks(ar);
4246 
4247 	/* Allow to use superuser reservation for quota file */
4248 	if (IS_NOQUOTA(ar->inode))
4249 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4250 
4251 	/*
4252 	 * For delayed allocation, we could skip the ENOSPC and
4253 	 * EDQUOT check, as blocks and quotas have already been
4254 	 * reserved when the data was copied into the pagecache.
4255 	 */
4256 	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4257 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4258 	else {
4259 		/* Without delayed allocation we need to verify
4260 		 * there are enough free blocks to do the block allocation
4261 		 * and verify that the allocation doesn't exceed the quota limits.
4262 		 */
4263 		while (ar->len &&
4264 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4265 
4266 			/* let others free the space */
4267 			yield();
4268 			ar->len = ar->len >> 1;
4269 		}
4270 		if (!ar->len) {
4271 			*errp = -ENOSPC;
4272 			return 0;
4273 		}
4274 		reserv_clstrs = ar->len;
4275 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4276 			dquot_alloc_block_nofail(ar->inode,
4277 						 EXT4_C2B(sbi, ar->len));
4278 		} else {
4279 			while (ar->len &&
4280 				dquot_alloc_block(ar->inode,
4281 						  EXT4_C2B(sbi, ar->len))) {
4282 
4283 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4284 				ar->len--;
4285 			}
4286 		}
4287 		inquota = ar->len;
4288 		if (ar->len == 0) {
4289 			*errp = -EDQUOT;
4290 			goto out;
4291 		}
4292 	}
4293 
4294 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4295 	if (!ac) {
4296 		ar->len = 0;
4297 		*errp = -ENOMEM;
4298 		goto out;
4299 	}
4300 
4301 	*errp = ext4_mb_initialize_context(ac, ar);
4302 	if (*errp) {
4303 		ar->len = 0;
4304 		goto out;
4305 	}
4306 
4307 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4308 	if (!ext4_mb_use_preallocated(ac)) {
4309 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4310 		ext4_mb_normalize_request(ac, ar);
4311 repeat:
4312 		/* allocate space in core */
4313 		*errp = ext4_mb_regular_allocator(ac);
4314 		if (*errp)
4315 			goto errout;
4316 
4317 		/* as we've just preallocated more space than the
4318 		 * user originally requested, we store the allocated
4319 		 * space in a special descriptor */
4320 		if (ac->ac_status == AC_STATUS_FOUND &&
4321 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4322 			ext4_mb_new_preallocation(ac);
4323 	}
4324 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4325 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4326 		if (*errp == -EAGAIN) {
4327 			/*
4328 			 * drop the reference that we took
4329 			 * in ext4_mb_use_best_found
4330 			 */
4331 			ext4_mb_release_context(ac);
4332 			ac->ac_b_ex.fe_group = 0;
4333 			ac->ac_b_ex.fe_start = 0;
4334 			ac->ac_b_ex.fe_len = 0;
4335 			ac->ac_status = AC_STATUS_CONTINUE;
4336 			goto repeat;
4337 		} else if (*errp)
4338 		errout:
4339 			ext4_discard_allocated_blocks(ac);
4340 		else {
4341 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4342 			ar->len = ac->ac_b_ex.fe_len;
4343 		}
4344 	} else {
4345 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4346 		if (freed)
4347 			goto repeat;
4348 		*errp = -ENOSPC;
4349 	}
4350 
4351 	if (*errp) {
4352 		ac->ac_b_ex.fe_len = 0;
4353 		ar->len = 0;
4354 		ext4_mb_show_ac(ac);
4355 	}
4356 	ext4_mb_release_context(ac);
4357 out:
4358 	if (ac)
4359 		kmem_cache_free(ext4_ac_cachep, ac);
4360 	if (inquota && ar->len < inquota)
4361 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4362 	if (!ar->len) {
4363 		if (!ext4_test_inode_state(ar->inode,
4364 					   EXT4_STATE_DELALLOC_RESERVED))
4365 			/* release all the reserved blocks if non delalloc */
4366 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4367 						reserv_clstrs);
4368 	}
4369 
4370 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4371 
4372 	return block;
4373 }
4374 
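/*
 * Sketch of a typical caller (illustrative only; the field values below
 * are hypothetical):
 *
 *	struct ext4_allocation_request ar;
 *	ext4_fsblk_t newblock;
 *	int err;
 *
 *	memset(&ar, 0, sizeof(ar));
 *	ar.inode = inode;
 *	ar.logical = iblock;	(logical block within the file)
 *	ar.goal = goal;		(preferred physical block)
 *	ar.len = 8;		(number of blocks wanted)
 *	ar.flags = EXT4_MB_HINT_DATA;
 *	newblock = ext4_mb_new_blocks(handle, &ar, &err);
 *	if (!newblock)
 *		return err;	(e.g. -ENOSPC)
 *
 * On success ar.len is updated to the length actually allocated.
 */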
4375 /*
4376  * We can merge two free data extents only if the physical blocks
4377  * are contiguous, AND the extents were freed by the same transaction,
4378  * AND the blocks are associated with the same group.
4379  */
4380 static int can_merge(struct ext4_free_data *entry1,
4381 			struct ext4_free_data *entry2)
4382 {
4383 	if ((entry1->efd_tid == entry2->efd_tid) &&
4384 	    (entry1->efd_group == entry2->efd_group) &&
4385 	    ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4386 		return 1;
4387 	return 0;
4388 }
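/*
 * Example (hypothetical entries): in group 7, entry1 covering clusters
 * [100, 108) and entry2 covering [108, 116), both freed under the
 * transaction with tid 42, merge into a single extent [100, 116).  If
 * the tids or groups differ, or the ranges are not adjacent, can_merge()
 * returns 0.
 */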
4389 
4390 static noinline_for_stack int
4391 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4392 		      struct ext4_free_data *new_entry)
4393 {
4394 	ext4_group_t group = e4b->bd_group;
4395 	ext4_grpblk_t cluster;
4396 	struct ext4_free_data *entry;
4397 	struct ext4_group_info *db = e4b->bd_info;
4398 	struct super_block *sb = e4b->bd_sb;
4399 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4400 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
4401 	struct rb_node *parent = NULL, *new_node;
4402 
4403 	BUG_ON(!ext4_handle_valid(handle));
4404 	BUG_ON(e4b->bd_bitmap_page == NULL);
4405 	BUG_ON(e4b->bd_buddy_page == NULL);
4406 
4407 	new_node = &new_entry->efd_node;
4408 	cluster = new_entry->efd_start_cluster;
4409 
4410 	if (!*n) {
4411 		/* first free block extent. We need to
4412 		 * protect the buddy cache from being freed,
4413 		 * otherwise we'll refresh it from the
4414 		 * on-disk bitmap and lose not-yet-available
4415 		 * blocks */
4416 		page_cache_get(e4b->bd_buddy_page);
4417 		page_cache_get(e4b->bd_bitmap_page);
4418 	}
4419 	while (*n) {
4420 		parent = *n;
4421 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
4422 		if (cluster < entry->efd_start_cluster)
4423 			n = &(*n)->rb_left;
4424 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4425 			n = &(*n)->rb_right;
4426 		else {
4427 			ext4_grp_locked_error(sb, group, 0,
4428 				ext4_group_first_block_no(sb, group) +
4429 				EXT4_C2B(sbi, cluster),
4430 				"Block already on to-be-freed list");
4431 			return 0;
4432 		}
4433 	}
4434 
4435 	rb_link_node(new_node, parent, n);
4436 	rb_insert_color(new_node, &db->bb_free_root);
4437 
4438 	/* Now try to see if the extent can be merged to the left and right */
4439 	node = rb_prev(new_node);
4440 	if (node) {
4441 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4442 		if (can_merge(entry, new_entry)) {
4443 			new_entry->efd_start_cluster = entry->efd_start_cluster;
4444 			new_entry->efd_count += entry->efd_count;
4445 			rb_erase(node, &(db->bb_free_root));
4446 			ext4_journal_callback_del(handle, &entry->efd_jce);
4447 			kmem_cache_free(ext4_free_data_cachep, entry);
4448 		}
4449 	}
4450 
4451 	node = rb_next(new_node);
4452 	if (node) {
4453 		entry = rb_entry(node, struct ext4_free_data, efd_node);
4454 		if (can_merge(new_entry, entry)) {
4455 			new_entry->efd_count += entry->efd_count;
4456 			rb_erase(node, &(db->bb_free_root));
4457 			ext4_journal_callback_del(handle, &entry->efd_jce);
4458 			kmem_cache_free(ext4_free_data_cachep, entry);
4459 		}
4460 	}
4461 	/* Add the extent to transaction's private list */
4462 	ext4_journal_callback_add(handle, ext4_free_data_callback,
4463 				  &new_entry->efd_jce);
4464 	return 0;
4465 }
4466 
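/*
 * Illustration (hypothetical numbers): freeing metadata clusters
 * [50, 58) of group 3 in the transaction with tid 42 inserts an
 * ext4_free_data entry into the group's bb_free_root tree; if a
 * neighbouring entry [42, 50) from the same transaction is already
 * there, the two are merged into [42, 58).  The clusters become
 * allocatable again only after ext4_free_data_callback() runs once
 * that transaction has committed.
 */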
4467 /**
4468  * ext4_free_blocks() -- Free given blocks and update quota
4469  * @handle:		handle for this transaction
4470  * @inode:		inode
4471  * @block:		start physical block to free
4472  * @count:		number of blocks to free
4473  * @flags:		flags used by ext4_free_blocks
4474  */
4475 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4476 		      struct buffer_head *bh, ext4_fsblk_t block,
4477 		      unsigned long count, int flags)
4478 {
4479 	struct buffer_head *bitmap_bh = NULL;
4480 	struct super_block *sb = inode->i_sb;
4481 	struct ext4_group_desc *gdp;
4482 	unsigned long freed = 0;
4483 	unsigned int overflow;
4484 	ext4_grpblk_t bit;
4485 	struct buffer_head *gd_bh;
4486 	ext4_group_t block_group;
4487 	struct ext4_sb_info *sbi;
4488 	struct ext4_buddy e4b;
4489 	unsigned int count_clusters;
4490 	int err = 0;
4491 	int ret;
4492 
4493 	if (bh) {
4494 		if (block)
4495 			BUG_ON(block != bh->b_blocknr);
4496 		else
4497 			block = bh->b_blocknr;
4498 	}
4499 
4500 	sbi = EXT4_SB(sb);
4501 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4502 	    !ext4_data_block_valid(sbi, block, count)) {
4503 		ext4_error(sb, "Freeing blocks not in datazone - "
4504 			   "block = %llu, count = %lu", block, count);
4505 		goto error_return;
4506 	}
4507 
4508 	ext4_debug("freeing block %llu\n", block);
4509 	trace_ext4_free_blocks(inode, block, count, flags);
4510 
4511 	if (flags & EXT4_FREE_BLOCKS_FORGET) {
4512 		struct buffer_head *tbh = bh;
4513 		int i;
4514 
4515 		BUG_ON(bh && (count > 1));
4516 
4517 		for (i = 0; i < count; i++) {
4518 			if (!bh)
4519 				tbh = sb_find_get_block(inode->i_sb,
4520 							block + i);
4521 			if (unlikely(!tbh))
4522 				continue;
4523 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4524 				    inode, tbh, block + i);
4525 		}
4526 	}
4527 
4528 	/*
4529 	 * We need to make sure we don't reuse the freed block until
4530 	 * after the transaction is committed, which we can do by
4531 	 * treating the block as metadata, below.  We make an
4532 	 * exception if the inode is to be written in writeback mode
4533 	 * since writeback mode has weak data consistency guarantees.
4534 	 */
4535 	if (!ext4_should_writeback_data(inode))
4536 		flags |= EXT4_FREE_BLOCKS_METADATA;
4537 
4538 	/*
4539 	 * If the extent to be freed does not begin on a cluster
4540 	 * boundary, we need to deal with partial clusters at the
4541 	 * beginning and end of the extent.  Normally we will free
4542 	 * blocks at the beginning or the end unless we are explicitly
4543 	 * requested to avoid doing so.
4544 	 */
4545 	overflow = block & (sbi->s_cluster_ratio - 1);
4546 	if (overflow) {
4547 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4548 			overflow = sbi->s_cluster_ratio - overflow;
4549 			block += overflow;
4550 			if (count > overflow)
4551 				count -= overflow;
4552 			else
4553 				return;
4554 		} else {
4555 			block -= overflow;
4556 			count += overflow;
4557 		}
4558 	}
4559 	overflow = count & (sbi->s_cluster_ratio - 1);
4560 	if (overflow) {
4561 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4562 			if (count > overflow)
4563 				count -= overflow;
4564 			else
4565 				return;
4566 		} else
4567 			count += sbi->s_cluster_ratio - overflow;
4568 	}
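	/*
	 * Example (hypothetical, bigalloc with s_cluster_ratio = 16 and
	 * neither NOFREE flag set): freeing block 1000, count 20.
	 * 1000 & 15 = 8, so the range is widened to block 992, count 28;
	 * then 28 & 15 = 12, so count grows to 32.  The freed range now
	 * covers the whole clusters spanning blocks [992, 1024).
	 */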
4569 
4570 do_more:
4571 	overflow = 0;
4572 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4573 
4574 	/*
4575 	 * Check to see if we are freeing blocks across a group
4576 	 * boundary.
4577 	 */
4578 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4579 		overflow = EXT4_C2B(sbi, bit) + count -
4580 			EXT4_BLOCKS_PER_GROUP(sb);
4581 		count -= overflow;
4582 	}
4583 	count_clusters = EXT4_B2C(sbi, count);
4584 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4585 	if (!bitmap_bh) {
4586 		err = -EIO;
4587 		goto error_return;
4588 	}
4589 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4590 	if (!gdp) {
4591 		err = -EIO;
4592 		goto error_return;
4593 	}
4594 
4595 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4596 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4597 	    in_range(block, ext4_inode_table(sb, gdp),
4598 		     EXT4_SB(sb)->s_itb_per_group) ||
4599 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4600 		     EXT4_SB(sb)->s_itb_per_group)) {
4601 
4602 		ext4_error(sb, "Freeing blocks in system zone - "
4603 			   "Block = %llu, count = %lu", block, count);
4604 		/* err = 0. ext4_std_error should be a no op */
4605 		goto error_return;
4606 	}
4607 
4608 	BUFFER_TRACE(bitmap_bh, "getting write access");
4609 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4610 	if (err)
4611 		goto error_return;
4612 
4613 	/*
4614 	 * We are about to modify some metadata.  Call the journal APIs
4615 	 * to unshare ->b_data if a currently-committing transaction is
4616 	 * using it
4617 	 */
4618 	BUFFER_TRACE(gd_bh, "get_write_access");
4619 	err = ext4_journal_get_write_access(handle, gd_bh);
4620 	if (err)
4621 		goto error_return;
4622 #ifdef AGGRESSIVE_CHECK
4623 	{
4624 		int i;
4625 		for (i = 0; i < count_clusters; i++)
4626 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4627 	}
4628 #endif
4629 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
4630 
4631 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4632 	if (err)
4633 		goto error_return;
4634 
4635 	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
4636 		struct ext4_free_data *new_entry;
4637 		/*
4638 		 * blocks being freed are metadata. these blocks shouldn't
4639 		 * be used until this transaction is committed
4640 		 */
4641 		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
4642 		if (!new_entry) {
4643 			ext4_mb_unload_buddy(&e4b);
4644 			err = -ENOMEM;
4645 			goto error_return;
4646 		}
4647 		new_entry->efd_start_cluster = bit;
4648 		new_entry->efd_group = block_group;
4649 		new_entry->efd_count = count_clusters;
4650 		new_entry->efd_tid = handle->h_transaction->t_tid;
4651 
4652 		ext4_lock_group(sb, block_group);
4653 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4654 		ext4_mb_free_metadata(handle, &e4b, new_entry);
4655 	} else {
4656 		/* we need to update group_info->bb_free and the bitmap
4657 		 * with the group lock held; generate_buddy looks at
4658 		 * them with the group lock held
4659 		 */
4660 		ext4_lock_group(sb, block_group);
4661 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4662 		mb_free_blocks(inode, &e4b, bit, count_clusters);
4663 	}
4664 
4665 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4666 	ext4_free_group_clusters_set(sb, gdp, ret);
4667 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
4668 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
4669 	ext4_group_desc_csum_set(sb, block_group, gdp);
4670 	ext4_unlock_group(sb, block_group);
4671 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4672 
4673 	if (sbi->s_log_groups_per_flex) {
4674 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4675 		atomic_add(count_clusters,
4676 			   &sbi->s_flex_groups[flex_group].free_clusters);
4677 	}
4678 
4679 	ext4_mb_unload_buddy(&e4b);
4680 
4681 	freed += count;
4682 
4683 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4684 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4685 
4686 	/* We dirtied the bitmap block */
4687 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4688 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4689 
4690 	/* And the group descriptor block */
4691 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4692 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4693 	if (!err)
4694 		err = ret;
4695 
4696 	if (overflow && !err) {
4697 		block += count;
4698 		count = overflow;
4699 		put_bh(bitmap_bh);
4700 		goto do_more;
4701 	}
4702 error_return:
4703 	brelse(bitmap_bh);
4704 	ext4_std_error(sb, err);
4705 	return;
4706 }
4707 
4708 /**
4709  * ext4_group_add_blocks() -- Add given blocks to an existing group
4710  * @handle:			handle to this transaction
4711  * @sb:				super block
4712  * @block:			start physical block to add to the block group
4713  * @count:			number of blocks to add
4714  *
4715  * This marks the blocks as free in the bitmap and buddy.
4716  */
4717 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4718 			 ext4_fsblk_t block, unsigned long count)
4719 {
4720 	struct buffer_head *bitmap_bh = NULL;
4721 	struct buffer_head *gd_bh;
4722 	ext4_group_t block_group;
4723 	ext4_grpblk_t bit;
4724 	unsigned int i;
4725 	struct ext4_group_desc *desc;
4726 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4727 	struct ext4_buddy e4b;
4728 	int err = 0, ret, blk_free_count;
4729 	ext4_grpblk_t blocks_freed;
4730 
4731 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4732 
4733 	if (count == 0)
4734 		return 0;
4735 
4736 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4737 	/*
4738 	 * Check to see if we are freeing blocks across a group
4739 	 * boundary.
4740 	 */
4741 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4742 		ext4_warning(sb, "too many blocks added to group %u\n",
4743 			     block_group);
4744 		err = -EINVAL;
4745 		goto error_return;
4746 	}
4747 
4748 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4749 	if (!bitmap_bh) {
4750 		err = -EIO;
4751 		goto error_return;
4752 	}
4753 
4754 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4755 	if (!desc) {
4756 		err = -EIO;
4757 		goto error_return;
4758 	}
4759 
4760 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4761 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4762 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4763 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
4764 		     sbi->s_itb_per_group)) {
4765 		ext4_error(sb, "Adding blocks in system zones - "
4766 			   "Block = %llu, count = %lu",
4767 			   block, count);
4768 		err = -EINVAL;
4769 		goto error_return;
4770 	}
4771 
4772 	BUFFER_TRACE(bitmap_bh, "getting write access");
4773 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4774 	if (err)
4775 		goto error_return;
4776 
4777 	/*
4778 	 * We are about to modify some metadata.  Call the journal APIs
4779 	 * to unshare ->b_data if a currently-committing transaction is
4780 	 * using it
4781 	 */
4782 	BUFFER_TRACE(gd_bh, "get_write_access");
4783 	err = ext4_journal_get_write_access(handle, gd_bh);
4784 	if (err)
4785 		goto error_return;
4786 
4787 	for (i = 0, blocks_freed = 0; i < count; i++) {
4788 		BUFFER_TRACE(bitmap_bh, "clear bit");
4789 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4790 			ext4_error(sb, "bit already cleared for block %llu",
4791 				   (ext4_fsblk_t)(block + i));
4792 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
4793 		} else {
4794 			blocks_freed++;
4795 		}
4796 	}
4797 
4798 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4799 	if (err)
4800 		goto error_return;
4801 
4802 	/*
4803 	 * we need to update group_info->bb_free and the bitmap
4804 	 * with the group lock held; generate_buddy looks at
4805 	 * them with the group lock held
4806 	 */
4807 	ext4_lock_group(sb, block_group);
4808 	mb_clear_bits(bitmap_bh->b_data, bit, count);
4809 	mb_free_blocks(NULL, &e4b, bit, count);
4810 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4811 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
4812 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
4813 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
4814 	ext4_group_desc_csum_set(sb, block_group, desc);
4815 	ext4_unlock_group(sb, block_group);
4816 	percpu_counter_add(&sbi->s_freeclusters_counter,
4817 			   EXT4_B2C(sbi, blocks_freed));
4818 
4819 	if (sbi->s_log_groups_per_flex) {
4820 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4821 		atomic_add(EXT4_B2C(sbi, blocks_freed),
4822 			   &sbi->s_flex_groups[flex_group].free_clusters);
4823 	}
4824 
4825 	ext4_mb_unload_buddy(&e4b);
4826 
4827 	/* We dirtied the bitmap block */
4828 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4829 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4830 
4831 	/* And the group descriptor block */
4832 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4833 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4834 	if (!err)
4835 		err = ret;
4836 
4837 error_return:
4838 	brelse(bitmap_bh);
4839 	ext4_std_error(sb, err);
4840 	return err;
4841 }
4842 
4843 /**
4844  * ext4_trim_extent -- function to TRIM one single free extent in the group
4845  * @sb:		super block for the file system
4846  * @start:	starting block of the free extent in the alloc. group
4847  * @count:	number of blocks to TRIM
4848  * @group:	alloc. group we are working with
4849  * @e4b:	ext4 buddy for the group
4850  *
4851  * Trim "count" blocks starting at "start" in the "group". To ensure that no
4852  * one will allocate those blocks, mark them as used in the buddy bitmap. This
4853  * must be called under the group lock.
4854  */
4855 static void ext4_trim_extent(struct super_block *sb, int start, int count,
4856 			     ext4_group_t group, struct ext4_buddy *e4b)
4857 {
4858 	struct ext4_free_extent ex;
4859 
4860 	trace_ext4_trim_extent(sb, group, start, count);
4861 
4862 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
4863 
4864 	ex.fe_start = start;
4865 	ex.fe_group = group;
4866 	ex.fe_len = count;
4867 
4868 	/*
4869 	 * Mark blocks used, so no one can reuse them while
4870 	 * being trimmed.
4871 	 */
4872 	mb_mark_used(e4b, &ex);
4873 	ext4_unlock_group(sb, group);
4874 	ext4_issue_discard(sb, group, start, count);
4875 	ext4_lock_group(sb, group);
4876 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
4877 }
4878 
4879 /**
4880  * ext4_trim_all_free -- function to trim all free space in alloc. group
4881  * @sb:			super block for file system
4882  * @group:		group to be trimmed
4883  * @start:		first group block to examine
4884  * @max:		last group block to examine
4885  * @minblocks:		minimum extent block count
4886  *
4887  * ext4_trim_all_free walks through the group's buddy bitmap searching for
4888  * free extents. When a free extent is found, it is marked as used in the
4889  * group buddy bitmap, ext4_trim_extent is called to issue a TRIM command on
4890  * it, and the extent is then freed again in the group buddy bitmap. This is
4891  * done until the whole group has been scanned.
4896  */
4897 static ext4_grpblk_t
4898 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4899 		   ext4_grpblk_t start, ext4_grpblk_t max,
4900 		   ext4_grpblk_t minblocks)
4901 {
4902 	void *bitmap;
4903 	ext4_grpblk_t next, count = 0, free_count = 0;
4904 	struct ext4_buddy e4b;
4905 	int ret;
4906 
4907 	trace_ext4_trim_all_free(sb, group, start, max);
4908 
4909 	ret = ext4_mb_load_buddy(sb, group, &e4b);
4910 	if (ret) {
4911 		ext4_error(sb, "Error in loading buddy "
4912 				"information for %u", group);
4913 		return ret;
4914 	}
4915 	bitmap = e4b.bd_bitmap;
4916 
4917 	ext4_lock_group(sb, group);
4918 	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
4919 	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
4920 		goto out;
4921 
4922 	start = (e4b.bd_info->bb_first_free > start) ?
4923 		e4b.bd_info->bb_first_free : start;
4924 
4925 	while (start <= max) {
4926 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
4927 		if (start > max)
4928 			break;
4929 		next = mb_find_next_bit(bitmap, max + 1, start);
4930 
4931 		if ((next - start) >= minblocks) {
4932 			ext4_trim_extent(sb, start,
4933 					 next - start, group, &e4b);
4934 			count += next - start;
4935 		}
4936 		free_count += next - start;
4937 		start = next + 1;
4938 
4939 		if (fatal_signal_pending(current)) {
4940 			count = -ERESTARTSYS;
4941 			break;
4942 		}
4943 
4944 		if (need_resched()) {
4945 			ext4_unlock_group(sb, group);
4946 			cond_resched();
4947 			ext4_lock_group(sb, group);
4948 		}
4949 
4950 		if ((e4b.bd_info->bb_free - free_count) < minblocks)
4951 			break;
4952 	}
4953 
4954 	if (!ret)
4955 		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
4956 out:
4957 	ext4_unlock_group(sb, group);
4958 	ext4_mb_unload_buddy(&e4b);
4959 
4960 	ext4_debug("trimmed %d blocks in the group %d\n",
4961 		count, group);
4962 
4963 	return count;
4964 }
4965 
4966 /**
4967  * ext4_trim_fs() -- trim ioctl handle function
4968  * @sb:			superblock for filesystem
4969  * @range:		fstrim_range structure
4970  *
4971  * start:	First Byte to trim
4972  * start:	first byte to trim
4973  * len:		number of bytes to trim from start
4974  * minlen:	minimum extent length in bytes
4975  * ext4_trim_fs goes through all allocation groups containing bytes from
4976  * start to start+len. For each such group the ext4_trim_all_free function
4977  */
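/*
 * For example (hypothetical 4KiB-block filesystem): an fstrim_range with
 * start = 1 GiB, len = 512 MiB and minlen = 1 MiB is converted below into
 * start block 262144, end block 393215 and a minimum extent length of
 * 256 blocks before the affected groups are walked.
 */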
4978 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4979 {
4980 	struct ext4_group_info *grp;
4981 	ext4_group_t group, first_group, last_group;
4982 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
4983 	uint64_t start, end, minlen, trimmed = 0;
4984 	ext4_fsblk_t first_data_blk =
4985 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
4986 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
4987 	int ret = 0;
4988 
4989 	start = range->start >> sb->s_blocksize_bits;
4990 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
4991 	minlen = range->minlen >> sb->s_blocksize_bits;
4992 
4993 	if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
4994 	    unlikely(start >= max_blks))
4995 		return -EINVAL;
4996 	if (end >= max_blks)
4997 		end = max_blks - 1;
4998 	if (end <= first_data_blk)
4999 		goto out;
5000 	if (start < first_data_blk)
5001 		start = first_data_blk;
5002 
5003 	/* Determine first and last group to examine based on start and end */
5004 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5005 				     &first_group, &first_cluster);
5006 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5007 				     &last_group, &last_cluster);
5008 
5009 	/* end now represents the last cluster to discard in this group */
5010 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5011 
5012 	for (group = first_group; group <= last_group; group++) {
5013 		grp = ext4_get_group_info(sb, group);
5014 		/* We only do this if the grp has never been initialized */
5015 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5016 			ret = ext4_mb_init_group(sb, group);
5017 			if (ret)
5018 				break;
5019 		}
5020 
5021 		/*
5022 		 * For all the groups except the last one, last cluster will
5023 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5024 		 * change it for the last group, note that last_cluster is
5025 		 * already computed earlier by ext4_get_group_no_and_offset()
5026 		 */
5027 		if (group == last_group)
5028 			end = last_cluster;
5029 
5030 		if (grp->bb_free >= minlen) {
5031 			cnt = ext4_trim_all_free(sb, group, first_cluster,
5032 						end, minlen);
5033 			if (cnt < 0) {
5034 				ret = cnt;
5035 				break;
5036 			}
5037 			trimmed += cnt;
5038 		}
5039 
5040 		/*
5041 		 * For every group except the first one, we are sure
5042 		 * that the first cluster to discard will be cluster #0.
5043 		 */
5044 		first_cluster = 0;
5045 	}
5046 
5047 	if (!ret)
5048 		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5049 
5050 out:
5051 	range->len = trimmed * sb->s_blocksize;
5052 	return ret;
5053 }
5054