xref: /openbmc/linux/fs/ext4/mballoc.c (revision f42b3800)
1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA
17  */
18 
19 
20 /*
21  * mballoc.c contains the multiblocks allocation routines
22  */
23 
24 #include <linux/time.h>
25 #include <linux/fs.h>
26 #include <linux/namei.h>
27 #include <linux/ext4_jbd2.h>
28 #include <linux/ext4_fs.h>
29 #include <linux/quotaops.h>
30 #include <linux/buffer_head.h>
31 #include <linux/module.h>
32 #include <linux/swap.h>
33 #include <linux/proc_fs.h>
34 #include <linux/pagemap.h>
35 #include <linux/seq_file.h>
36 #include <linux/version.h>
37 #include "group.h"
38 
39 /*
40  * MUSTDO:
41  *   - test ext4_ext_search_left() and ext4_ext_search_right()
42  *   - search for metadata in few groups
43  *
44  * TODO v4:
45  *   - normalization should take into account whether file is still open
46  *   - discard preallocations if no free space left (policy?)
47  *   - don't normalize tails
48  *   - quota
49  *   - reservation for superuser
50  *
51  * TODO v3:
52  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
53  *   - track min/max extents in each group for better group selection
54  *   - mb_mark_used() may allocate chunk right after splitting buddy
55  *   - tree of groups sorted by number of free blocks
56  *   - error handling
57  */
58 
59 /*
60  * The allocation request involves a request for multiple blocks
61  * near to the goal (block) value specified.
62  *
63  * During the initialization phase of the allocator we decide to use the
64  * group preallocation or inode preallocation depending on the size of
65  * the file. The size of the file could be the resulting file size we
66  * would have after allocation, or the current file size, whichever is
67  * larger. If the size is less than sbi->s_mb_stream_request we select
68  * the group preallocation. The default value of s_mb_stream_request is
69  * 16 blocks. This can also be tuned via
70  * /proc/fs/ext4/<partition>/stream_req. The value is represented in
71  * terms of number of blocks.
72  *
73  * The main motivation for having small files use group preallocation is
74  * to ensure that we keep small files close together on the disk.
75  *
76  * In the first stage the allocator looks at the inode prealloc list,
77  * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
78  * spaces for this particular inode. The inode prealloc space is
79  * represented as:
80  *
81  * pa_lstart -> the logical start block for this prealloc space
82  * pa_pstart -> the physical start block for this prealloc space
83  * pa_len    -> length of this prealloc space
84  * pa_free   -> free space available in this prealloc space
85  *
86  * The inode preallocation space is used by looking at the _logical_ start
87  * block. If the logical file block falls within the range of the prealloc
88  * space, we consume that particular prealloc space. This makes sure
89  * that we have contiguous physical blocks representing the file blocks
90  *
91  * The important thing to note about the inode prealloc space is that
92  * we don't modify the values associated with it except pa_free.
93  *
94  * If we are not able to find blocks in the inode prealloc space and we
95  * have the group allocation flag set, then we look at the locality group
96  * prealloc space. These are per-CPU prealloc lists represented as
97  *
98  * ext4_sb_info.s_locality_groups[smp_processor_id()]
99  *
100  * The reason for having a per-CPU locality group is to reduce the contention
101  * between CPUs. It is possible to get scheduled at this point.
102  *
103  * The locality group prealloc space is used by checking whether we have
104  * enough free space (pa_free) within the prealloc space.
105  *
106  * If we can't allocate blocks via inode prealloc and/or the locality
107  * group prealloc, then we look at the buddy cache. The buddy cache is
108  * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
109  * offset gets mapped to the buddy and bitmap information of the
110  * different groups. The buddy information is attached to the buddy
111  * cache inode so that we can access it through the page cache. The
112  * information regarding each group is loaded via ext4_mb_load_buddy and
113  * consists of the block bitmap and the buddy information, which are
114  * stored in the inode as:
115  *
116  *  {                        page                        }
117  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
118  *
119  *
120  * one block each for bitmap and buddy information.  So for each group we
121  * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
122  * blocksize) blocks.  So it can hold information for groups_per_page
123  * groups, which is blocks_per_page/2
124  *
125  * The buddy cache inode is not stored on disk. The inode is thrown
126  * away when the filesystem is unmounted.
127  *
128  * We look for count number of blocks in the buddy cache. If we were able
129  * to locate that many free blocks we return with additional information
130  * regarding the rest of the contiguous physical blocks available
131  *
132  * Before allocating blocks via the buddy cache we normalize the request
133  * blocks. This ensures we ask for more blocks than we need. The extra
134  * blocks that we get after allocation are added to the respective prealloc
135  * list. In the case of inode preallocation we follow a set of heuristics
136  * based on file size. This can be found in ext4_mb_normalize_request. If
137  * we are doing a group prealloc we try to normalize the request to
138  * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
139  * 512 blocks. This can be tuned via
140  * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
141  * terms of number of blocks. If we have mounted the file system with the
142  * -o stripe=<value> option, the group prealloc request is normalized to
143  * the stripe value (sbi->s_stripe)
144  *
145  * The regular allocator (using the buddy cache) supports a few tunables.
146  *
147  * /proc/fs/ext4/<partition>/min_to_scan
148  * /proc/fs/ext4/<partition>/max_to_scan
149  * /proc/fs/ext4/<partition>/order2_req
150  *
151  * The regular allocator uses buddy scan only if the request length is a
152  * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
153  * The value of s_mb_order2_reqs can be tuned via
154  * /proc/fs/ext4/<partition>/order2_req.  If the request length is equal to
155  * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
156  * in stripe-size units. This should result in better allocation on RAID
157  * setups. If not, we search in the specific group using the bitmap for
158  * the best extents. The tunables min_to_scan and max_to_scan control the
159  * behaviour here. min_to_scan indicates how long mballoc __must__ look
160  * for a best extent and max_to_scan indicates how long mballoc __can__
161  * look for a best extent among the found extents. Searching for blocks
162  * starts with the group specified as the goal value in the allocation
163  * context via ac_g_ex. Each group is first checked against the criteria
164  * of whether it can be used for allocation; ext4_mb_good_group explains
165  * how the groups are checked.
166  *
167  * Both preallocation spaces are populated as described above. So for the
168  * first request we will hit the buddy cache, which results in the prealloc
169  * space getting filled. The prealloc space is then used for
170  * subsequent requests.
171  */
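
/*
 * A worked example of the sizing above, assuming PAGE_CACHE_SIZE = 4096
 * and blocksize = 1024:
 *
 *	blocks_per_page  = 4096 / 1024 = 4
 *	groups_per_page  = blocks_per_page / 2 = 2
 *
 * so page 0 of the buddy cache inode holds [group 0 bitmap][group 0
 * buddy][group 1 bitmap][group 1 buddy], page 1 holds groups 2 and 3,
 * and so on.
 */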
172 
173 /*
174  * mballoc operates on the following data:
175  *  - on-disk bitmap
176  *  - in-core buddy (actually includes buddy and bitmap)
177  *  - preallocation descriptors (PAs)
178  *
179  * there are two types of preallocations:
180  *  - inode
181  *    assigned to a specific inode and can be used for this inode only.
182  *    it describes part of the inode's space preallocated to specific
183  *    physical blocks. any block from that preallocation can be used
184  *    independently. the descriptor just tracks the number of blocks left
185  *    unused. so, before taking some block from the descriptor, one must
186  *    make sure the corresponding logical block isn't allocated yet. this
187  *    also means that freeing any block within the descriptor's range
188  *    must discard all preallocated blocks.
189  *  - locality group
190  *    assigned to a specific locality group which does not translate to a
191  *    permanent set of inodes: inodes can join and leave the group. space
192  *    from this type of preallocation can be used for any inode. thus
193  *    it's consumed from the beginning to the end.
194  *
195  * relation between them can be expressed as:
196  *    in-core buddy = on-disk bitmap + preallocation descriptors
197  *
198  * this means the blocks mballoc considers used are:
199  *  - allocated blocks (persistent)
200  *  - preallocated blocks (non-persistent)
201  *
202  * consistency in mballoc world means that at any time a block is either
203  * free or used in ALL structures. notice: "any time" should not be read
204  * literally -- time is discrete and delimited by locks.
205  *
206  *  to keep it simple, we don't use block numbers, instead we count number of
207  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
208  *
209  * all operations can be expressed as:
210  *  - init buddy:			buddy = on-disk + PAs
211  *  - new PA:				buddy += N; PA = N
212  *  - use inode PA:			on-disk += N; PA -= N
213  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
214  *  - use locality group PA		on-disk += N; PA -= N
215  *  - discard locality group PA		buddy -= PA; PA = 0
216  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
217  *        is used in real operation because we can't know actual used
218  *        bits from PA, only from on-disk bitmap
219  *
220  * if we follow this strict logic, then all operations above should be atomic.
221  * given some of them can block, we'd have to use something like semaphores
222  * killing performance on high-end SMP hardware. let's try to relax it using
223  * the following knowledge:
224  *  1) if buddy is referenced, it's already initialized
225  *  2) while block is used in buddy and the buddy is referenced,
226  *     nobody can re-allocate that block
227  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
228  *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
229  *     bit in the on-disk bitmap if the buddy has the same bit set and/or a
230  *     PA covers the corresponding block
231  *
232  * so, now we're building a concurrency table:
233  *  - init buddy vs.
234  *    - new PA
235  *      blocks for PA are allocated in the buddy, buddy must be referenced
236  *      until PA is linked to allocation group to avoid concurrent buddy init
237  *    - use inode PA
238  *      we need to make sure that either on-disk bitmap or PA has uptodate data
239  *      given (3) we care that PA-=N operation doesn't interfere with init
240  *    - discard inode PA
241  *      the simplest way would be to have buddy initialized by the discard
242  *    - use locality group PA
243  *      again PA-=N must be serialized with init
244  *    - discard locality group PA
245  *      the simplest way would be to have buddy initialized by the discard
246  *  - new PA vs.
247  *    - use inode PA
248  *      i_data_sem serializes them
249  *    - discard inode PA
250  *      discard process must wait until PA isn't used by another process
251  *    - use locality group PA
252  *      some mutex should serialize them
253  *    - discard locality group PA
254  *      discard process must wait until PA isn't used by another process
255  *  - use inode PA
256  *    - use inode PA
257  *      i_data_sem or another mutex should serialize them
258  *    - discard inode PA
259  *      discard process must wait until PA isn't used by another process
260  *    - use locality group PA
261  *      nothing wrong here -- they're different PAs covering different blocks
262  *    - discard locality group PA
263  *      discard process must wait until PA isn't used by another process
264  *
265  * now we're ready to draw a few conclusions:
266  *  - while a PA is referenced, no discard of it is possible
267  *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
268  *  - a PA changes only after the on-disk bitmap does
269  *  - discard must not compete with init. either init is done before
270  *    any discard, or they're serialized somehow
271  *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
272  *
273  * a special case is when we've used a PA to emptiness. no need to modify
274  * the buddy in this case, but we should still take care of concurrent init
275  *
276  */
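
/*
 * To illustrate the accounting above with made-up numbers: say a group
 * has 100 blocks marked in the on-disk bitmap and one inode PA with
 * pa_len = 10, pa_free = 10. Then:
 *
 *  - init buddy:		buddy = 100 + 10 = 110 blocks used
 *  - use inode PA, N=4:	on-disk = 104, PA = 6, buddy still 110
 *				(110 = 104 + 6, so all structures agree)
 *  - discard inode PA:		the 6 unused blocks, found by consulting
 *				the on-disk bitmap, are freed in the
 *				buddy: 110 - 6 = 104
 *
 * after the discard, buddy and on-disk bitmap agree again (104 used),
 * which is exactly the consistency property described above.
 */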
277 
278  /*
279  * Logic in few words:
280  *
281  *  - allocation:
282  *    load group
283  *    find blocks
284  *    mark bits in on-disk bitmap
285  *    release group
286  *
287  *  - use preallocation:
288  *    find proper PA (per-inode or group)
289  *    load group
290  *    mark bits in on-disk bitmap
291  *    release group
292  *    release PA
293  *
294  *  - free:
295  *    load group
296  *    mark bits in on-disk bitmap
297  *    release group
298  *
299  *  - discard preallocations in group:
300  *    mark PAs deleted
301  *    move them onto local list
302  *    load on-disk bitmap
303  *    load group
304  *    remove PA from object (inode or locality group)
305  *    mark free blocks in-core
306  *
307  *  - discard inode's preallocations:
308  */
309 
310 /*
311  * Locking rules
312  *
313  * Locks:
314  *  - bitlock on a group	(group)
315  *  - object (inode/locality)	(object)
316  *  - per-pa lock		(pa)
317  *
318  * Paths:
319  *  - new pa
320  *    object
321  *    group
322  *
323  *  - find and use pa:
324  *    pa
325  *
326  *  - release consumed pa:
327  *    pa
328  *    group
329  *    object
330  *
331  *  - generate in-core bitmap:
332  *    group
333  *        pa
334  *
335  *  - discard all for given object (inode, locality group):
336  *    object
337  *        pa
338  *    group
339  *
340  *  - discard all for given group:
341  *    group
342  *        pa
343  *    group
344  *        object
345  *
346  */
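
/*
 * Reading the table above for the "new pa" path, for example: the
 * object (inode/locality) lock is taken first and the group bitlock
 * inside it (a sketch of the documented ordering, not the actual code):
 *
 *	spin_lock(pa->pa_obj_lock);
 *	ext4_lock_group(sb, group);
 *	... link the PA to the object and group lists ...
 *	ext4_unlock_group(sb, group);
 *	spin_unlock(pa->pa_obj_lock);
 */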
347 
348 /*
349  * with AGGRESSIVE_CHECK allocator runs consistency checks over
350  * structures. these checks slow things down a lot
351  */
352 #define AGGRESSIVE_CHECK__
353 
354 /*
355  * with DOUBLE_CHECK defined mballoc creates persistent in-core
356  * bitmaps, maintains and uses them to check for double allocations
357  */
358 #define DOUBLE_CHECK__
359 
360 /* with MB_DEBUG defined, mballoc prints debugging messages */
362 #define MB_DEBUG__
363 #ifdef MB_DEBUG
364 #define mb_debug(fmt, a...)	printk(fmt, ##a)
365 #else
366 #define mb_debug(fmt, a...)
367 #endif
368 
369 /*
370  * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
371  * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
372  */
373 #define EXT4_MB_HISTORY
374 #define EXT4_MB_HISTORY_ALLOC		1	/* allocation */
375 #define EXT4_MB_HISTORY_PREALLOC	2	/* preallocated blocks used */
376 #define EXT4_MB_HISTORY_DISCARD		4	/* preallocation discarded */
377 #define EXT4_MB_HISTORY_FREE		8	/* free */
378 
379 #define EXT4_MB_HISTORY_DEFAULT		(EXT4_MB_HISTORY_ALLOC | \
380 					 EXT4_MB_HISTORY_PREALLOC)
381 
382 /*
383  * How long mballoc can look for a best extent (in found extents)
384  */
385 #define MB_DEFAULT_MAX_TO_SCAN		200
386 
387 /*
388  * How long mballoc must look for a best extent
389  */
390 #define MB_DEFAULT_MIN_TO_SCAN		10
391 
392 /*
393  * How many groups mballoc will scan looking for the best chunk
394  */
395 #define MB_DEFAULT_MAX_GROUPS_TO_SCAN	5
396 
397 /*
398  * with 'ext4_mb_stats' allocator will collect stats that will be
399  * shown at umount. The collecting costs though!
400  */
401 #define MB_DEFAULT_STATS		1
402 
403 /*
404  * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
405  * by the stream allocator, whose purpose is to pack requests
406  * as close to each other as possible to produce smooth I/O traffic.
407  * We use the locality group prealloc space for stream requests.
408  * This can be tuned via /proc/fs/ext4/<partition>/stream_req
409  */
410 #define MB_DEFAULT_STREAM_THRESHOLD	16	/* 64K */
411 
412 /*
413  * requests of at least this order use the 2^N buddy search
414  */
415 #define MB_DEFAULT_ORDER2_REQS		2
416 
417 /*
418  * default group prealloc size 512 blocks
419  */
420 #define MB_DEFAULT_GROUP_PREALLOC	512
421 
422 static struct kmem_cache *ext4_pspace_cachep;
423 static struct kmem_cache *ext4_ac_cachep;
424 
425 #ifdef EXT4_BB_MAX_BLOCKS
426 #undef EXT4_BB_MAX_BLOCKS
427 #endif
428 #define EXT4_BB_MAX_BLOCKS	30
429 
430 struct ext4_free_metadata {
431 	ext4_group_t group;
432 	unsigned short num;
433 	ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
434 	struct list_head list;
435 };
436 
437 struct ext4_group_info {
438 	unsigned long	bb_state;
439 	unsigned long	bb_tid;
440 	struct ext4_free_metadata *bb_md_cur;
441 	unsigned short	bb_first_free;
442 	unsigned short	bb_free;
443 	unsigned short	bb_fragments;
444 	struct		list_head bb_prealloc_list;
445 #ifdef DOUBLE_CHECK
446 	void		*bb_bitmap;
447 #endif
448 	unsigned short	bb_counters[];
449 };
450 
451 #define EXT4_GROUP_INFO_NEED_INIT_BIT	0
452 #define EXT4_GROUP_INFO_LOCKED_BIT	1
453 
454 #define EXT4_MB_GRP_NEED_INIT(grp)	\
455 	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
456 
457 
458 struct ext4_prealloc_space {
459 	struct list_head	pa_inode_list;
460 	struct list_head	pa_group_list;
461 	union {
462 		struct list_head pa_tmp_list;
463 		struct rcu_head	pa_rcu;
464 	} u;
465 	spinlock_t		pa_lock;
466 	atomic_t		pa_count;
467 	unsigned		pa_deleted;
468 	ext4_fsblk_t		pa_pstart;	/* phys. block */
469 	ext4_lblk_t		pa_lstart;	/* log. block */
470 	unsigned short		pa_len;		/* len of preallocated chunk */
471 	unsigned short		pa_free;	/* how many blocks are free */
472 	unsigned short		pa_linear;	/* consumed in one direction
473 						 * strictly, for grp prealloc */
474 	spinlock_t		*pa_obj_lock;
475 	struct inode		*pa_inode;	/* hack, for history only */
476 };
477 
478 
479 struct ext4_free_extent {
480 	ext4_lblk_t fe_logical;
481 	ext4_grpblk_t fe_start;
482 	ext4_group_t fe_group;
483 	int fe_len;
484 };
485 
486 /*
487  * Locality group:
488  *   we try to group all related changes together
489  *   so that writeback can flush/allocate them together as well
490  */
491 struct ext4_locality_group {
492 	/* for allocator */
493 	struct mutex		lg_mutex;	/* to serialize allocates */
494 	struct list_head	lg_prealloc_list;/* list of preallocations */
495 	spinlock_t		lg_prealloc_lock;
496 };
497 
498 struct ext4_allocation_context {
499 	struct inode *ac_inode;
500 	struct super_block *ac_sb;
501 
502 	/* original request */
503 	struct ext4_free_extent ac_o_ex;
504 
505 	/* goal request (after normalization) */
506 	struct ext4_free_extent ac_g_ex;
507 
508 	/* the best found extent */
509 	struct ext4_free_extent ac_b_ex;
510 
511 	/* copy of the best found extent taken before preallocation efforts */
512 	struct ext4_free_extent ac_f_ex;
513 
514 	/* number of iterations done. we have to track to limit searching */
515 	unsigned long ac_ex_scanned;
516 	__u16 ac_groups_scanned;
517 	__u16 ac_found;
518 	__u16 ac_tail;
519 	__u16 ac_buddy;
520 	__u16 ac_flags;		/* allocation hints */
521 	__u8 ac_status;
522 	__u8 ac_criteria;
523 	__u8 ac_repeats;
524 	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
525 				 * N > 0, the field stores N, otherwise 0 */
526 	__u8 ac_op;		/* operation, for history only */
527 	struct page *ac_bitmap_page;
528 	struct page *ac_buddy_page;
529 	struct ext4_prealloc_space *ac_pa;
530 	struct ext4_locality_group *ac_lg;
531 };
532 
533 #define AC_STATUS_CONTINUE	1
534 #define AC_STATUS_FOUND		2
535 #define AC_STATUS_BREAK		3
536 
537 struct ext4_mb_history {
538 	struct ext4_free_extent orig;	/* orig allocation */
539 	struct ext4_free_extent goal;	/* goal allocation */
540 	struct ext4_free_extent result;	/* result allocation */
541 	unsigned pid;
542 	unsigned ino;
543 	__u16 found;	/* how many extents have been found */
544 	__u16 groups;	/* how many groups have been scanned */
545 	__u16 tail;	/* what tail broke some buddy */
546 	__u16 buddy;	/* buddy the tail ^^^ broke */
547 	__u16 flags;
548 	__u8 cr:3;	/* which phase the result extent was found at */
549 	__u8 op:4;
550 	__u8 merged:1;
551 };
552 
553 struct ext4_buddy {
554 	struct page *bd_buddy_page;
555 	void *bd_buddy;
556 	struct page *bd_bitmap_page;
557 	void *bd_bitmap;
558 	struct ext4_group_info *bd_info;
559 	struct super_block *bd_sb;
560 	__u16 bd_blkbits;
561 	ext4_group_t bd_group;
562 };
563 #define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
564 #define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)
565 
566 #ifndef EXT4_MB_HISTORY
567 static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
568 {
569 	return;
570 }
571 #else
572 static void ext4_mb_store_history(struct ext4_allocation_context *ac);
573 #endif
574 
575 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
576 
577 static struct proc_dir_entry *proc_root_ext4;
578 struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
579 ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
580 			ext4_fsblk_t goal, unsigned long *count, int *errp);
581 
582 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
583 					ext4_group_t group);
584 static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
585 static void ext4_mb_free_committed_blocks(struct super_block *);
586 static void ext4_mb_return_to_preallocation(struct inode *inode,
587 					struct ext4_buddy *e4b, sector_t block,
588 					int count);
589 static void ext4_mb_put_pa(struct ext4_allocation_context *,
590 			struct super_block *, struct ext4_prealloc_space *pa);
591 static int ext4_mb_init_per_dev_proc(struct super_block *sb);
592 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
593 
594 
595 static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
596 {
597 	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
598 
599 	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
600 }
601 
602 static inline void ext4_unlock_group(struct super_block *sb,
603 					ext4_group_t group)
604 {
605 	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
606 
607 	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
608 }
609 
610 static inline int ext4_is_group_locked(struct super_block *sb,
611 					ext4_group_t group)
612 {
613 	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
614 
615 	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
616 						&(grinfo->bb_state));
617 }
618 
619 static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
620 					struct ext4_free_extent *fex)
621 {
622 	ext4_fsblk_t block;
623 
624 	block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
625 			+ fex->fe_start
626 			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
627 	return block;
628 }
629 
630 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
631 {
632 #if BITS_PER_LONG == 64
633 	*bit += ((unsigned long) addr & 7UL) << 3;
634 	addr = (void *) ((unsigned long) addr & ~7UL);
635 #elif BITS_PER_LONG == 32
636 	*bit += ((unsigned long) addr & 3UL) << 3;
637 	addr = (void *) ((unsigned long) addr & ~3UL);
638 #else
639 #error "how many bits you are?!"
640 #endif
641 	return addr;
642 }
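
/*
 * Worked example: on a 64-bit machine, with addr = base + 5 (so
 * addr & 7UL == 5) and *bit == 3, the code yields *bit = 3 + (5 << 3)
 * = 43 and addr = base. In the little-endian bit order used by the
 * ext4 bitops, bit 43 of base is byte 5, bit 3 -- the same physical
 * bit we started with, now reachable through an aligned address.
 */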
643 
644 static inline int mb_test_bit(int bit, void *addr)
645 {
646 	/*
647 	 * ext4_test_bit on architecture like powerpc
648 	 * needs unsigned long aligned address
649 	 */
650 	addr = mb_correct_addr_and_bit(&bit, addr);
651 	return ext4_test_bit(bit, addr);
652 }
653 
654 static inline void mb_set_bit(int bit, void *addr)
655 {
656 	addr = mb_correct_addr_and_bit(&bit, addr);
657 	ext4_set_bit(bit, addr);
658 }
659 
660 static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
661 {
662 	addr = mb_correct_addr_and_bit(&bit, addr);
663 	ext4_set_bit_atomic(lock, bit, addr);
664 }
665 
666 static inline void mb_clear_bit(int bit, void *addr)
667 {
668 	addr = mb_correct_addr_and_bit(&bit, addr);
669 	ext4_clear_bit(bit, addr);
670 }
671 
672 static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
673 {
674 	addr = mb_correct_addr_and_bit(&bit, addr);
675 	ext4_clear_bit_atomic(lock, bit, addr);
676 }
677 
678 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
679 {
680 	int fix = 0;
681 	addr = mb_correct_addr_and_bit(&fix, addr);
682 	max += fix;
683 	start += fix;
684 
685 	return ext4_find_next_zero_bit(addr, max, start) - fix;
686 }
687 
688 static inline int mb_find_next_bit(void *addr, int max, int start)
689 {
690 	int fix = 0;
691 	addr = mb_correct_addr_and_bit(&fix, addr);
692 	max += fix;
693 	start += fix;
694 
695 	return ext4_find_next_bit(addr, max, start) - fix;
696 }
697 
698 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
699 {
700 	char *bb;
701 
702 	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
703 	BUG_ON(max == NULL);
704 
705 	if (order > e4b->bd_blkbits + 1) {
706 		*max = 0;
707 		return NULL;
708 	}
709 
710 	/* at order 0 we see each particular block */
711 	*max = 1 << (e4b->bd_blkbits + 3);
712 	if (order == 0)
713 		return EXT4_MB_BITMAP(e4b);
714 
715 	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
716 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
717 
718 	return bb;
719 }
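
/*
 * Layout illustration, assuming 4K blocks (bd_blkbits = 12): order 0 is
 * the block bitmap itself, *max = 1 << (12 + 3) = 32768 bits, one per
 * block. Order 1 lives at s_mb_offsets[1] within the buddy block with
 * 16384 bits, each covering a pair of blocks; order 2 has 8192 bits,
 * and so on up to order bd_blkbits + 1. The s_mb_offsets[]/s_mb_maxs[]
 * arrays are precomputed at mount time, so this lookup is O(1).
 */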
720 
721 #ifdef DOUBLE_CHECK
722 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
723 			   int first, int count)
724 {
725 	int i;
726 	struct super_block *sb = e4b->bd_sb;
727 
728 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
729 		return;
730 	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
731 	for (i = 0; i < count; i++) {
732 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
733 			ext4_fsblk_t blocknr;
734 			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
735 			blocknr += first + i;
736 			blocknr +=
737 			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
738 
739 			ext4_error(sb, __FUNCTION__, "double-free of inode"
740 				   " %lu's block %llu(bit %u in group %lu)\n",
741 				   inode ? inode->i_ino : 0, blocknr,
742 				   first + i, e4b->bd_group);
743 		}
744 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
745 	}
746 }
747 
748 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
749 {
750 	int i;
751 
752 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
753 		return;
754 	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
755 	for (i = 0; i < count; i++) {
756 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
757 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
758 	}
759 }
760 
761 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
762 {
763 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
764 		unsigned char *b1, *b2;
765 		int i;
766 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
767 		b2 = (unsigned char *) bitmap;
768 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
769 			if (b1[i] != b2[i]) {
770 				printk("corruption in group %lu at byte %u(%u):"
771 				       " %x in copy != %x on disk/prealloc\n",
772 					e4b->bd_group, i, i * 8, b1[i], b2[i]);
773 				BUG();
774 			}
775 		}
776 	}
777 }
778 
779 #else
780 static inline void mb_free_blocks_double(struct inode *inode,
781 				struct ext4_buddy *e4b, int first, int count)
782 {
783 	return;
784 }
785 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
786 						int first, int count)
787 {
788 	return;
789 }
790 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
791 {
792 	return;
793 }
794 #endif
795 
796 #ifdef AGGRESSIVE_CHECK
797 
798 #define MB_CHECK_ASSERT(assert)						\
799 do {									\
800 	if (!(assert)) {						\
801 		printk(KERN_EMERG					\
802 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
803 			function, file, line, # assert);		\
804 		BUG();							\
805 	}								\
806 } while (0)
807 
808 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
809 				const char *function, int line)
810 {
811 	struct super_block *sb = e4b->bd_sb;
812 	int order = e4b->bd_blkbits + 1;
813 	int max;
814 	int max2;
815 	int i;
816 	int j;
817 	int k;
818 	int count;
819 	struct ext4_group_info *grp;
820 	int fragments = 0;
821 	int fstart;
822 	struct list_head *cur;
823 	void *buddy;
824 	void *buddy2;
825 
826 	if (!test_opt(sb, MBALLOC))
827 		return 0;
828 
829 	{
830 		static int mb_check_counter;
831 		if (mb_check_counter++ % 100 != 0)
832 			return 0;
833 	}
834 
835 	while (order > 1) {
836 		buddy = mb_find_buddy(e4b, order, &max);
837 		MB_CHECK_ASSERT(buddy);
838 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
839 		MB_CHECK_ASSERT(buddy2);
840 		MB_CHECK_ASSERT(buddy != buddy2);
841 		MB_CHECK_ASSERT(max * 2 == max2);
842 
843 		count = 0;
844 		for (i = 0; i < max; i++) {
845 
846 			if (mb_test_bit(i, buddy)) {
847 				/* only single bit in buddy2 may be 1 */
848 				if (!mb_test_bit(i << 1, buddy2)) {
849 					MB_CHECK_ASSERT(
850 						mb_test_bit((i<<1)+1, buddy2));
851 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
852 					MB_CHECK_ASSERT(
853 						mb_test_bit(i << 1, buddy2));
854 				}
855 				continue;
856 			}
857 
858 			/* both bits in buddy2 must be 0 */
859 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
860 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
861 
862 			for (j = 0; j < (1 << order); j++) {
863 				k = (i * (1 << order)) + j;
864 				MB_CHECK_ASSERT(
865 					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
866 			}
867 			count++;
868 		}
869 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
870 		order--;
871 	}
872 
873 	fstart = -1;
874 	buddy = mb_find_buddy(e4b, 0, &max);
875 	for (i = 0; i < max; i++) {
876 		if (!mb_test_bit(i, buddy)) {
877 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
878 			if (fstart == -1) {
879 				fragments++;
880 				fstart = i;
881 			}
882 			continue;
883 		}
884 		fstart = -1;
885 		/* check used bits only */
886 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
887 			buddy2 = mb_find_buddy(e4b, j, &max2);
888 			k = i >> j;
889 			MB_CHECK_ASSERT(k < max2);
890 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
891 		}
892 	}
893 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
894 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
895 
896 	grp = ext4_get_group_info(sb, e4b->bd_group);
897 	buddy = mb_find_buddy(e4b, 0, &max);
898 	list_for_each(cur, &grp->bb_prealloc_list) {
899 		ext4_group_t groupnr;
900 		struct ext4_prealloc_space *pa;
901 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
902 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
903 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
904 		for (i = 0; i < pa->pa_len; i++)
905 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
906 	}
907 	return 0;
908 }
909 #undef MB_CHECK_ASSERT
910 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
911 					__FILE__, __FUNCTION__, __LINE__)
912 #else
913 #define mb_check_buddy(e4b)
914 #endif
915 
916 /* FIXME!! need more doc */
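/*
 * Carve the free run [first, first+len) into the largest aligned
 * power-of-two chunks and mark each chunk free in the buddy bitmap of
 * the matching order (order-0 chunks are only counted, since the
 * order-0 bitmap is the block bitmap itself). Per step,
 * ffs(first | border) - 1 is the largest order the current position is
 * aligned to and fls(len) - 1 the largest order that still fits in the
 * remaining length; the chunk gets the smaller of the two. For example,
 * first = 6, len = 5 is carved as order-1 at 6, order-1 at 8, order-0
 * at 10.
 */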
917 static void ext4_mb_mark_free_simple(struct super_block *sb,
918 				void *buddy, unsigned first, int len,
919 					struct ext4_group_info *grp)
920 {
921 	struct ext4_sb_info *sbi = EXT4_SB(sb);
922 	unsigned short min;
923 	unsigned short max;
924 	unsigned short chunk;
925 	unsigned short border;
926 
927 	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
928 
929 	border = 2 << sb->s_blocksize_bits;
930 
931 	while (len > 0) {
932 		/* find how many blocks can be covered since this position */
933 		max = ffs(first | border) - 1;
934 
935 		/* find how many blocks of power 2 we need to mark */
936 		min = fls(len) - 1;
937 
938 		if (max < min)
939 			min = max;
940 		chunk = 1 << min;
941 
942 		/* mark multiblock chunks only */
943 		grp->bb_counters[min]++;
944 		if (min > 0)
945 			mb_clear_bit(first >> min,
946 				     buddy + sbi->s_mb_offsets[min]);
947 
948 		len -= chunk;
949 		first += chunk;
950 	}
951 }
952 
953 static void ext4_mb_generate_buddy(struct super_block *sb,
954 				void *buddy, void *bitmap, ext4_group_t group)
955 {
956 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
957 	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
958 	unsigned short i = 0;
959 	unsigned short first;
960 	unsigned short len;
961 	unsigned free = 0;
962 	unsigned fragments = 0;
963 	unsigned long long period = get_cycles();
964 
965 	/* initialize buddy from bitmap which is aggregation
966 	 * of on-disk bitmap and preallocations */
967 	i = mb_find_next_zero_bit(bitmap, max, 0);
968 	grp->bb_first_free = i;
969 	while (i < max) {
970 		fragments++;
971 		first = i;
972 		i = mb_find_next_bit(bitmap, max, i);
973 		len = i - first;
974 		free += len;
975 		if (len > 1)
976 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
977 		else
978 			grp->bb_counters[0]++;
979 		if (i < max)
980 			i = mb_find_next_zero_bit(bitmap, max, i);
981 	}
982 	grp->bb_fragments = fragments;
983 
984 	if (free != grp->bb_free) {
985 		ext4_error(sb, __FUNCTION__,
986 			"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
987 			group, free, grp->bb_free);
988 		/*
989 		 * If we intend to continue, we consider the group descriptor
990 		 * corrupt and update bb_free using the bitmap value
991 		 */
992 		grp->bb_free = free;
993 	}
994 
995 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
996 
997 	period = get_cycles() - period;
998 	spin_lock(&EXT4_SB(sb)->s_bal_lock);
999 	EXT4_SB(sb)->s_mb_buddies_generated++;
1000 	EXT4_SB(sb)->s_mb_generation_time += period;
1001 	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
1002 }
1003 
1004 /* The buddy information is attached to the buddy cache inode
1005  * for convenience. The information regarding each group
1006  * is loaded via ext4_mb_load_buddy and consists of the
1007  * block bitmap and the buddy information, which are
1008  * stored in the inode as
1009  *
1010  * {                        page                        }
1011  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1012  *
1013  *
1014  * one block each for bitmap and buddy information.
1015  * So for each group we take up 2 blocks. A page can
1016  * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
1017  * So it can hold information for groups_per_page groups,
1018  * which is blocks_per_page/2
1019  */
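
/*
 * For example, with blocksize = PAGE_CACHE_SIZE = 4096 we get
 * blocks_per_page = 1, so the bitmap of group g (block 2g) lives alone
 * in page 2g and its buddy (block 2g + 1) in page 2g + 1. With 1K
 * blocks, blocks_per_page = 4 and page p covers groups 2p and 2p + 1:
 * bitmaps at block offsets 0 and 2 within the page, buddies at 1 and 3.
 */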
1020 
1021 static int ext4_mb_init_cache(struct page *page, char *incore)
1022 {
1023 	int blocksize;
1024 	int blocks_per_page;
1025 	int groups_per_page;
1026 	int err = 0;
1027 	int i;
1028 	ext4_group_t first_group;
1029 	int first_block;
1030 	struct super_block *sb;
1031 	struct buffer_head *bhs;
1032 	struct buffer_head **bh;
1033 	struct inode *inode;
1034 	char *data;
1035 	char *bitmap;
1036 
1037 	mb_debug("init page %lu\n", page->index);
1038 
1039 	inode = page->mapping->host;
1040 	sb = inode->i_sb;
1041 	blocksize = 1 << inode->i_blkbits;
1042 	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
1043 
1044 	groups_per_page = blocks_per_page >> 1;
1045 	if (groups_per_page == 0)
1046 		groups_per_page = 1;
1047 
1048 	/* allocate buffer_heads to read bitmaps */
1049 	if (groups_per_page > 1) {
1050 		err = -ENOMEM;
1051 		i = sizeof(struct buffer_head *) * groups_per_page;
1052 		bh = kzalloc(i, GFP_NOFS);
1053 		if (bh == NULL)
1054 			goto out;
1055 	} else
1056 		bh = &bhs;
1057 
1058 	first_group = page->index * blocks_per_page / 2;
1059 
1060 	/* read all groups the page covers into the cache */
1061 	for (i = 0; i < groups_per_page; i++) {
1062 		struct ext4_group_desc *desc;
1063 
1064 		if (first_group + i >= EXT4_SB(sb)->s_groups_count)
1065 			break;
1066 
1067 		err = -EIO;
1068 		desc = ext4_get_group_desc(sb, first_group + i, NULL);
1069 		if (desc == NULL)
1070 			goto out;
1071 
1072 		err = -ENOMEM;
1073 		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
1074 		if (bh[i] == NULL)
1075 			goto out;
1076 
1077 		if (bh_uptodate_or_lock(bh[i]))
1078 			continue;
1079 
1080 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
1081 			ext4_init_block_bitmap(sb, bh[i],
1082 						first_group + i, desc);
1083 			set_buffer_uptodate(bh[i]);
1084 			unlock_buffer(bh[i]);
1085 			continue;
1086 		}
1087 		get_bh(bh[i]);
1088 		bh[i]->b_end_io = end_buffer_read_sync;
1089 		submit_bh(READ, bh[i]);
1090 		mb_debug("read bitmap for group %lu\n", first_group + i);
1091 	}
1092 
1093 	/* wait for I/O completion */
1094 	for (i = 0; i < groups_per_page && bh[i]; i++)
1095 		wait_on_buffer(bh[i]);
1096 
1097 	err = -EIO;
1098 	for (i = 0; i < groups_per_page && bh[i]; i++)
1099 		if (!buffer_uptodate(bh[i]))
1100 			goto out;
1101 
1102 	first_block = page->index * blocks_per_page;
1103 	for (i = 0; i < blocks_per_page; i++) {
1104 		int group;
1105 		struct ext4_group_info *grinfo;
1106 
1107 		group = (first_block + i) >> 1;
1108 		if (group >= EXT4_SB(sb)->s_groups_count)
1109 			break;
1110 
1111 		/*
1112 		 * data carries information regarding this
1113 		 * particular group in the format specified
1114 		 * above
1115 		 *
1116 		 */
1117 		data = page_address(page) + (i * blocksize);
1118 		bitmap = bh[group - first_group]->b_data;
1119 
1120 		/*
1121 		 * We place the buddy block and bitmap block
1122 		 * close together
1123 		 */
1124 		if ((first_block + i) & 1) {
1125 			/* this is block of buddy */
1126 			BUG_ON(incore == NULL);
1127 			mb_debug("put buddy for group %u in page %lu/%x\n",
1128 				group, page->index, i * blocksize);
1129 			memset(data, 0xff, blocksize);
1130 			grinfo = ext4_get_group_info(sb, group);
1131 			grinfo->bb_fragments = 0;
1132 			memset(grinfo->bb_counters, 0,
1133 			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
1134 			/*
1135 			 * incore got set to the group block bitmap below
1136 			 */
1137 			ext4_mb_generate_buddy(sb, data, incore, group);
1138 			incore = NULL;
1139 		} else {
1140 			/* this is block of bitmap */
1141 			BUG_ON(incore != NULL);
1142 			mb_debug("put bitmap for group %u in page %lu/%x\n",
1143 				group, page->index, i * blocksize);
1144 
1145 			/* see comments in ext4_mb_put_pa() */
1146 			ext4_lock_group(sb, group);
1147 			memcpy(data, bitmap, blocksize);
1148 
1149 			/* mark all preallocated blks used in in-core bitmap */
1150 			ext4_mb_generate_from_pa(sb, data, group);
1151 			ext4_unlock_group(sb, group);
1152 
1153 			/* set incore so that the buddy information can be
1154 			 * generated using this
1155 			 */
1156 			incore = data;
1157 		}
1158 	}
1159 	SetPageUptodate(page);
1160 
1161 out:
1162 	if (bh) {
1163 		for (i = 0; i < groups_per_page && bh[i]; i++)
1164 			brelse(bh[i]);
1165 		if (bh != &bhs)
1166 			kfree(bh);
1167 	}
1168 	return err;
1169 }
1170 
1171 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1172 		struct ext4_buddy *e4b)
1173 {
1174 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1175 	struct inode *inode = sbi->s_buddy_cache;
1176 	int blocks_per_page;
1177 	int block;
1178 	int pnum;
1179 	int poff;
1180 	struct page *page;
1181 
1182 	mb_debug("load group %lu\n", group);
1183 
1184 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1185 
1186 	e4b->bd_blkbits = sb->s_blocksize_bits;
1187 	e4b->bd_info = ext4_get_group_info(sb, group);
1188 	e4b->bd_sb = sb;
1189 	e4b->bd_group = group;
1190 	e4b->bd_buddy_page = NULL;
1191 	e4b->bd_bitmap_page = NULL;
1192 
1193 	/*
1194 	 * the buddy cache inode stores the block bitmap
1195 	 * and buddy information in consecutive blocks.
1196 	 * So for each group we need two blocks.
1197 	 */
1198 	block = group * 2;
1199 	pnum = block / blocks_per_page;
1200 	poff = block % blocks_per_page;
1201 
1202 	/* we could use find_or_create_page(), but it locks the page,
1203 	 * which we'd like to avoid in the fast path ... */
1204 	page = find_get_page(inode->i_mapping, pnum);
1205 	if (page == NULL || !PageUptodate(page)) {
1206 		if (page)
1207 			page_cache_release(page);
1208 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1209 		if (page) {
1210 			BUG_ON(page->mapping != inode->i_mapping);
1211 			if (!PageUptodate(page)) {
1212 				ext4_mb_init_cache(page, NULL);
1213 				mb_cmp_bitmaps(e4b, page_address(page) +
1214 					       (poff * sb->s_blocksize));
1215 			}
1216 			unlock_page(page);
1217 		}
1218 	}
1219 	if (page == NULL || !PageUptodate(page))
1220 		goto err;
1221 	e4b->bd_bitmap_page = page;
1222 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1223 	mark_page_accessed(page);
1224 
1225 	block++;
1226 	pnum = block / blocks_per_page;
1227 	poff = block % blocks_per_page;
1228 
1229 	page = find_get_page(inode->i_mapping, pnum);
1230 	if (page == NULL || !PageUptodate(page)) {
1231 		if (page)
1232 			page_cache_release(page);
1233 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1234 		if (page) {
1235 			BUG_ON(page->mapping != inode->i_mapping);
1236 			if (!PageUptodate(page))
1237 				ext4_mb_init_cache(page, e4b->bd_bitmap);
1238 
1239 			unlock_page(page);
1240 		}
1241 	}
1242 	if (page == NULL || !PageUptodate(page))
1243 		goto err;
1244 	e4b->bd_buddy_page = page;
1245 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1246 	mark_page_accessed(page);
1247 
1248 	BUG_ON(e4b->bd_bitmap_page == NULL);
1249 	BUG_ON(e4b->bd_buddy_page == NULL);
1250 
1251 	return 0;
1252 
1253 err:
1254 	if (e4b->bd_bitmap_page)
1255 		page_cache_release(e4b->bd_bitmap_page);
1256 	if (e4b->bd_buddy_page)
1257 		page_cache_release(e4b->bd_buddy_page);
1258 	e4b->bd_buddy = NULL;
1259 	e4b->bd_bitmap = NULL;
1260 	return -EIO;
1261 }
1262 
1263 static void ext4_mb_release_desc(struct ext4_buddy *e4b)
1264 {
1265 	if (e4b->bd_bitmap_page)
1266 		page_cache_release(e4b->bd_bitmap_page);
1267 	if (e4b->bd_buddy_page)
1268 		page_cache_release(e4b->bd_buddy_page);
1269 }
1270 
1271 
1272 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1273 {
1274 	int order = 1;
1275 	void *bb;
1276 
1277 	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
1278 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1279 
1280 	bb = EXT4_MB_BUDDY(e4b);
1281 	while (order <= e4b->bd_blkbits + 1) {
1282 		block = block >> 1;
1283 		if (!mb_test_bit(block, bb)) {
1284 			/* this block is part of buddy of order 'order' */
1285 			return order;
1286 		}
1287 		bb += 1 << (e4b->bd_blkbits - order);
1288 		order++;
1289 	}
1290 	return 0;
1291 }
1292 
1293 static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1294 {
1295 	__u32 *addr;
1296 
1297 	len = cur + len;
1298 	while (cur < len) {
1299 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1300 			/* fast path: clear whole word at once */
1301 			addr = bm + (cur >> 3);
1302 			*addr = 0;
1303 			cur += 32;
1304 			continue;
1305 		}
1306 		mb_clear_bit_atomic(lock, cur, bm);
1307 		cur++;
1308 	}
1309 }
1310 
1311 static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1312 {
1313 	__u32 *addr;
1314 
1315 	len = cur + len;
1316 	while (cur < len) {
1317 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1318 			/* fast path: set whole word at once */
1319 			addr = bm + (cur >> 3);
1320 			*addr = 0xffffffff;
1321 			cur += 32;
1322 			continue;
1323 		}
1324 		mb_set_bit_atomic(lock, cur, bm);
1325 		cur++;
1326 	}
1327 }
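
/*
 * Both helpers above share the same fast path: once cur reaches a
 * 32-bit boundary with at least 32 bits left, a whole word is stored at
 * once (bm + (cur >> 3) is 4-byte aligned at that point); the unaligned
 * head and tail fall back to atomic per-bit operations. E.g. cur = 5,
 * len = 70 does 27 single-bit ops, one word store covering bits 32..63,
 * then 11 more single-bit ops.
 */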
1328 
1329 static int mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1330 			  int first, int count)
1331 {
1332 	int block = 0;
1333 	int max = 0;
1334 	int order;
1335 	void *buddy;
1336 	void *buddy2;
1337 	struct super_block *sb = e4b->bd_sb;
1338 
1339 	BUG_ON(first + count > (sb->s_blocksize << 3));
1340 	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
1341 	mb_check_buddy(e4b);
1342 	mb_free_blocks_double(inode, e4b, first, count);
1343 
1344 	e4b->bd_info->bb_free += count;
1345 	if (first < e4b->bd_info->bb_first_free)
1346 		e4b->bd_info->bb_first_free = first;
1347 
1348 	/* let's maintain fragments counter */
1349 	if (first != 0)
1350 		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
1351 	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1352 		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
1353 	if (block && max)
1354 		e4b->bd_info->bb_fragments--;
1355 	else if (!block && !max)
1356 		e4b->bd_info->bb_fragments++;
1357 
1358 	/* let's maintain buddy itself */
1359 	while (count-- > 0) {
1360 		block = first++;
1361 		order = 0;
1362 
1363 		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
1364 			ext4_fsblk_t blocknr;
1365 			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
1366 			blocknr += block;
1367 			blocknr +=
1368 			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
1369 
1370 			ext4_error(sb, __FUNCTION__, "double-free of inode"
1371 				   " %lu's block %llu(bit %u in group %lu)\n",
1372 				   inode ? inode->i_ino : 0, blocknr, block,
1373 				   e4b->bd_group);
1374 		}
1375 		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1376 		e4b->bd_info->bb_counters[order]++;
1377 
1378 		/* start of the buddy */
1379 		buddy = mb_find_buddy(e4b, order, &max);
1380 
1381 		do {
1382 			block &= ~1UL;
1383 			if (mb_test_bit(block, buddy) ||
1384 					mb_test_bit(block + 1, buddy))
1385 				break;
1386 
1387 			/* both the buddies are free, try to coalesce them */
1388 			buddy2 = mb_find_buddy(e4b, order + 1, &max);
1389 
1390 			if (!buddy2)
1391 				break;
1392 
1393 			if (order > 0) {
1394 				/* for special purposes, we don't set
1395 				 * free bits in bitmap */
1396 				mb_set_bit(block, buddy);
1397 				mb_set_bit(block + 1, buddy);
1398 			}
1399 			e4b->bd_info->bb_counters[order]--;
1400 			e4b->bd_info->bb_counters[order]--;
1401 
1402 			block = block >> 1;
1403 			order++;
1404 			e4b->bd_info->bb_counters[order]++;
1405 
1406 			mb_clear_bit(block, buddy2);
1407 			buddy = buddy2;
1408 		} while (1);
1409 	}
1410 	mb_check_buddy(e4b);
1411 
1412 	return 0;
1413 }
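
/*
 * Coalescing illustration for the loop above: assume blocks 4..6 are
 * free and block 7 is being freed. Clearing bit 7 in the block bitmap
 * makes the order-0 pair (6,7) free, so bit 3 is cleared in the order-1
 * buddy map (the order-0 bits stay clear, as the comment in the loop
 * notes). There its neighbour bit 2 (blocks 4..5) is already free, so
 * both order-1 bits are set used again, bit 1 is cleared in the order-2
 * map (blocks 4..7 now form one free order-2 chunk), and the merge
 * stops when the adjacent order-2 buddy turns out to be in use,
 * updating bb_counters[] at every step.
 */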
1414 
1415 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1416 				int needed, struct ext4_free_extent *ex)
1417 {
1418 	int next = block;
1419 	int max;
1420 	int ord;
1421 	void *buddy;
1422 
1423 	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1424 	BUG_ON(ex == NULL);
1425 
1426 	buddy = mb_find_buddy(e4b, order, &max);
1427 	BUG_ON(buddy == NULL);
1428 	BUG_ON(block >= max);
1429 	if (mb_test_bit(block, buddy)) {
1430 		ex->fe_len = 0;
1431 		ex->fe_start = 0;
1432 		ex->fe_group = 0;
1433 		return 0;
1434 	}
1435 
1436 	/* FIXME: drop order completely ? */
1437 	if (likely(order == 0)) {
1438 		/* find actual order */
1439 		order = mb_find_order_for_block(e4b, block);
1440 		block = block >> order;
1441 	}
1442 
1443 	ex->fe_len = 1 << order;
1444 	ex->fe_start = block << order;
1445 	ex->fe_group = e4b->bd_group;
1446 
1447 	/* calc difference from given start */
1448 	next = next - ex->fe_start;
1449 	ex->fe_len -= next;
1450 	ex->fe_start += next;
1451 
1452 	while (needed > ex->fe_len &&
1453 	       (buddy = mb_find_buddy(e4b, order, &max))) {
1454 
1455 		if (block + 1 >= max)
1456 			break;
1457 
1458 		next = (block + 1) * (1 << order);
1459 		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
1460 			break;
1461 
1462 		ord = mb_find_order_for_block(e4b, next);
1463 
1464 		order = ord;
1465 		block = next >> order;
1466 		ex->fe_len += 1 << order;
1467 	}
1468 
1469 	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1470 	return ex->fe_len;
1471 }
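
/*
 * Example: asking for needed = 16 at block 10 when blocks 10..11 form a
 * free order-1 chunk and 12..15 a free order-2 chunk yields fe_start =
 * 10, fe_len = 6. The initial lookup contributes 2 blocks and the loop
 * at the end keeps appending whole neighbouring buddy chunks while they
 * are free, stopping here because block 16 is in use; note fe_len grows
 * in full chunks and so may exceed "needed" (the caller trims).
 */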
1472 
1473 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1474 {
1475 	int ord;
1476 	int mlen = 0;
1477 	int max = 0;
1478 	int cur;
1479 	int start = ex->fe_start;
1480 	int len = ex->fe_len;
1481 	unsigned ret = 0;
1482 	int len0 = len;
1483 	void *buddy;
1484 
1485 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1486 	BUG_ON(e4b->bd_group != ex->fe_group);
1487 	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1488 	mb_check_buddy(e4b);
1489 	mb_mark_used_double(e4b, start, len);
1490 
1491 	e4b->bd_info->bb_free -= len;
1492 	if (e4b->bd_info->bb_first_free == start)
1493 		e4b->bd_info->bb_first_free += len;
1494 
1495 	/* let's maintain fragments counter */
1496 	if (start != 0)
1497 		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
1498 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1499 		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
1500 	if (mlen && max)
1501 		e4b->bd_info->bb_fragments++;
1502 	else if (!mlen && !max)
1503 		e4b->bd_info->bb_fragments--;
1504 
1505 	/* let's maintain buddy itself */
1506 	while (len) {
1507 		ord = mb_find_order_for_block(e4b, start);
1508 
1509 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1510 			/* the whole chunk may be allocated at once! */
1511 			mlen = 1 << ord;
1512 			buddy = mb_find_buddy(e4b, ord, &max);
1513 			BUG_ON((start >> ord) >= max);
1514 			mb_set_bit(start >> ord, buddy);
1515 			e4b->bd_info->bb_counters[ord]--;
1516 			start += mlen;
1517 			len -= mlen;
1518 			BUG_ON(len < 0);
1519 			continue;
1520 		}
1521 
1522 		/* store for history */
1523 		if (ret == 0)
1524 			ret = len | (ord << 16);
1525 
1526 		/* we have to split large buddy */
1527 		BUG_ON(ord <= 0);
1528 		buddy = mb_find_buddy(e4b, ord, &max);
1529 		mb_set_bit(start >> ord, buddy);
1530 		e4b->bd_info->bb_counters[ord]--;
1531 
1532 		ord--;
1533 		cur = (start >> ord) & ~1U;
1534 		buddy = mb_find_buddy(e4b, ord, &max);
1535 		mb_clear_bit(cur, buddy);
1536 		mb_clear_bit(cur + 1, buddy);
1537 		e4b->bd_info->bb_counters[ord]++;
1538 		e4b->bd_info->bb_counters[ord]++;
1539 	}
1540 
1541 	mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
1542 			EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
1543 	mb_check_buddy(e4b);
1544 
1545 	return ret;
1546 }
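
/*
 * The return value of mb_mark_used() packs two facts for the history:
 * the low 16 bits hold the remaining length at the moment of the first
 * buddy split and the high 16 bits the order that was split; the caller
 * unpacks them into ac_tail and ac_buddy. E.g. if the first split
 * breaks an order-5 buddy while 9 blocks remain to be marked,
 * ret = 9 | (5 << 16).
 */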
1547 
1548 /*
1549  * Must be called under group lock!
1550  */
1551 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1552 					struct ext4_buddy *e4b)
1553 {
1554 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1555 	int ret;
1556 
1557 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1558 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1559 
1560 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1561 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1562 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1563 
1564 	/* preallocation can change ac_b_ex, thus we store actually
1565 	 * allocated blocks for history */
1566 	ac->ac_f_ex = ac->ac_b_ex;
1567 
1568 	ac->ac_status = AC_STATUS_FOUND;
1569 	ac->ac_tail = ret & 0xffff;
1570 	ac->ac_buddy = ret >> 16;
1571 
1572 	/* XXXXXXX: SUCH A HORRIBLE **CK */
1573 	/*FIXME!! Why ? */
1574 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1575 	get_page(ac->ac_bitmap_page);
1576 	ac->ac_buddy_page = e4b->bd_buddy_page;
1577 	get_page(ac->ac_buddy_page);
1578 
1579 	/* store last allocated for subsequent stream allocation */
1580 	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
1581 		spin_lock(&sbi->s_md_lock);
1582 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1583 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1584 		spin_unlock(&sbi->s_md_lock);
1585 	}
1586 }
1587 
1588 /*
1589  * regular allocator, for general purposes allocation
1590  */
1591 
1592 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1593 					struct ext4_buddy *e4b,
1594 					int finish_group)
1595 {
1596 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1597 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1598 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1599 	struct ext4_free_extent ex;
1600 	int max;
1601 
1602 	/*
1603 	 * We don't want to scan for a whole year
1604 	 */
1605 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1606 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1607 		ac->ac_status = AC_STATUS_BREAK;
1608 		return;
1609 	}
1610 
1611 	/*
1612 	 * Haven't found good chunk so far, let's continue
1613 	 */
1614 	if (bex->fe_len < gex->fe_len)
1615 		return;
1616 
1617 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1618 			&& bex->fe_group == e4b->bd_group) {
1619 		/* recheck chunk's availability - we don't know
1620 		 * when it was found (within this lock-unlock
1621 		 * period or not) */
1622 		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1623 		if (max >= gex->fe_len) {
1624 			ext4_mb_use_best_found(ac, e4b);
1625 			return;
1626 		}
1627 	}
1628 }
1629 
1630 /*
1631  * The routine checks whether the found extent is good enough. If it is,
1632  * then the extent gets marked used and a flag is set in the context
1633  * to stop scanning. Otherwise, the extent is compared with the
1634  * previously found extent and if the new one is better, it's stored
1635  * in the context. Later, the best found extent will be used if
1636  * mballoc can't find a good enough extent.
1637  *
1638  * FIXME: real allocation policy is to be designed yet!
1639  */
1640 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1641 					struct ext4_free_extent *ex,
1642 					struct ext4_buddy *e4b)
1643 {
1644 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1645 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1646 
1647 	BUG_ON(ex->fe_len <= 0);
1648 	BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1649 	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1650 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1651 
1652 	ac->ac_found++;
1653 
1654 	/*
1655 	 * The special case - take what you catch first
1656 	 */
1657 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1658 		*bex = *ex;
1659 		ext4_mb_use_best_found(ac, e4b);
1660 		return;
1661 	}
1662 
1663 	/*
1664 	 * Let's check whether the chunk is good enough
1665 	 */
1666 	if (ex->fe_len == gex->fe_len) {
1667 		*bex = *ex;
1668 		ext4_mb_use_best_found(ac, e4b);
1669 		return;
1670 	}
1671 
1672 	/*
1673 	 * If this is first found extent, just store it in the context
1674 	 */
1675 	if (bex->fe_len == 0) {
1676 		*bex = *ex;
1677 		return;
1678 	}
1679 
1680 	/*
1681 	 * If new found extent is better, store it in the context
1682 	 */
1683 	if (bex->fe_len < gex->fe_len) {
1684 		/* if the request isn't satisfied, any found extent
1685 		 * larger than previous best one is better */
1686 		if (ex->fe_len > bex->fe_len)
1687 			*bex = *ex;
1688 	} else if (ex->fe_len > gex->fe_len) {
1689 		/* if the request is satisfied, then we try to find
1690 		 * an extent that still satisfies the request but is
1691 		 * smaller than the previous one */
1692 		if (ex->fe_len < bex->fe_len)
1693 			*bex = *ex;
1694 	}
1695 
1696 	ext4_mb_check_limits(ac, e4b, 0);
1697 }
1698 
1699 static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1700 					struct ext4_buddy *e4b)
1701 {
1702 	struct ext4_free_extent ex = ac->ac_b_ex;
1703 	ext4_group_t group = ex.fe_group;
1704 	int max;
1705 	int err;
1706 
1707 	BUG_ON(ex.fe_len <= 0);
1708 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1709 	if (err)
1710 		return err;
1711 
1712 	ext4_lock_group(ac->ac_sb, group);
1713 	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1714 
1715 	if (max > 0) {
1716 		ac->ac_b_ex = ex;
1717 		ext4_mb_use_best_found(ac, e4b);
1718 	}
1719 
1720 	ext4_unlock_group(ac->ac_sb, group);
1721 	ext4_mb_release_desc(e4b);
1722 
1723 	return 0;
1724 }
1725 
1726 static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1727 				struct ext4_buddy *e4b)
1728 {
1729 	ext4_group_t group = ac->ac_g_ex.fe_group;
1730 	int max;
1731 	int err;
1732 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1733 	struct ext4_super_block *es = sbi->s_es;
1734 	struct ext4_free_extent ex;
1735 
1736 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1737 		return 0;
1738 
1739 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1740 	if (err)
1741 		return err;
1742 
1743 	ext4_lock_group(ac->ac_sb, group);
1744 	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1745 			     ac->ac_g_ex.fe_len, &ex);
1746 
1747 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1748 		ext4_fsblk_t start;
1749 
1750 		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
1751 			ex.fe_start + le32_to_cpu(es->s_first_data_block);
1752 		/* use do_div to get remainder (would be 64-bit modulo) */
1753 		if (do_div(start, sbi->s_stripe) == 0) {
1754 			ac->ac_found++;
1755 			ac->ac_b_ex = ex;
1756 			ext4_mb_use_best_found(ac, e4b);
1757 		}
1758 	} else if (max >= ac->ac_g_ex.fe_len) {
1759 		BUG_ON(ex.fe_len <= 0);
1760 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1761 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1762 		ac->ac_found++;
1763 		ac->ac_b_ex = ex;
1764 		ext4_mb_use_best_found(ac, e4b);
1765 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1766 		/* Sometimes, caller may want to merge even small
1767 		 * number of blocks to an existing extent */
1768 		BUG_ON(ex.fe_len <= 0);
1769 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1770 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1771 		ac->ac_found++;
1772 		ac->ac_b_ex = ex;
1773 		ext4_mb_use_best_found(ac, e4b);
1774 	}
1775 	ext4_unlock_group(ac->ac_sb, group);
1776 	ext4_mb_release_desc(e4b);
1777 
1778 	return 0;
1779 }
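/*
 * Worked example for the stripe check above (hypothetical numbers): with
 * s_stripe = 16, s_first_data_block = 0 and 32768 blocks per group, an
 * extent found at fe_start = 48 in group 2 gives
 * start = 2 * 32768 + 48 = 65584; do_div(start, 16) returns remainder 0,
 * so the extent is stripe-aligned and is used right away.
 */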
1780 
1781 /*
1782  * The routine scans buddy structures (not bitmap!) from the given order
1783  * up to the max order, trying to find a big enough chunk to satisfy the request
1784  */
1785 static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1786 					struct ext4_buddy *e4b)
1787 {
1788 	struct super_block *sb = ac->ac_sb;
1789 	struct ext4_group_info *grp = e4b->bd_info;
1790 	void *buddy;
1791 	int i;
1792 	int k;
1793 	int max;
1794 
1795 	BUG_ON(ac->ac_2order <= 0);
1796 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1797 		if (grp->bb_counters[i] == 0)
1798 			continue;
1799 
1800 		buddy = mb_find_buddy(e4b, i, &max);
1801 		BUG_ON(buddy == NULL);
1802 
1803 		k = mb_find_next_zero_bit(buddy, max, 0);
1804 		BUG_ON(k >= max);
1805 
1806 		ac->ac_found++;
1807 
1808 		ac->ac_b_ex.fe_len = 1 << i;
1809 		ac->ac_b_ex.fe_start = k << i;
1810 		ac->ac_b_ex.fe_group = e4b->bd_group;
1811 
1812 		ext4_mb_use_best_found(ac, e4b);
1813 
1814 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1815 
1816 		if (EXT4_SB(sb)->s_mb_stats)
1817 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1818 
1819 		break;
1820 	}
1821 }
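/*
 * Sketch of the scan above, assuming hypothetical numbers and 4KB blocks
 * (s_blocksize_bits = 12): for a request of 16 blocks ac_2order is 4, so
 * the loop inspects bb_counters[4], bb_counters[5], ... up to order 13.
 * If bb_counters[5] is the first non-zero counter and the first zero bit
 * in the order-5 buddy bitmap is k = 3, the allocator takes the 32-block
 * chunk starting at group-relative block 3 << 5 = 96;
 * ext4_mb_use_best_found() trims it back to the goal length, which is
 * what the BUG_ON after the call asserts.
 */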
1822 
1823 /*
1824  * The routine scans the group and measures all found extents.
1825  * In order to optimize scanning, caller must pass number of
1826  * free blocks in the group, so the routine can know upper limit.
1827  */
1828 static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1829 					struct ext4_buddy *e4b)
1830 {
1831 	struct super_block *sb = ac->ac_sb;
1832 	void *bitmap = EXT4_MB_BITMAP(e4b);
1833 	struct ext4_free_extent ex;
1834 	int i;
1835 	int free;
1836 
1837 	free = e4b->bd_info->bb_free;
1838 	BUG_ON(free <= 0);
1839 
1840 	i = e4b->bd_info->bb_first_free;
1841 
1842 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1843 		i = mb_find_next_zero_bit(bitmap,
1844 						EXT4_BLOCKS_PER_GROUP(sb), i);
1845 		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1846 			/*
1847 			 * If we have a corrupt bitmap, we won't find any
1848 			 * free blocks even though the group info says we
1849 			 * have free blocks
1850 			 */
1851 			ext4_error(sb, __FUNCTION__, "%d free blocks as per "
1852 					"group info. But bitmap says 0\n",
1853 					free);
1854 			break;
1855 		}
1856 
1857 		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1858 		BUG_ON(ex.fe_len <= 0);
1859 		if (free < ex.fe_len) {
1860 			ext4_error(sb, __FUNCTION__, "%d free blocks as per "
1861 					"group info. But got %d blocks\n",
1862 					free, ex.fe_len);
1863 			/*
1864 			 * The number of free blocks differs. This most likely
1865 			 * indicates that the bitmap is corrupt, so exit
1866 			 * without claiming the space.
1867 			 */
1868 			break;
1869 		}
1870 
1871 		ext4_mb_measure_extent(ac, &ex, e4b);
1872 
1873 		i += ex.fe_len;
1874 		free -= ex.fe_len;
1875 	}
1876 
1877 	ext4_mb_check_limits(ac, e4b, 1);
1878 }
1879 
1880 /*
1881  * This is a special case for storage like RAID5:
1882  * we try to find stripe-aligned chunks for stripe-size requests
1883  * XXX should do so at least for multiples of stripe size as well
1884  */
1885 static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1886 				 struct ext4_buddy *e4b)
1887 {
1888 	struct super_block *sb = ac->ac_sb;
1889 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1890 	void *bitmap = EXT4_MB_BITMAP(e4b);
1891 	struct ext4_free_extent ex;
1892 	ext4_fsblk_t first_group_block;
1893 	ext4_fsblk_t a;
1894 	ext4_grpblk_t i;
1895 	int max;
1896 
1897 	BUG_ON(sbi->s_stripe == 0);
1898 
1899 	/* find first stripe-aligned block in group */
1900 	first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
1901 		+ le32_to_cpu(sbi->s_es->s_first_data_block);
1902 	a = first_group_block + sbi->s_stripe - 1;
1903 	do_div(a, sbi->s_stripe);
1904 	i = (a * sbi->s_stripe) - first_group_block;
1905 
1906 	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1907 		if (!mb_test_bit(i, bitmap)) {
1908 			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1909 			if (max >= sbi->s_stripe) {
1910 				ac->ac_found++;
1911 				ac->ac_b_ex = ex;
1912 				ext4_mb_use_best_found(ac, e4b);
1913 				break;
1914 			}
1915 		}
1916 		i += sbi->s_stripe;
1917 	}
1918 }
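/*
 * Illustration of the alignment arithmetic above (hypothetical numbers):
 * with s_stripe = 16, s_first_data_block = 1 and 32768 blocks per group,
 * group 1 starts at absolute block 32769, a = (32769 + 15) / 16 = 2049,
 * so the first aligned block is 2049 * 16 - 32769 = 15 relative to the
 * group, and the loop then probes offsets 15, 31, 47, ... for a free
 * stripe-sized extent.
 */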
1919 
1920 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1921 				ext4_group_t group, int cr)
1922 {
1923 	unsigned free, fragments;
1924 	unsigned i, bits;
1925 	struct ext4_group_desc *desc;
1926 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1927 
1928 	BUG_ON(cr < 0 || cr >= 4);
1929 	BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
1930 
1931 	free = grp->bb_free;
1932 	fragments = grp->bb_fragments;
1933 	if (free == 0)
1934 		return 0;
1935 	if (fragments == 0)
1936 		return 0;
1937 
1938 	switch (cr) {
1939 	case 0:
1940 		BUG_ON(ac->ac_2order == 0);
1941 		/* If this group is uninitialized, skip it initially */
1942 		desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
1943 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1944 			return 0;
1945 
1946 		bits = ac->ac_sb->s_blocksize_bits + 1;
1947 		for (i = ac->ac_2order; i <= bits; i++)
1948 			if (grp->bb_counters[i] > 0)
1949 				return 1;
1950 		break;
1951 	case 1:
1952 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
1953 			return 1;
1954 		break;
1955 	case 2:
1956 		if (free >= ac->ac_g_ex.fe_len)
1957 			return 1;
1958 		break;
1959 	case 3:
1960 		return 1;
1961 	default:
1962 		BUG();
1963 	}
1964 
1965 	return 0;
1966 }
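/*
 * To summarize the criteria above: cr 0 accepts a group only if its buddy
 * has a free chunk of at least the requested order (the exact power-of-2
 * path); cr 1 requires the average free extent (free / fragments) to
 * cover the goal; cr 2 merely requires enough free blocks in total; and
 * cr 3 takes any group with free space. Each pass is cheaper to satisfy
 * than the previous one, so scanning degrades gracefully.
 */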
1967 
1968 static int ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1969 {
1970 	ext4_group_t group;
1971 	ext4_group_t i;
1972 	int cr;
1973 	int err = 0;
1974 	int bsbits;
1975 	struct ext4_sb_info *sbi;
1976 	struct super_block *sb;
1977 	struct ext4_buddy e4b;
1978 	loff_t size, isize;
1979 
1980 	sb = ac->ac_sb;
1981 	sbi = EXT4_SB(sb);
1982 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1983 
1984 	/* first, try the goal */
1985 	err = ext4_mb_find_by_goal(ac, &e4b);
1986 	if (err || ac->ac_status == AC_STATUS_FOUND)
1987 		goto out;
1988 
1989 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1990 		goto out;
1991 
1992 	/*
1993 	 * ac->ac_2order is set only if the fe_len is a power of 2;
1994 	 * if ac_2order is set we also set the criteria to 0 so that we
1995 	 * try exact allocation using the buddy.
1996 	 */
1997 	i = fls(ac->ac_g_ex.fe_len);
1998 	ac->ac_2order = 0;
1999 	/*
2000 	 * We search using buddy data only if the order of the request
2001 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2002 	 * You can tune it via /proc/fs/ext4/<partition>/order2_req
2003 	 */
2004 	if (i >= sbi->s_mb_order2_reqs) {
2005 		/*
2006 		 * This should tell if fe_len is exactly a power of 2
2007 		 */
2008 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2009 			ac->ac_2order = i - 1;
2010 	}
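	/*
	 * Worked example: for fe_len = 16, fls() returns 5 and
	 * 16 & ~(1 << 4) == 0, so ac_2order becomes 4; for fe_len = 24,
	 * fls() also returns 5 but 24 & ~(1 << 4) == 8, so ac_2order
	 * stays 0 and the buddy fast path is skipped.
	 */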
2011 
2012 	bsbits = ac->ac_sb->s_blocksize_bits;
2013 	/* if stream allocation is enabled, use global goal */
2014 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2015 	isize = i_size_read(ac->ac_inode) >> bsbits;
2016 	if (size < isize)
2017 		size = isize;
2018 
2019 	if (size < sbi->s_mb_stream_request &&
2020 			(ac->ac_flags & EXT4_MB_HINT_DATA)) {
2021 		/* TBD: may be hot point */
2022 		spin_lock(&sbi->s_md_lock);
2023 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2024 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2025 		spin_unlock(&sbi->s_md_lock);
2026 	}
2027 
2028 	/* search for the right group, starting from the specified goal value */
2029 	group = ac->ac_g_ex.fe_group;
2030 
2031 	/* Let's just scan groups to find more or less suitable blocks */
2032 	cr = ac->ac_2order ? 0 : 1;
2033 	/*
2034 	 * cr == 0 try to get exact allocation,
2035 	 * cr == 3  try to get anything
2036 	 */
2037 repeat:
2038 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2039 		ac->ac_criteria = cr;
2040 		for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
2041 			struct ext4_group_info *grp;
2042 			struct ext4_group_desc *desc;
2043 
2044 			if (group == EXT4_SB(sb)->s_groups_count)
2045 				group = 0;
2046 
2047 			/* quick check to skip empty groups */
2048 			grp = ext4_get_group_info(ac->ac_sb, group);
2049 			if (grp->bb_free == 0)
2050 				continue;
2051 
2052 			/*
2053 			 * if the group is already initialized, we check whether
2054 			 * it is a good group and, if not, we don't load the buddy
2055 			 */
2056 			if (EXT4_MB_GRP_NEED_INIT(grp)) {
2057 				/*
2058 				 * we need full data about the group
2059 				 * to make a good selection
2060 				 */
2061 				err = ext4_mb_load_buddy(sb, group, &e4b);
2062 				if (err)
2063 					goto out;
2064 				ext4_mb_release_desc(&e4b);
2065 			}
2066 
2067 			/*
2068 			 * If the particular group doesn't satisfy our
2069 			 * criteria we continue with the next group
2070 			 */
2071 			if (!ext4_mb_good_group(ac, group, cr))
2072 				continue;
2073 
2074 			err = ext4_mb_load_buddy(sb, group, &e4b);
2075 			if (err)
2076 				goto out;
2077 
2078 			ext4_lock_group(sb, group);
2079 			if (!ext4_mb_good_group(ac, group, cr)) {
2080 				/* someone did allocation from this group */
2081 				ext4_unlock_group(sb, group);
2082 				ext4_mb_release_desc(&e4b);
2083 				continue;
2084 			}
2085 
2086 			ac->ac_groups_scanned++;
2087 			desc = ext4_get_group_desc(sb, group, NULL);
2088 			if (cr == 0 || (desc->bg_flags &
2089 					cpu_to_le16(EXT4_BG_BLOCK_UNINIT) &&
2090 					ac->ac_2order != 0))
2091 				ext4_mb_simple_scan_group(ac, &e4b);
2092 			else if (cr == 1 &&
2093 					ac->ac_g_ex.fe_len == sbi->s_stripe)
2094 				ext4_mb_scan_aligned(ac, &e4b);
2095 			else
2096 				ext4_mb_complex_scan_group(ac, &e4b);
2097 
2098 			ext4_unlock_group(sb, group);
2099 			ext4_mb_release_desc(&e4b);
2100 
2101 			if (ac->ac_status != AC_STATUS_CONTINUE)
2102 				break;
2103 		}
2104 	}
2105 
2106 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2107 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2108 		/*
2109 		 * We've been searching too long. Let's try to allocate
2110 		 * the best chunk we've found so far
2111 		 */
2112 
2113 		ext4_mb_try_best_found(ac, &e4b);
2114 		if (ac->ac_status != AC_STATUS_FOUND) {
2115 			/*
2116 			 * Someone luckier has already allocated it.
2117 			 * The only thing we can do is just take the first
2118 			 * found block(s)
2119 			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2120 			 */
2121 			ac->ac_b_ex.fe_group = 0;
2122 			ac->ac_b_ex.fe_start = 0;
2123 			ac->ac_b_ex.fe_len = 0;
2124 			ac->ac_status = AC_STATUS_CONTINUE;
2125 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2126 			cr = 3;
2127 			atomic_inc(&sbi->s_mb_lost_chunks);
2128 			goto repeat;
2129 		}
2130 	}
2131 out:
2132 	return err;
2133 }
2134 
2135 #ifdef EXT4_MB_HISTORY
2136 struct ext4_mb_proc_session {
2137 	struct ext4_mb_history *history;
2138 	struct super_block *sb;
2139 	int start;
2140 	int max;
2141 };
2142 
2143 static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
2144 					struct ext4_mb_history *hs,
2145 					int first)
2146 {
2147 	if (hs == s->history + s->max)
2148 		hs = s->history;
2149 	if (!first && hs == s->history + s->start)
2150 		return NULL;
2151 	while (hs->orig.fe_len == 0) {
2152 		hs++;
2153 		if (hs == s->history + s->max)
2154 			hs = s->history;
2155 		if (hs == s->history + s->start)
2156 			return NULL;
2157 	}
2158 	return hs;
2159 }
2160 
2161 static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
2162 {
2163 	struct ext4_mb_proc_session *s = seq->private;
2164 	struct ext4_mb_history *hs;
2165 	int l = *pos;
2166 
2167 	if (l == 0)
2168 		return SEQ_START_TOKEN;
2169 	hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2170 	if (!hs)
2171 		return NULL;
2172 	while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
2173 	return hs;
2174 }
2175 
2176 static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
2177 				      loff_t *pos)
2178 {
2179 	struct ext4_mb_proc_session *s = seq->private;
2180 	struct ext4_mb_history *hs = v;
2181 
2182 	++*pos;
2183 	if (v == SEQ_START_TOKEN)
2184 		return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
2185 	else
2186 		return ext4_mb_history_skip_empty(s, ++hs, 0);
2187 }
2188 
2189 static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
2190 {
2191 	char buf[25], buf2[25], buf3[25], *fmt;
2192 	struct ext4_mb_history *hs = v;
2193 
2194 	if (v == SEQ_START_TOKEN) {
2195 		seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
2196 				"%-5s %-2s %-5s %-5s %-5s %-6s\n",
2197 			  "pid", "inode", "original", "goal", "result", "found",
2198 			   "grps", "cr", "flags", "merge", "tail", "broken");
2199 		return 0;
2200 	}
2201 
2202 	if (hs->op == EXT4_MB_HISTORY_ALLOC) {
2203 		fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
2204 			"%-5u %-5s %-5u %-6u\n";
2205 		sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
2206 			hs->result.fe_start, hs->result.fe_len,
2207 			hs->result.fe_logical);
2208 		sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
2209 			hs->orig.fe_start, hs->orig.fe_len,
2210 			hs->orig.fe_logical);
2211 		sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group,
2212 			hs->goal.fe_start, hs->goal.fe_len,
2213 			hs->goal.fe_logical);
2214 		seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
2215 				hs->found, hs->groups, hs->cr, hs->flags,
2216 				hs->merged ? "M" : "", hs->tail,
2217 				hs->buddy ? 1 << hs->buddy : 0);
2218 	} else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
2219 		fmt = "%-5u %-8u %-23s %-23s %-23s\n";
2220 		sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
2221 			hs->result.fe_start, hs->result.fe_len,
2222 			hs->result.fe_logical);
2223 		sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
2224 			hs->orig.fe_start, hs->orig.fe_len,
2225 			hs->orig.fe_logical);
2226 		seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
2227 	} else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
2228 		sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
2229 			hs->result.fe_start, hs->result.fe_len);
2230 		seq_printf(seq, "%-5u %-8u %-23s discard\n",
2231 				hs->pid, hs->ino, buf2);
2232 	} else if (hs->op == EXT4_MB_HISTORY_FREE) {
2233 		sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
2234 			hs->result.fe_start, hs->result.fe_len);
2235 		seq_printf(seq, "%-5u %-8u %-23s free\n",
2236 				hs->pid, hs->ino, buf2);
2237 	}
2238 	return 0;
2239 }
2240 
2241 static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
2242 {
2243 }
2244 
2245 static struct seq_operations ext4_mb_seq_history_ops = {
2246 	.start  = ext4_mb_seq_history_start,
2247 	.next   = ext4_mb_seq_history_next,
2248 	.stop   = ext4_mb_seq_history_stop,
2249 	.show   = ext4_mb_seq_history_show,
2250 };
2251 
2252 static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
2253 {
2254 	struct super_block *sb = PDE(inode)->data;
2255 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2256 	struct ext4_mb_proc_session *s;
2257 	int rc;
2258 	int size;
2259 
2260 	s = kmalloc(sizeof(*s), GFP_KERNEL);
2261 	if (s == NULL)
2262 		return -ENOMEM;
2263 	s->sb = sb;
2264 	size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
2265 	s->history = kmalloc(size, GFP_KERNEL);
2266 	if (s->history == NULL) {
2267 		kfree(s);
2268 		return -ENOMEM;
2269 	}
2270 
2271 	spin_lock(&sbi->s_mb_history_lock);
2272 	memcpy(s->history, sbi->s_mb_history, size);
2273 	s->max = sbi->s_mb_history_max;
2274 	s->start = sbi->s_mb_history_cur % s->max;
2275 	spin_unlock(&sbi->s_mb_history_lock);
2276 
2277 	rc = seq_open(file, &ext4_mb_seq_history_ops);
2278 	if (rc == 0) {
2279 		struct seq_file *m = (struct seq_file *)file->private_data;
2280 		m->private = s;
2281 	} else {
2282 		kfree(s->history);
2283 		kfree(s);
2284 	}
2285 	return rc;
2286 
2287 }
2288 
2289 static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2290 {
2291 	struct seq_file *seq = (struct seq_file *)file->private_data;
2292 	struct ext4_mb_proc_session *s = seq->private;
2293 	kfree(s->history);
2294 	kfree(s);
2295 	return seq_release(inode, file);
2296 }
2297 
2298 static ssize_t ext4_mb_seq_history_write(struct file *file,
2299 				const char __user *buffer,
2300 				size_t count, loff_t *ppos)
2301 {
2302 	struct seq_file *seq = (struct seq_file *)file->private_data;
2303 	struct ext4_mb_proc_session *s = seq->private;
2304 	struct super_block *sb = s->sb;
2305 	char str[32];
2306 	int value;
2307 
2308 	if (count >= sizeof(str)) {
2309 		printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2310 				"mb_history", (int)sizeof(str));
2311 		return -EOVERFLOW;
2312 	}
2313 
2314 	if (copy_from_user(str, buffer, count))
2315 		return -EFAULT;
2316 	str[count] = '\0';
2317 	value = simple_strtol(str, NULL, 0);
2318 	if (value < 0)
2319 		return -ERANGE;
2320 	EXT4_SB(sb)->s_mb_history_filter = value;
2321 
2322 	return count;
2323 }
2324 
2325 static struct file_operations ext4_mb_seq_history_fops = {
2326 	.owner		= THIS_MODULE,
2327 	.open		= ext4_mb_seq_history_open,
2328 	.read		= seq_read,
2329 	.write		= ext4_mb_seq_history_write,
2330 	.llseek		= seq_lseek,
2331 	.release	= ext4_mb_seq_history_release,
2332 };
2333 
2334 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2335 {
2336 	struct super_block *sb = seq->private;
2337 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2338 	ext4_group_t group;
2339 
2340 	if (*pos < 0 || *pos >= sbi->s_groups_count)
2341 		return NULL;
2342 
2343 	group = *pos + 1;
2344 	return (void *) group;
2345 }
2346 
2347 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2348 {
2349 	struct super_block *sb = seq->private;
2350 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2351 	ext4_group_t group;
2352 
2353 	++*pos;
2354 	if (*pos < 0 || *pos >= sbi->s_groups_count)
2355 		return NULL;
2356 	group = *pos + 1;
2357 	return (void *) group;
2358 }
2359 
2360 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2361 {
2362 	struct super_block *sb = seq->private;
2363 	long group = (long) v;
2364 	int i;
2365 	int err;
2366 	struct ext4_buddy e4b;
2367 	struct sg {
2368 		struct ext4_group_info info;
2369 		unsigned short counters[16];
2370 	} sg;
2371 
2372 	group--;
2373 	if (group == 0)
2374 		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2375 				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2376 				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2377 			   "group", "free", "frags", "first",
2378 			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2379 			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2380 
2381 	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2382 		sizeof(struct ext4_group_info);
2383 	err = ext4_mb_load_buddy(sb, group, &e4b);
2384 	if (err) {
2385 		seq_printf(seq, "#%-5lu: I/O error\n", group);
2386 		return 0;
2387 	}
2388 	ext4_lock_group(sb, group);
2389 	memcpy(&sg, ext4_get_group_info(sb, group), i);
2390 	ext4_unlock_group(sb, group);
2391 	ext4_mb_release_desc(&e4b);
2392 
2393 	seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
2394 			sg.info.bb_fragments, sg.info.bb_first_free);
2395 	for (i = 0; i <= 13; i++)
2396 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2397 				sg.info.bb_counters[i] : 0);
2398 	seq_printf(seq, " ]\n");
2399 
2400 	return 0;
2401 }
2402 
2403 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2404 {
2405 }
2406 
2407 static struct seq_operations ext4_mb_seq_groups_ops = {
2408 	.start  = ext4_mb_seq_groups_start,
2409 	.next   = ext4_mb_seq_groups_next,
2410 	.stop   = ext4_mb_seq_groups_stop,
2411 	.show   = ext4_mb_seq_groups_show,
2412 };
2413 
2414 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2415 {
2416 	struct super_block *sb = PDE(inode)->data;
2417 	int rc;
2418 
2419 	rc = seq_open(file, &ext4_mb_seq_groups_ops);
2420 	if (rc == 0) {
2421 		struct seq_file *m = (struct seq_file *)file->private_data;
2422 		m->private = sb;
2423 	}
2424 	return rc;
2425 
2426 }
2427 
2428 static struct file_operations ext4_mb_seq_groups_fops = {
2429 	.owner		= THIS_MODULE,
2430 	.open		= ext4_mb_seq_groups_open,
2431 	.read		= seq_read,
2432 	.llseek		= seq_lseek,
2433 	.release	= seq_release,
2434 };
2435 
2436 static void ext4_mb_history_release(struct super_block *sb)
2437 {
2438 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2439 
2440 	remove_proc_entry("mb_groups", sbi->s_mb_proc);
2441 	remove_proc_entry("mb_history", sbi->s_mb_proc);
2442 
2443 	kfree(sbi->s_mb_history);
2444 }
2445 
2446 static void ext4_mb_history_init(struct super_block *sb)
2447 {
2448 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2449 	int i;
2450 
2451 	if (sbi->s_mb_proc != NULL) {
2452 		struct proc_dir_entry *p;
2453 		p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
2454 		if (p) {
2455 			p->proc_fops = &ext4_mb_seq_history_fops;
2456 			p->data = sb;
2457 		}
2458 		p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
2459 		if (p) {
2460 			p->proc_fops = &ext4_mb_seq_groups_fops;
2461 			p->data = sb;
2462 		}
2463 	}
2464 
2465 	sbi->s_mb_history_max = 1000;
2466 	sbi->s_mb_history_cur = 0;
2467 	spin_lock_init(&sbi->s_mb_history_lock);
2468 	i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2469 	sbi->s_mb_history = kmalloc(i, GFP_KERNEL);
2470 	if (likely(sbi->s_mb_history != NULL))
2471 		memset(sbi->s_mb_history, 0, i);
2472 	/* if we can't allocate history, then we simply won't use it */
2473 }
2474 
2475 static void ext4_mb_store_history(struct ext4_allocation_context *ac)
2476 {
2477 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2478 	struct ext4_mb_history h;
2479 
2480 	if (unlikely(sbi->s_mb_history == NULL))
2481 		return;
2482 
2483 	if (!(ac->ac_op & sbi->s_mb_history_filter))
2484 		return;
2485 
2486 	h.op = ac->ac_op;
2487 	h.pid = current->pid;
2488 	h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2489 	h.orig = ac->ac_o_ex;
2490 	h.result = ac->ac_b_ex;
2491 	h.flags = ac->ac_flags;
2492 	h.found = ac->ac_found;
2493 	h.groups = ac->ac_groups_scanned;
2494 	h.cr = ac->ac_criteria;
2495 	h.tail = ac->ac_tail;
2496 	h.buddy = ac->ac_buddy;
2497 	h.merged = 0;
2498 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2499 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2500 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2501 			h.merged = 1;
2502 		h.goal = ac->ac_g_ex;
2503 		h.result = ac->ac_f_ex;
2504 	}
2505 
2506 	spin_lock(&sbi->s_mb_history_lock);
2507 	memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2508 	if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2509 		sbi->s_mb_history_cur = 0;
2510 	spin_unlock(&sbi->s_mb_history_lock);
2511 }
2512 
2513 #else
2514 #define ext4_mb_history_release(sb)
2515 #define ext4_mb_history_init(sb)
2516 #endif
2517 
2518 static int ext4_mb_init_backend(struct super_block *sb)
2519 {
2520 	ext4_group_t i;
2521 	int j, len, metalen;
2522 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2523 	int num_meta_group_infos =
2524 		(sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2525 			EXT4_DESC_PER_BLOCK_BITS(sb);
2526 	struct ext4_group_info **meta_group_info;
2527 
2528 	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2529 	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2530 	 * So a two level scheme suffices for now. */
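	/* For illustration, assuming 4KB blocks and 32-byte group
	 * descriptors: an 8TB filesystem has 2^31 / 2^15 = 65536 groups,
	 * EXT4_DESC_PER_BLOCK is 4096 / 32 = 128, so the first level needs
	 * 65536 / 128 = 512 pointers -- 4096 bytes with 64-bit pointers,
	 * matching the estimate above. */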
2531 	sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
2532 				    num_meta_group_infos, GFP_KERNEL);
2533 	if (sbi->s_group_info == NULL) {
2534 		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2535 		return -ENOMEM;
2536 	}
2537 	sbi->s_buddy_cache = new_inode(sb);
2538 	if (sbi->s_buddy_cache == NULL) {
2539 		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2540 		goto err_freesgi;
2541 	}
2542 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2543 
2544 	metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2545 	for (i = 0; i < num_meta_group_infos; i++) {
2546 		if ((i + 1) == num_meta_group_infos)
2547 			metalen = sizeof(*meta_group_info) *
2548 				(sbi->s_groups_count -
2549 					(i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2550 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
2551 		if (meta_group_info == NULL) {
2552 			printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2553 			       "buddy group\n");
2554 			goto err_freemeta;
2555 		}
2556 		sbi->s_group_info[i] = meta_group_info;
2557 	}
2558 
2559 	/*
2560 	 * calculate the needed size; if the bb_counters size changes,
2561 	 * don't forget about ext4_mb_generate_buddy()
2562 	 */
2563 	len = sizeof(struct ext4_group_info);
2564 	len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
2565 	for (i = 0; i < sbi->s_groups_count; i++) {
2566 		struct ext4_group_desc *desc;
2567 
2568 		meta_group_info =
2569 			sbi->s_group_info[i >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2570 		j = i & (EXT4_DESC_PER_BLOCK(sb) - 1);
2571 
2572 		meta_group_info[j] = kzalloc(len, GFP_KERNEL);
2573 		if (meta_group_info[j] == NULL) {
2574 			printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2575 			goto err_freebuddy;
2576 		}
2578 		desc = ext4_get_group_desc(sb, i, NULL);
2579 		if (desc == NULL) {
2580 			printk(KERN_ERR
2581 				"EXT4-fs: can't read descriptor %lu\n", i);
2582 			kfree(meta_group_info[j]);
2583 			goto err_freebuddy;
2584 		}
2585 		set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2586 			&(meta_group_info[j]->bb_state));
2587 
2588 		/*
2589 		 * initialize bb_free to be able to skip
2590 		 * empty groups without initialization
2591 		 */
2592 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2593 			meta_group_info[j]->bb_free =
2594 				ext4_free_blocks_after_init(sb, i, desc);
2595 		} else {
2596 			meta_group_info[j]->bb_free =
2597 				le16_to_cpu(desc->bg_free_blocks_count);
2598 		}
2599 
2600 		INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
2601 
2602 #ifdef DOUBLE_CHECK
2603 		{
2604 			struct buffer_head *bh;
2605 			meta_group_info[j]->bb_bitmap =
2606 				kmalloc(sb->s_blocksize, GFP_KERNEL);
2607 			BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
2608 			bh = read_block_bitmap(sb, i);
2609 			BUG_ON(bh == NULL);
2610 			memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
2611 					sb->s_blocksize);
2612 			put_bh(bh);
2613 		}
2614 #endif
2615 
2616 	}
2617 
2618 	return 0;
2619 
2620 err_freebuddy:
2621 	/* i is unsigned, so count down with i-- > 0 to avoid wrapping */
2622 	while (i-- > 0)
2623 		kfree(ext4_get_group_info(sb, i));
2624 	i = num_meta_group_infos;
2625 err_freemeta:
2626 	while (i-- > 0)
2627 		kfree(sbi->s_group_info[i]);
2629 	iput(sbi->s_buddy_cache);
2630 err_freesgi:
2631 	kfree(sbi->s_group_info);
2632 	return -ENOMEM;
2633 }
2634 
2635 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2636 {
2637 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2638 	unsigned i;
2639 	unsigned offset;
2640 	unsigned max;
2641 
2642 	if (!test_opt(sb, MBALLOC))
2643 		return 0;
2644 
2645 	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2646 
2647 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2648 	if (sbi->s_mb_offsets == NULL) {
2649 		clear_opt(sbi->s_mount_opt, MBALLOC);
2650 		return -ENOMEM;
2651 	}
2652 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2653 	if (sbi->s_mb_maxs == NULL) {
2654 		clear_opt(sbi->s_mount_opt, MBALLOC);
2655 		kfree(sbi->s_mb_offsets);
2656 		return -ENOMEM;
2657 	}
2658 
2659 	/* order 0 is regular bitmap */
2660 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2661 	sbi->s_mb_offsets[0] = 0;
2662 
2663 	i = 1;
2664 	offset = 0;
2665 	max = sb->s_blocksize << 2;
2666 	do {
2667 		sbi->s_mb_offsets[i] = offset;
2668 		sbi->s_mb_maxs[i] = max;
2669 		offset += 1 << (sb->s_blocksize_bits - i);
2670 		max = max >> 1;
2671 		i++;
2672 	} while (i <= sb->s_blocksize_bits + 1);
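	/*
	 * Illustration of the layout just computed, assuming a 4KB block
	 * size (s_blocksize_bits = 12): the order-1 bitmap starts at byte
	 * offset 0 with 16384 usable bits, order 2 at offset 2048 with
	 * 8192 bits, order 3 at offset 3072 with 4096 bits, and so on;
	 * each order occupies half the space of the previous one within
	 * the single buddy block.
	 */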
2673 
2674 	/* init file for buddy data */
2675 	i = ext4_mb_init_backend(sb);
2676 	if (i) {
2677 		clear_opt(sbi->s_mount_opt, MBALLOC);
2678 		kfree(sbi->s_mb_offsets);
2679 		kfree(sbi->s_mb_maxs);
2680 		return i;
2681 	}
2682 
2683 	spin_lock_init(&sbi->s_md_lock);
2684 	INIT_LIST_HEAD(&sbi->s_active_transaction);
2685 	INIT_LIST_HEAD(&sbi->s_closed_transaction);
2686 	INIT_LIST_HEAD(&sbi->s_committed_transaction);
2687 	spin_lock_init(&sbi->s_bal_lock);
2688 
2689 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2690 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2691 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2692 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2693 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2694 	sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2695 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2696 
2697 	i = sizeof(struct ext4_locality_group) * NR_CPUS;
2698 	sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
2699 	if (sbi->s_locality_groups == NULL) {
2700 		clear_opt(sbi->s_mount_opt, MBALLOC);
2701 		kfree(sbi->s_mb_offsets);
2702 		kfree(sbi->s_mb_maxs);
2703 		return -ENOMEM;
2704 	}
2705 	for (i = 0; i < NR_CPUS; i++) {
2706 		struct ext4_locality_group *lg;
2707 		lg = &sbi->s_locality_groups[i];
2708 		mutex_init(&lg->lg_mutex);
2709 		INIT_LIST_HEAD(&lg->lg_prealloc_list);
2710 		spin_lock_init(&lg->lg_prealloc_lock);
2711 	}
2712 
2713 	ext4_mb_init_per_dev_proc(sb);
2714 	ext4_mb_history_init(sb);
2715 
2716 	printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2717 	return 0;
2718 }
2719 
2720 /* needs to be called with the ext4 group lock held (ext4_lock_group) */
2721 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2722 {
2723 	struct ext4_prealloc_space *pa;
2724 	struct list_head *cur, *tmp;
2725 	int count = 0;
2726 
2727 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2728 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2729 		list_del(&pa->pa_group_list);
2730 		count++;
2731 		kfree(pa);
2732 	}
2733 	if (count)
2734 		mb_debug("mballoc: %u PAs left\n", count);
2735 
2736 }
2737 
2738 int ext4_mb_release(struct super_block *sb)
2739 {
2740 	ext4_group_t i;
2741 	int num_meta_group_infos;
2742 	struct ext4_group_info *grinfo;
2743 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2744 
2745 	if (!test_opt(sb, MBALLOC))
2746 		return 0;
2747 
2748 	/* release freed, non-committed blocks */
2749 	spin_lock(&sbi->s_md_lock);
2750 	list_splice_init(&sbi->s_closed_transaction,
2751 			&sbi->s_committed_transaction);
2752 	list_splice_init(&sbi->s_active_transaction,
2753 			&sbi->s_committed_transaction);
2754 	spin_unlock(&sbi->s_md_lock);
2755 	ext4_mb_free_committed_blocks(sb);
2756 
2757 	if (sbi->s_group_info) {
2758 		for (i = 0; i < sbi->s_groups_count; i++) {
2759 			grinfo = ext4_get_group_info(sb, i);
2760 #ifdef DOUBLE_CHECK
2761 			kfree(grinfo->bb_bitmap);
2762 #endif
2763 			ext4_lock_group(sb, i);
2764 			ext4_mb_cleanup_pa(grinfo);
2765 			ext4_unlock_group(sb, i);
2766 			kfree(grinfo);
2767 		}
2768 		num_meta_group_infos = (sbi->s_groups_count +
2769 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
2770 			EXT4_DESC_PER_BLOCK_BITS(sb);
2771 		for (i = 0; i < num_meta_group_infos; i++)
2772 			kfree(sbi->s_group_info[i]);
2773 		kfree(sbi->s_group_info);
2774 	}
2775 	kfree(sbi->s_mb_offsets);
2776 	kfree(sbi->s_mb_maxs);
2777 	if (sbi->s_buddy_cache)
2778 		iput(sbi->s_buddy_cache);
2779 	if (sbi->s_mb_stats) {
2780 		printk(KERN_INFO
2781 		       "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2782 				atomic_read(&sbi->s_bal_allocated),
2783 				atomic_read(&sbi->s_bal_reqs),
2784 				atomic_read(&sbi->s_bal_success));
2785 		printk(KERN_INFO
2786 		      "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2787 				"%u 2^N hits, %u breaks, %u lost\n",
2788 				atomic_read(&sbi->s_bal_ex_scanned),
2789 				atomic_read(&sbi->s_bal_goals),
2790 				atomic_read(&sbi->s_bal_2orders),
2791 				atomic_read(&sbi->s_bal_breaks),
2792 				atomic_read(&sbi->s_mb_lost_chunks));
2793 		printk(KERN_INFO
2794 		       "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2795 				sbi->s_mb_buddies_generated++,
2796 				sbi->s_mb_generation_time);
2797 		printk(KERN_INFO
2798 		       "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2799 				atomic_read(&sbi->s_mb_preallocated),
2800 				atomic_read(&sbi->s_mb_discarded));
2801 	}
2802 
2803 	kfree(sbi->s_locality_groups);
2804 
2805 	ext4_mb_history_release(sb);
2806 	ext4_mb_destroy_per_dev_proc(sb);
2807 
2808 	return 0;
2809 }
2810 
2811 static void ext4_mb_free_committed_blocks(struct super_block *sb)
2812 {
2813 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2814 	int err;
2815 	int i;
2816 	int count = 0;
2817 	int count2 = 0;
2818 	struct ext4_free_metadata *md;
2819 	struct ext4_buddy e4b;
2820 
2821 	if (list_empty(&sbi->s_committed_transaction))
2822 		return;
2823 
2824 	/* there are committed blocks yet to be freed */
2825 	do {
2826 		/* get next array of blocks */
2827 		md = NULL;
2828 		spin_lock(&sbi->s_md_lock);
2829 		if (!list_empty(&sbi->s_committed_transaction)) {
2830 			md = list_entry(sbi->s_committed_transaction.next,
2831 					struct ext4_free_metadata, list);
2832 			list_del(&md->list);
2833 		}
2834 		spin_unlock(&sbi->s_md_lock);
2835 
2836 		if (md == NULL)
2837 			break;
2838 
2839 		mb_debug("gonna free %u blocks in group %lu (0x%p):",
2840 				md->num, md->group, md);
2841 
2842 		err = ext4_mb_load_buddy(sb, md->group, &e4b);
2843 		/* we expect to find existing buddy because it's pinned */
2844 		BUG_ON(err != 0);
2845 
2846 		/* there are blocks to put in buddy to make them really free */
2847 		count += md->num;
2848 		count2++;
2849 		ext4_lock_group(sb, md->group);
2850 		for (i = 0; i < md->num; i++) {
2851 			mb_debug(" %u", md->blocks[i]);
2852 			err = mb_free_blocks(NULL, &e4b, md->blocks[i], 1);
2853 			BUG_ON(err != 0);
2854 		}
2855 		mb_debug("\n");
2856 		ext4_unlock_group(sb, md->group);
2857 
2858 		/* balance refcounts from ext4_mb_free_metadata() */
2859 		page_cache_release(e4b.bd_buddy_page);
2860 		page_cache_release(e4b.bd_bitmap_page);
2861 
2862 		kfree(md);
2863 		ext4_mb_release_desc(&e4b);
2864 
2865 	} while (md);
2866 
2867 	mb_debug("freed %u blocks in %u structures\n", count, count2);
2868 }
2869 
2870 #define EXT4_ROOT			"ext4"
2871 #define EXT4_MB_STATS_NAME		"stats"
2872 #define EXT4_MB_MAX_TO_SCAN_NAME	"max_to_scan"
2873 #define EXT4_MB_MIN_TO_SCAN_NAME	"min_to_scan"
2874 #define EXT4_MB_ORDER2_REQ		"order2_req"
2875 #define EXT4_MB_STREAM_REQ		"stream_req"
2876 #define EXT4_MB_GROUP_PREALLOC		"group_prealloc"
2877 
2878 
2879 
2880 #define MB_PROC_VALUE_READ(name)				\
2881 static int ext4_mb_read_##name(char *page, char **start,	\
2882 		off_t off, int count, int *eof, void *data)	\
2883 {								\
2884 	struct ext4_sb_info *sbi = data;			\
2885 	int len;						\
2886 	*eof = 1;						\
2887 	if (off != 0)						\
2888 		return 0;					\
2889 	len = sprintf(page, "%ld\n", sbi->s_mb_##name);		\
2890 	*start = page;						\
2891 	return len;						\
2892 }
2893 
2894 #define MB_PROC_VALUE_WRITE(name)				\
2895 static int ext4_mb_write_##name(struct file *file,		\
2896 		const char __user *buf, unsigned long cnt, void *data)	\
2897 {								\
2898 	struct ext4_sb_info *sbi = data;			\
2899 	char str[32];						\
2900 	long value;						\
2901 	if (cnt >= sizeof(str))					\
2902 		return -EINVAL;					\
2903 	if (copy_from_user(str, buf, cnt))			\
2904 		return -EFAULT;					\
2905 	str[cnt] = '\0';					\
2906 	value = simple_strtol(str, NULL, 0);			\
2907 	if (value <= 0)						\
2908 		return -ERANGE;					\
2909 	sbi->s_mb_##name = value;				\
2910 	return cnt;						\
2911 }
2912 MB_PROC_VALUE_READ(stats);
2913 MB_PROC_VALUE_WRITE(stats);
2914 MB_PROC_VALUE_READ(max_to_scan);
2915 MB_PROC_VALUE_WRITE(max_to_scan);
2916 MB_PROC_VALUE_READ(min_to_scan);
2917 MB_PROC_VALUE_WRITE(min_to_scan);
2918 MB_PROC_VALUE_READ(order2_reqs);
2919 MB_PROC_VALUE_WRITE(order2_reqs);
2920 MB_PROC_VALUE_READ(stream_request);
2921 MB_PROC_VALUE_WRITE(stream_request);
2922 MB_PROC_VALUE_READ(group_prealloc);
2923 MB_PROC_VALUE_WRITE(group_prealloc);
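/*
 * Each MB_PROC_VALUE_READ/WRITE pair above expands to a handler such as
 * ext4_mb_read_stats()/ext4_mb_write_stats() operating on the matching
 * sbi->s_mb_* field; for example, writing "64" to
 * /proc/fs/ext4/<partition>/stream_req ends up in
 * ext4_mb_write_stream_request() and sets s_mb_stream_request to 64.
 */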
2924 
2925 #define	MB_PROC_HANDLER(name, var)					\
2926 do {									\
2927 	proc = create_proc_entry(name, mode, sbi->s_mb_proc);		\
2928 	if (proc == NULL) {						\
2929 		printk(KERN_ERR "EXT4-fs: can't create %s\n", name);	\
2930 		goto err_out;						\
2931 	}								\
2932 	proc->data = sbi;						\
2933 	proc->read_proc  = ext4_mb_read_##var ;				\
2934 	proc->write_proc = ext4_mb_write_##var;				\
2935 } while (0)
2936 
2937 static int ext4_mb_init_per_dev_proc(struct super_block *sb)
2938 {
2939 	mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
2940 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2941 	struct proc_dir_entry *proc;
2942 	char devname[64];
2943 
2944 	snprintf(devname, sizeof(devname) - 1, "%s",
2945 		bdevname(sb->s_bdev, devname));
2946 	sbi->s_mb_proc = proc_mkdir(devname, proc_root_ext4);
2947 
2948 	MB_PROC_HANDLER(EXT4_MB_STATS_NAME, stats);
2949 	MB_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, max_to_scan);
2950 	MB_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, min_to_scan);
2951 	MB_PROC_HANDLER(EXT4_MB_ORDER2_REQ, order2_reqs);
2952 	MB_PROC_HANDLER(EXT4_MB_STREAM_REQ, stream_request);
2953 	MB_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, group_prealloc);
2954 
2955 	return 0;
2956 
2957 err_out:
2958 	printk(KERN_ERR "EXT4-fs: Unable to create %s\n", devname);
2959 	remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_mb_proc);
2960 	remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_mb_proc);
2961 	remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_mb_proc);
2962 	remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc);
2963 	remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc);
2964 	remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_mb_proc);
2965 	remove_proc_entry(devname, proc_root_ext4);
2966 	sbi->s_mb_proc = NULL;
2967 
2968 	return -ENOMEM;
2969 }
2970 
2971 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
2972 {
2973 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2974 	char devname[64];
2975 
2976 	if (sbi->s_mb_proc == NULL)
2977 		return -EINVAL;
2978 
2979 	snprintf(devname, sizeof(devname) - 1, "%s",
2980 		bdevname(sb->s_bdev, devname));
2981 	remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_mb_proc);
2982 	remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_mb_proc);
2983 	remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_mb_proc);
2984 	remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_mb_proc);
2985 	remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_mb_proc);
2986 	remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_mb_proc);
2987 	remove_proc_entry(devname, proc_root_ext4);
2988 
2989 	return 0;
2990 }
2991 
2992 int __init init_ext4_mballoc(void)
2993 {
2994 	ext4_pspace_cachep =
2995 		kmem_cache_create("ext4_prealloc_space",
2996 				     sizeof(struct ext4_prealloc_space),
2997 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
2998 	if (ext4_pspace_cachep == NULL)
2999 		return -ENOMEM;
3000 
3001 	ext4_ac_cachep =
3002 		kmem_cache_create("ext4_alloc_context",
3003 				     sizeof(struct ext4_allocation_context),
3004 				     0, SLAB_RECLAIM_ACCOUNT, NULL);
3005 	if (ext4_ac_cachep == NULL) {
3006 		kmem_cache_destroy(ext4_pspace_cachep);
3007 		return -ENOMEM;
3008 	}
3009 #ifdef CONFIG_PROC_FS
3010 	proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs);
3011 	if (proc_root_ext4 == NULL)
3012 		printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT);
3013 #endif
3014 	return 0;
3015 }
3016 
3017 void exit_ext4_mballoc(void)
3018 {
3019 	/* XXX: synchronize_rcu(); */
3020 	kmem_cache_destroy(ext4_pspace_cachep);
3021 	kmem_cache_destroy(ext4_ac_cachep);
3022 #ifdef CONFIG_PROC_FS
3023 	remove_proc_entry(EXT4_ROOT, proc_root_fs);
3024 #endif
3025 }
3026 
3027 
3028 /*
3029  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
3030  * Returns 0 on success or an error code
3031  */
3032 static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3033 				handle_t *handle)
3034 {
3035 	struct buffer_head *bitmap_bh = NULL;
3036 	struct ext4_super_block *es;
3037 	struct ext4_group_desc *gdp;
3038 	struct buffer_head *gdp_bh;
3039 	struct ext4_sb_info *sbi;
3040 	struct super_block *sb;
3041 	ext4_fsblk_t block;
3042 	int err;
3043 
3044 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3045 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3046 
3047 	sb = ac->ac_sb;
3048 	sbi = EXT4_SB(sb);
3049 	es = sbi->s_es;
3050 
3051 	err = -EIO;
3052 	bitmap_bh = read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3053 	if (!bitmap_bh)
3054 		goto out_err;
3055 
3056 	err = ext4_journal_get_write_access(handle, bitmap_bh);
3057 	if (err)
3058 		goto out_err;
3059 
3060 	err = -EIO;
3061 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3062 	if (!gdp)
3063 		goto out_err;
3064 
3065 	ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
3066 			gdp->bg_free_blocks_count);
3067 
3068 	err = ext4_journal_get_write_access(handle, gdp_bh);
3069 	if (err)
3070 		goto out_err;
3071 
3072 	block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
3073 		+ ac->ac_b_ex.fe_start
3074 		+ le32_to_cpu(es->s_first_data_block);
3075 
3076 	if (block == ext4_block_bitmap(sb, gdp) ||
3077 			block == ext4_inode_bitmap(sb, gdp) ||
3078 			in_range(block, ext4_inode_table(sb, gdp),
3079 				EXT4_SB(sb)->s_itb_per_group)) {
3080 
3081 		ext4_error(sb, __FUNCTION__,
3082 			   "Allocating block in system zone - block = %llu",
3083 			   block);
3084 	}
3085 #ifdef AGGRESSIVE_CHECK
3086 	{
3087 		int i;
3088 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3089 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3090 						bitmap_bh->b_data));
3091 		}
3092 	}
3093 #endif
3094 	mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
3095 				ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
3096 
3097 	spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3098 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
3099 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3100 		gdp->bg_free_blocks_count =
3101 			cpu_to_le16(ext4_free_blocks_after_init(sb,
3102 						ac->ac_b_ex.fe_group,
3103 						gdp));
3104 	}
3105 	gdp->bg_free_blocks_count =
3106 		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
3107 				- ac->ac_b_ex.fe_len);
3108 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
3109 	spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
3110 	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
3111 
3112 	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
3113 	if (err)
3114 		goto out_err;
3115 	err = ext4_journal_dirty_metadata(handle, gdp_bh);
3116 
3117 out_err:
3118 	sb->s_dirt = 1;
3119 	brelse(bitmap_bh);
3120 	return err;
3121 }
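/*
 * Example of the physical block computation above (hypothetical numbers):
 * with 32768 blocks per group and s_first_data_block = 0, fe_group = 3
 * and fe_start = 100 map to absolute block 3 * 32768 + 100 = 98404,
 * which is then checked against the group's own metadata (the bitmaps
 * and the inode table) before the bits are set.
 */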
3122 
3123 /*
3124  * Here we normalize the request for a locality group.
3125  * Group requests are normalized to the s_stripe size if it is set via the
3126  * mount option. If not, we set it to s_mb_group_prealloc, which can be
3127  * configured via /proc/fs/ext4/<partition>/group_prealloc
3128  *
3129  * XXX: should we try to preallocate more than the group has now?
3130  */
3131 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3132 {
3133 	struct super_block *sb = ac->ac_sb;
3134 	struct ext4_locality_group *lg = ac->ac_lg;
3135 
3136 	BUG_ON(lg == NULL);
3137 	if (EXT4_SB(sb)->s_stripe)
3138 		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
3139 	else
3140 		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3141 	mb_debug("#%u: goal %lu blocks for locality group\n",
3142 		current->pid, ac->ac_g_ex.fe_len);
3143 }
3144 
3145 /*
3146  * Normalization means making the request better in terms of
3147  * size and alignment
3148  */
3149 static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3150 				struct ext4_allocation_request *ar)
3151 {
3152 	int bsbits, max;
3153 	ext4_lblk_t end;
3154 	struct list_head *cur;
3155 	loff_t size, orig_size, start_off;
3156 	ext4_lblk_t start, orig_start;
3157 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3158 
3159 	/* only normalize data requests; metadata requests
3160 	   do not need preallocation */
3161 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3162 		return;
3163 
3164 	/* sometimes the caller may want exact blocks */
3165 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3166 		return;
3167 
3168 	/* caller may indicate that preallocation isn't
3169 	 * required (it's a tail, for example) */
3170 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3171 		return;
3172 
3173 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3174 		ext4_mb_normalize_group_request(ac);
3175 		return ;
3176 	}
3177 
3178 	bsbits = ac->ac_sb->s_blocksize_bits;
3179 
3180 	/* first, let's learn the actual file size
3181 	 * assuming the current request is allocated */
3182 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3183 	size = size << bsbits;
3184 	if (size < i_size_read(ac->ac_inode))
3185 		size = i_size_read(ac->ac_inode);
3186 
3187 	/* max blocks available in a group: all but the two bitmaps and the inode table */
3188 	max = EXT4_BLOCKS_PER_GROUP(ac->ac_sb) - 1 - 1 -
3189 				EXT4_SB(ac->ac_sb)->s_itb_per_group;
3190 
3191 #define NRL_CHECK_SIZE(req, size, max, bits)	\
3192 		(req <= (size) || max <= ((size) >> bits))
3193 
3194 	/* first, try to predict filesize */
3195 	/* XXX: should this table be tunable? */
3196 	start_off = 0;
3197 	if (size <= 16 * 1024) {
3198 		size = 16 * 1024;
3199 	} else if (size <= 32 * 1024) {
3200 		size = 32 * 1024;
3201 	} else if (size <= 64 * 1024) {
3202 		size = 64 * 1024;
3203 	} else if (size <= 128 * 1024) {
3204 		size = 128 * 1024;
3205 	} else if (size <= 256 * 1024) {
3206 		size = 256 * 1024;
3207 	} else if (size <= 512 * 1024) {
3208 		size = 512 * 1024;
3209 	} else if (size <= 1024 * 1024) {
3210 		size = 1024 * 1024;
3211 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, bsbits)) {
3212 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3213 						(20 - bsbits)) << 20;
3214 		size = 1024 * 1024;
3215 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, bsbits)) {
3216 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3217 							(22 - bsbits)) << 22;
3218 		size = 4 * 1024 * 1024;
3219 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3220 					(8<<20)>>bsbits, max, bsbits)) {
3221 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3222 							(23 - bsbits)) << 23;
3223 		size = 8 * 1024 * 1024;
3224 	} else {
3225 		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3226 		size	  = ac->ac_o_ex.fe_len << bsbits;
3227 	}
3228 	orig_size = size = size >> bsbits;
3229 	orig_start = start = start_off >> bsbits;
3230 
3231 	/* don't cover already allocated blocks in selected range */
3232 	if (ar->pleft && start <= ar->lleft) {
3233 		size -= ar->lleft + 1 - start;
3234 		start = ar->lleft + 1;
3235 	}
3236 	if (ar->pright && start + size - 1 >= ar->lright)
3237 		size -= start + size - ar->lright;
3238 
3239 	end = start + size;
3240 
3241 	/* check we don't cross already preallocated blocks */
3242 	rcu_read_lock();
3243 	list_for_each_rcu(cur, &ei->i_prealloc_list) {
3244 		struct ext4_prealloc_space *pa;
3245 		unsigned long pa_end;
3246 
3247 		pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3248 
3249 		if (pa->pa_deleted)
3250 			continue;
3251 		spin_lock(&pa->pa_lock);
3252 		if (pa->pa_deleted) {
3253 			spin_unlock(&pa->pa_lock);
3254 			continue;
3255 		}
3256 
3257 		pa_end = pa->pa_lstart + pa->pa_len;
3258 
3259 		/* PA must not overlap original request */
3260 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3261 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3262 
3263 		/* skip PAs that the normalized request doesn't overlap with */
3264 		if (pa->pa_lstart >= end) {
3265 			spin_unlock(&pa->pa_lock);
3266 			continue;
3267 		}
3268 		if (pa_end <= start) {
3269 			spin_unlock(&pa->pa_lock);
3270 			continue;
3271 		}
3272 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3273 
3274 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3275 			BUG_ON(pa_end < start);
3276 			start = pa_end;
3277 		}
3278 
3279 		if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3280 			BUG_ON(pa->pa_lstart > end);
3281 			end = pa->pa_lstart;
3282 		}
3283 		spin_unlock(&pa->pa_lock);
3284 	}
3285 	rcu_read_unlock();
3286 	size = end - start;
3287 
3288 	/* XXX: extra loop to check we really don't overlap preallocations */
3289 	rcu_read_lock();
3290 	list_for_each_rcu(cur, &ei->i_prealloc_list) {
3291 		struct ext4_prealloc_space *pa;
3292 		unsigned long pa_end;
3293 		pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3294 		spin_lock(&pa->pa_lock);
3295 		if (pa->pa_deleted == 0) {
3296 			pa_end = pa->pa_lstart + pa->pa_len;
3297 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3298 		}
3299 		spin_unlock(&pa->pa_lock);
3300 	}
3301 	rcu_read_unlock();
3302 
3303 	if (start + size <= ac->ac_o_ex.fe_logical &&
3304 			start > ac->ac_o_ex.fe_logical) {
3305 		printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3306 			(unsigned long) start, (unsigned long) size,
3307 			(unsigned long) ac->ac_o_ex.fe_logical);
3308 	}
3309 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3310 			start > ac->ac_o_ex.fe_logical);
3311 	BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3312 
3313 	/* now prepare goal request */
3314 
3315 	/* XXX: is it better to align blocks with respect to logical
3316 	 * placement or to satisfy a big request as is */
3317 	ac->ac_g_ex.fe_logical = start;
3318 	ac->ac_g_ex.fe_len = size;
3319 
3320 	/* define goal start in order to merge */
3321 	if (ar->pright && (ar->lright == (start + size))) {
3322 		/* merge to the right */
3323 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3324 						&ac->ac_f_ex.fe_group,
3325 						&ac->ac_f_ex.fe_start);
3326 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3327 	}
3328 	if (ar->pleft && (ar->lleft + 1 == start)) {
3329 		/* merge to the left */
3330 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3331 						&ac->ac_f_ex.fe_group,
3332 						&ac->ac_f_ex.fe_start);
3333 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3334 	}
3335 
3336 	mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3337 		(unsigned) orig_size, (unsigned) start);
3338 }
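/*
 * Example of the normalization above, assuming 4KB blocks and no
 * neighbouring allocations or preallocations: a write extending a file
 * to 100KB falls into the "size <= 128 * 1024" bucket, so the goal
 * request becomes 128KB >> 12 = 32 blocks starting at logical block 0;
 * with a 1KB block size the same file would be normalized to 128 blocks.
 */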
3339 
3340 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3341 {
3342 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3343 
3344 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3345 		atomic_inc(&sbi->s_bal_reqs);
3346 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3347 		if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3348 			atomic_inc(&sbi->s_bal_success);
3349 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3350 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3351 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3352 			atomic_inc(&sbi->s_bal_goals);
3353 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3354 			atomic_inc(&sbi->s_bal_breaks);
3355 	}
3356 
3357 	ext4_mb_store_history(ac);
3358 }
3359 
3360 /*
3361  * use blocks preallocated to inode
3362  */
3363 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3364 				struct ext4_prealloc_space *pa)
3365 {
3366 	ext4_fsblk_t start;
3367 	ext4_fsblk_t end;
3368 	int len;
3369 
3370 	/* found preallocated blocks, use them */
3371 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3372 	end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3373 	len = end - start;
3374 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3375 					&ac->ac_b_ex.fe_start);
3376 	ac->ac_b_ex.fe_len = len;
3377 	ac->ac_status = AC_STATUS_FOUND;
3378 	ac->ac_pa = pa;
3379 
3380 	BUG_ON(start < pa->pa_pstart);
3381 	BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3382 	BUG_ON(pa->pa_free < len);
3383 	pa->pa_free -= len;
3384 
3385 	mb_debug("use %llu/%lu from inode pa %p\n", start, len, pa);
3386 }
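/*
 * Worked example for the math above (hypothetical numbers): a pa with
 * pa_lstart = 0, pa_pstart = 1000 and pa_len = 16 serving a request for
 * 8 blocks at logical block 4 yields start = 1004 and
 * end = min(1016, 1012) = 1012, i.e. the eight physical blocks
 * 1004..1011, and pa_free drops by 8.
 */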
3387 
3388 /*
3389  * use blocks preallocated to locality group
3390  */
3391 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3392 				struct ext4_prealloc_space *pa)
3393 {
3394 	unsigned len = ac->ac_o_ex.fe_len;
3395 
3396 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3397 					&ac->ac_b_ex.fe_group,
3398 					&ac->ac_b_ex.fe_start);
3399 	ac->ac_b_ex.fe_len = len;
3400 	ac->ac_status = AC_STATUS_FOUND;
3401 	ac->ac_pa = pa;
3402 
3403 	/* we don't correct pa_pstart or pa_plen here to avoid a
3404 	 * possible race when the group is being loaded concurrently;
3405 	 * instead we correct the pa later, after blocks are marked
3406 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3407 	 * Other CPUs are prevented from allocating from this pa by lg_mutex
3408 	 */
3409 	mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3410 }
3411 
3412 /*
3413  * search goal blocks in preallocated space
3414  */
3415 static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3416 {
3417 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3418 	struct ext4_locality_group *lg;
3419 	struct ext4_prealloc_space *pa;
3420 	struct list_head *cur;
3421 
3422 	/* only data can be preallocated */
3423 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3424 		return 0;
3425 
3426 	/* first, try per-file preallocation */
3427 	rcu_read_lock();
3428 	list_for_each_rcu(cur, &ei->i_prealloc_list) {
3429 		pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3430 
3431 		/* all fields in this condition don't change,
3432 		 * so we can skip locking for them */
3433 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3434 			ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3435 			continue;
3436 
3437 		/* found preallocated blocks, use them */
3438 		spin_lock(&pa->pa_lock);
3439 		if (pa->pa_deleted == 0 && pa->pa_free) {
3440 			atomic_inc(&pa->pa_count);
3441 			ext4_mb_use_inode_pa(ac, pa);
3442 			spin_unlock(&pa->pa_lock);
3443 			ac->ac_criteria = 10;
3444 			rcu_read_unlock();
3445 			return 1;
3446 		}
3447 		spin_unlock(&pa->pa_lock);
3448 	}
3449 	rcu_read_unlock();
3450 
3451 	/* can we use group allocation? */
3452 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3453 		return 0;
3454 
3455 	/* inode may have no locality group for some reason */
3456 	lg = ac->ac_lg;
3457 	if (lg == NULL)
3458 		return 0;
3459 
3460 	rcu_read_lock();
3461 	list_for_each_rcu(cur, &lg->lg_prealloc_list) {
3462 		pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
3463 		spin_lock(&pa->pa_lock);
3464 		if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
3465 			atomic_inc(&pa->pa_count);
3466 			ext4_mb_use_group_pa(ac, pa);
3467 			spin_unlock(&pa->pa_lock);
3468 			ac->ac_criteria = 20;
3469 			rcu_read_unlock();
3470 			return 1;
3471 		}
3472 		spin_unlock(&pa->pa_lock);
3473 	}
3474 	rcu_read_unlock();
3475 
3476 	return 0;
3477 }
3478 
3479 /*
3480  * The function goes through all preallocations in this group and marks them
3481  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3482  * Needs to be called with the ext4 group lock held (ext4_lock_group)
3483  */
3484 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3485 					ext4_group_t group)
3486 {
3487 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3488 	struct ext4_prealloc_space *pa;
3489 	struct list_head *cur;
3490 	ext4_group_t groupnr;
3491 	ext4_grpblk_t start;
3492 	int preallocated = 0;
3493 	int count = 0;
3494 	int len;
3495 
3496 	/* all forms of preallocation discard first load the group,
3497 	 * so the only competing code is preallocation use.
3498 	 * We don't need any locking here.
3499 	 * Notice we do NOT ignore preallocations with pa_deleted;
3500 	 * otherwise we could leave used blocks available for
3501 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3502 	 * is dropping the preallocation
3503 	 */
3504 	list_for_each(cur, &grp->bb_prealloc_list) {
3505 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3506 		spin_lock(&pa->pa_lock);
3507 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3508 					     &groupnr, &start);
3509 		len = pa->pa_len;
3510 		spin_unlock(&pa->pa_lock);
3511 		if (unlikely(len == 0))
3512 			continue;
3513 		BUG_ON(groupnr != group);
3514 		mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3515 						bitmap, start, len);
3516 		preallocated += len;
3517 		count++;
3518 	}
3519 	mb_debug("preallocated %u for group %lu\n", preallocated, group);
3520 }
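
/*
 * The marking above is just an OR of each pa's [start, start + len)
 * range into the in-core bitmap.  A userspace sketch of the same
 * operation, assuming a plain byte-array bitmap and no per-group
 * locking:
 */
#if 0
/* userspace sketch: mark [start, start + len) used in a byte bitmap */
static void mark_used_sketch(unsigned char *bitmap, unsigned int start,
			     unsigned int len)
{
	unsigned int i;

	for (i = start; i < start + len; i++)
		bitmap[i >> 3] |= 1 << (i & 7);
}
#endif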
3521 
3522 static void ext4_mb_pa_callback(struct rcu_head *head)
3523 {
3524 	struct ext4_prealloc_space *pa;
3525 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3526 	kmem_cache_free(ext4_pspace_cachep, pa);
3527 }
3528 
3529 /*
3530  * drops a reference to preallocated space descriptor
3531  * if this was the last reference and the space is consumed
3532  */
3533 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3534 			struct super_block *sb, struct ext4_prealloc_space *pa)
3535 {
3536 	unsigned long grp;
3537 
3538 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3539 		return;
3540 
3541 	/* in this short window concurrent discard can set pa_deleted */
3542 	spin_lock(&pa->pa_lock);
3543 	if (pa->pa_deleted == 1) {
3544 		spin_unlock(&pa->pa_lock);
3545 		return;
3546 	}
3547 
3548 	pa->pa_deleted = 1;
3549 	spin_unlock(&pa->pa_lock);
3550 
3551 	/* the -1 protects us from crossing into the next allocation group */
3552 	ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3553 
3554 	/*
3555 	 * possible race:
3556 	 *
3557 	 *  P1 (buddy init)			P2 (regular allocation)
3558 	 *					find block B in PA
3559 	 *  copy on-disk bitmap to buddy
3560 	 *  					mark B in on-disk bitmap
3561 	 *					drop PA from group
3562 	 *  mark all PAs in buddy
3563 	 *
3564 	 * thus, P1 initializes buddy with B available. to prevent this
3565 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3566 	 * against that pair
3567 	 */
3568 	ext4_lock_group(sb, grp);
3569 	list_del(&pa->pa_group_list);
3570 	ext4_unlock_group(sb, grp);
3571 
3572 	spin_lock(pa->pa_obj_lock);
3573 	list_del_rcu(&pa->pa_inode_list);
3574 	spin_unlock(pa->pa_obj_lock);
3575 
3576 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3577 }
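
/*
 * The teardown above follows a common pattern: drop the last reference,
 * win the pa_deleted race under the spinlock, unlink, and leave the
 * actual freeing to RCU.  A generic sketch of that pattern (struct obj
 * and obj_free_rcu() are made up for illustration):
 */
#if 0
/* generic sketch of the last-reference teardown pattern used above */
struct obj {
	atomic_t count;
	spinlock_t lock;
	int deleted;
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

static void put_obj(struct obj *o)
{
	if (!atomic_dec_and_test(&o->count))
		return;			/* not the last reference */
	spin_lock(&o->lock);
	if (o->deleted) {		/* a concurrent discard beat us */
		spin_unlock(&o->lock);
		return;
	}
	o->deleted = 1;			/* we own the teardown from here */
	spin_unlock(&o->lock);
	/* unlink from any RCU-protected lists here, then ... */
	call_rcu(&o->rcu, obj_free_rcu);
}
#endif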
3578 
3579 /*
3580  * creates new preallocated space for given inode
3581  */
3582 static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3583 {
3584 	struct super_block *sb = ac->ac_sb;
3585 	struct ext4_prealloc_space *pa;
3586 	struct ext4_group_info *grp;
3587 	struct ext4_inode_info *ei;
3588 
3589 	/* preallocate only when the found space is larger than requested */
3590 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3591 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3592 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3593 
3594 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3595 	if (pa == NULL)
3596 		return -ENOMEM;
3597 
3598 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3599 		int winl;
3600 		int wins;
3601 		int win;
3602 		int offs;
3603 
3604 		/* we can't allocate as much as the normalizer wants,
3605 		 * so the found space must be given a proper lstart
3606 		 * that still covers the original request */
3607 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3608 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3609 
3610 		/* we're limited by the original request in that its
3611 		 * logical block must be covered in any case;
3612 		 * winl is the window we can move our chunk within */
3613 		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3614 
3615 		/* also, we should cover whole original request */
3616 		wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3617 
3618 		/* the smallest one defines real window */
3619 		win = min(winl, wins);
3620 
3621 		offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3622 		if (offs && offs < win)
3623 			win = offs;
3624 
3625 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3626 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3627 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3628 	}
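
	/*
	 * Worked example of the shift above (made-up numbers): the
	 * original request was 2 blocks at logical 26, the normalized
	 * goal was 32 blocks at logical 0, but only 8 blocks were found.
	 * Then winl = 26 - 0 = 26 and wins = 8 - 2 = 6, so win = 6;
	 * offs = 26 % 8 = 2 is smaller still, so win = 2 and fe_logical
	 * becomes 26 - 2 = 24.  The pa covers logical [24, 32), which
	 * contains the original request and is aligned to the allocated
	 * length.
	 */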
3629 
3630 	/* preallocation can change ac_b_ex, thus we store actually
3631 	 * allocated blocks for history */
3632 	ac->ac_f_ex = ac->ac_b_ex;
3633 
3634 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
3635 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3636 	pa->pa_len = ac->ac_b_ex.fe_len;
3637 	pa->pa_free = pa->pa_len;
3638 	atomic_set(&pa->pa_count, 1);
3639 	spin_lock_init(&pa->pa_lock);
3640 	pa->pa_deleted = 0;
3641 	pa->pa_linear = 0;
3642 
3643 	mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3644 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3645 
3646 	ext4_mb_use_inode_pa(ac, pa);
3647 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3648 
3649 	ei = EXT4_I(ac->ac_inode);
3650 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3651 
3652 	pa->pa_obj_lock = &ei->i_prealloc_lock;
3653 	pa->pa_inode = ac->ac_inode;
3654 
3655 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3656 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3657 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3658 
3659 	spin_lock(pa->pa_obj_lock);
3660 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3661 	spin_unlock(pa->pa_obj_lock);
3662 
3663 	return 0;
3664 }
3665 
3666 /*
3667  * creates new preallocated space for the locality group this inode belongs to
3668  */
3669 static int ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3670 {
3671 	struct super_block *sb = ac->ac_sb;
3672 	struct ext4_locality_group *lg;
3673 	struct ext4_prealloc_space *pa;
3674 	struct ext4_group_info *grp;
3675 
3676 	/* preallocate only when the found space is larger than requested */
3677 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3678 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3679 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3680 
3681 	BUG_ON(ext4_pspace_cachep == NULL);
3682 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3683 	if (pa == NULL)
3684 		return -ENOMEM;
3685 
3686 	/* preallocation can change ac_b_ex, thus we store actually
3687 	 * allocated blocks for history */
3688 	ac->ac_f_ex = ac->ac_b_ex;
3689 
3690 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3691 	pa->pa_lstart = pa->pa_pstart;
3692 	pa->pa_len = ac->ac_b_ex.fe_len;
3693 	pa->pa_free = pa->pa_len;
3694 	atomic_set(&pa->pa_count, 1);
3695 	spin_lock_init(&pa->pa_lock);
3696 	pa->pa_deleted = 0;
3697 	pa->pa_linear = 1;
3698 
3699 	mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3700 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3701 
3702 	ext4_mb_use_group_pa(ac, pa);
3703 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3704 
3705 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3706 	lg = ac->ac_lg;
3707 	BUG_ON(lg == NULL);
3708 
3709 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
3710 	pa->pa_inode = NULL;
3711 
3712 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3713 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3714 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3715 
3716 	spin_lock(pa->pa_obj_lock);
3717 	list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
3718 	spin_unlock(pa->pa_obj_lock);
3719 
3720 	return 0;
3721 }
3722 
3723 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3724 {
3725 	int err;
3726 
3727 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3728 		err = ext4_mb_new_group_pa(ac);
3729 	else
3730 		err = ext4_mb_new_inode_pa(ac);
3731 	return err;
3732 }
3733 
3734 /*
3735  * finds all unused blocks in the on-disk bitmap and frees them in the
3736  * in-core bitmap and buddy.
3737  * @pa must be unlinked from inode and group lists, so that
3738  * nobody else can find/use it.
3739  * the caller MUST hold group/inode locks.
3740  * TODO: optimize the case when there are no in-core structures yet
3741  */
3742 static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
3743 				struct buffer_head *bitmap_bh,
3744 				struct ext4_prealloc_space *pa)
3745 {
3746 	struct ext4_allocation_context *ac;
3747 	struct super_block *sb = e4b->bd_sb;
3748 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3749 	unsigned long end;
3750 	unsigned long next;
3751 	ext4_group_t group;
3752 	ext4_grpblk_t bit;
3753 	sector_t start;
3754 	int err = 0;
3755 	int free = 0;
3756 
3757 	BUG_ON(pa->pa_deleted == 0);
3758 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3759 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3760 	end = bit + pa->pa_len;
3761 
3762 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3763 
3764 	if (ac) {
3765 		ac->ac_sb = sb;
3766 		ac->ac_inode = pa->pa_inode;
3767 		ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3768 	}
3769 
3770 	while (bit < end) {
3771 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3772 		if (bit >= end)
3773 			break;
3774 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3775 		if (next > end)
3776 			next = end;
3777 		start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3778 				le32_to_cpu(sbi->s_es->s_first_data_block);
3779 		mb_debug("    free preallocated %u/%u in group %u\n",
3780 				(unsigned) start, (unsigned) next - bit,
3781 				(unsigned) group);
3782 		free += next - bit;
3783 
3784 		if (ac) {
3785 			ac->ac_b_ex.fe_group = group;
3786 			ac->ac_b_ex.fe_start = bit;
3787 			ac->ac_b_ex.fe_len = next - bit;
3788 			ac->ac_b_ex.fe_logical = 0;
3789 			ext4_mb_store_history(ac);
3790 		}
3791 
3792 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3793 		bit = next + 1;
3794 	}
3795 	if (free != pa->pa_free) {
3796 		printk(KERN_CRIT "pa %p: logical %lu, phys. %lu, len %lu\n",
3797 			pa, (unsigned long) pa->pa_lstart,
3798 			(unsigned long) pa->pa_pstart,
3799 			(unsigned long) pa->pa_len);
3800 		ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n",
3801 						free, pa->pa_free);
3802 		/*
3803 		 * pa is already deleted so we use the value obtained
3804 		 * from the bitmap and continue.
3805 		 */
3806 	}
3807 	atomic_add(free, &sbi->s_mb_discarded);
3808 	if (ac)
3809 		kmem_cache_free(ext4_ac_cachep, ac);
3810 
3811 	return err;
3812 }
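
/*
 * The loop above walks maximal free runs by alternating find-next-zero
 * and find-next-set searches over the on-disk bitmap.  A standalone
 * sketch of the same walk over a byte-array bitmap (the helpers below
 * are made up for illustration):
 */
#if 0
/* standalone sketch of the free-run walk above; byte-array bitmap */
static int test_bit_sketch(const unsigned char *bm, unsigned int i)
{
	return (bm[i >> 3] >> (i & 7)) & 1;
}

/* return the first bit >= start with the given value, or end */
static unsigned int find_next_sketch(const unsigned char *bm,
				     unsigned int end, unsigned int start,
				     int value)
{
	while (start < end && test_bit_sketch(bm, start) != value)
		start++;
	return start;
}

static unsigned int count_free_runs(const unsigned char *bm, unsigned int end)
{
	unsigned int bit = 0, next, free = 0;

	while (bit < end) {
		bit = find_next_sketch(bm, end, bit, 0);  /* run start */
		if (bit >= end)
			break;
		next = find_next_sketch(bm, end, bit, 1); /* run end */
		free += next - bit;
		bit = next + 1;
	}
	return free;
}
#endif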
3813 
3814 static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3815 				struct ext4_prealloc_space *pa)
3816 {
3817 	struct ext4_allocation_context *ac;
3818 	struct super_block *sb = e4b->bd_sb;
3819 	ext4_group_t group;
3820 	ext4_grpblk_t bit;
3821 
3822 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3823 
3824 	if (ac)
3825 		ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3826 
3827 	BUG_ON(pa->pa_deleted == 0);
3828 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3829 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3830 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3831 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3832 
3833 	if (ac) {
3834 		ac->ac_sb = sb;
3835 		ac->ac_inode = NULL;
3836 		ac->ac_b_ex.fe_group = group;
3837 		ac->ac_b_ex.fe_start = bit;
3838 		ac->ac_b_ex.fe_len = pa->pa_len;
3839 		ac->ac_b_ex.fe_logical = 0;
3840 		ext4_mb_store_history(ac);
3841 		kmem_cache_free(ext4_ac_cachep, ac);
3842 	}
3843 
3844 	return 0;
3845 }
3846 
3847 /*
3848  * releases all preallocations in given group
3849  *
3850  * first, we need to decide discard policy:
3851  * - when do we discard
3852  *   1) ENOSPC
3853  * - how many do we discard
3854  *   1) how many requested
3855  */
3856 static int ext4_mb_discard_group_preallocations(struct super_block *sb,
3857 					ext4_group_t group, int needed)
3858 {
3859 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3860 	struct buffer_head *bitmap_bh = NULL;
3861 	struct ext4_prealloc_space *pa, *tmp;
3862 	struct list_head list;
3863 	struct ext4_buddy e4b;
3864 	int err;
3865 	int busy = 0;
3866 	int free = 0;
3867 
3868 	mb_debug("discard preallocation for group %lu\n", group);
3869 
3870 	if (list_empty(&grp->bb_prealloc_list))
3871 		return 0;
3872 
3873 	bitmap_bh = read_block_bitmap(sb, group);
3874 	if (bitmap_bh == NULL) {
3875 		/* TODO: real error handling; e4b has not been
3876 		 * loaded yet, so there is nothing to release */
3877 		BUG_ON(bitmap_bh == NULL);
3878 	}
3879 
3880 	err = ext4_mb_load_buddy(sb, group, &e4b);
3881 	BUG_ON(err != 0); /* error handling here */
3882 
3883 	if (needed == 0)
3884 		needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3885 
3886 	grp = ext4_get_group_info(sb, group);
3887 	INIT_LIST_HEAD(&list);
3888 
3889 repeat:
3890 	ext4_lock_group(sb, group);
3891 	list_for_each_entry_safe(pa, tmp,
3892 				&grp->bb_prealloc_list, pa_group_list) {
3893 		spin_lock(&pa->pa_lock);
3894 		if (atomic_read(&pa->pa_count)) {
3895 			spin_unlock(&pa->pa_lock);
3896 			busy = 1;
3897 			continue;
3898 		}
3899 		if (pa->pa_deleted) {
3900 			spin_unlock(&pa->pa_lock);
3901 			continue;
3902 		}
3903 
3904 		/* seems this one can be freed ... */
3905 		pa->pa_deleted = 1;
3906 
3907 		/* we can trust pa_free ... */
3908 		free += pa->pa_free;
3909 
3910 		spin_unlock(&pa->pa_lock);
3911 
3912 		list_del(&pa->pa_group_list);
3913 		list_add(&pa->u.pa_tmp_list, &list);
3914 	}
3915 
3916 	/* if we still need more blocks and some PAs were used, try again */
3917 	if (free < needed && busy) {
3918 		busy = 0;
3919 		ext4_unlock_group(sb, group);
3920 		/*
3921 		 * Yield the CPU here so that we don't get soft lockup
3922 		 * in non preempt case.
3923 		 */
3924 		yield();
3925 		goto repeat;
3926 	}
3927 
3928 	/* found anything to free? */
3929 	if (list_empty(&list)) {
3930 		BUG_ON(free != 0);
3931 		goto out;
3932 	}
3933 
3934 	/* now free all selected PAs */
3935 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3936 
3937 		/* remove from object (inode or locality group) */
3938 		spin_lock(pa->pa_obj_lock);
3939 		list_del_rcu(&pa->pa_inode_list);
3940 		spin_unlock(pa->pa_obj_lock);
3941 
3942 		if (pa->pa_linear)
3943 			ext4_mb_release_group_pa(&e4b, pa);
3944 		else
3945 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
3946 
3947 		list_del(&pa->u.pa_tmp_list);
3948 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3949 	}
3950 
3951 out:
3952 	ext4_unlock_group(sb, group);
3953 	ext4_mb_release_desc(&e4b);
3954 	put_bh(bitmap_bh);
3955 	return free;
3956 }
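
/*
 * The discard loop above uses the cull-to-private-list idiom: victims
 * are detached onto a local list under the lock, so each pa's spinlock
 * is held only briefly and the list walk never has to restart.  A
 * generic sketch of the idiom (struct victim and its helpers are made
 * up; unlike the code above, the sketch releases with no lock held):
 */
#if 0
/* generic sketch of the cull-to-private-list idiom; helpers made up */
struct victim {
	struct list_head list;
	atomic_t count;
};

static int victim_is_idle(struct victim *v);	/* assumed helper */
static void victim_release(struct victim *v);	/* assumed helper */

static void cull_victims(spinlock_t *lock, struct list_head *pool)
{
	struct victim *v, *tmp;
	LIST_HEAD(graveyard);

	spin_lock(lock);
	list_for_each_entry_safe(v, tmp, pool, list)
		if (victim_is_idle(v))
			list_move(&v->list, &graveyard);
	spin_unlock(lock);

	/* the expensive release runs without the pool lock held */
	list_for_each_entry_safe(v, tmp, &graveyard, list) {
		list_del(&v->list);
		victim_release(v);
	}
}
#endif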
3957 
3958 /*
3959  * releases all unused preallocated blocks for the given inode
3960  *
3961  * It's important to discard preallocations under i_data_sem
3962  * We don't want another block to be served from the prealloc
3963  * space when we are discarding the inode prealloc space.
3964  *
3965  * FIXME!! Make sure it is valid at all the call sites
3966  */
3967 void ext4_mb_discard_inode_preallocations(struct inode *inode)
3968 {
3969 	struct ext4_inode_info *ei = EXT4_I(inode);
3970 	struct super_block *sb = inode->i_sb;
3971 	struct buffer_head *bitmap_bh = NULL;
3972 	struct ext4_prealloc_space *pa, *tmp;
3973 	ext4_group_t group = 0;
3974 	struct list_head list;
3975 	struct ext4_buddy e4b;
3976 	int err;
3977 
3978 	if (!test_opt(sb, MBALLOC) || !S_ISREG(inode->i_mode)) {
3979 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3980 		return;
3981 	}
3982 
3983 	mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
3984 
3985 	INIT_LIST_HEAD(&list);
3986 
3987 repeat:
3988 	/* first, collect all pa's in the inode */
3989 	spin_lock(&ei->i_prealloc_lock);
3990 	while (!list_empty(&ei->i_prealloc_list)) {
3991 		pa = list_entry(ei->i_prealloc_list.next,
3992 				struct ext4_prealloc_space, pa_inode_list);
3993 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3994 		spin_lock(&pa->pa_lock);
3995 		if (atomic_read(&pa->pa_count)) {
3996 			/* this shouldn't happen often - nobody should
3997 			 * use preallocation while we're discarding it */
3998 			spin_unlock(&pa->pa_lock);
3999 			spin_unlock(&ei->i_prealloc_lock);
4000 			printk(KERN_ERR "uh-oh! used pa while discarding\n");
4001 			WARN_ON(1);
4002 			schedule_timeout_uninterruptible(HZ);
4003 			goto repeat;
4004 
4005 		}
4006 		if (pa->pa_deleted == 0) {
4007 			pa->pa_deleted = 1;
4008 			spin_unlock(&pa->pa_lock);
4009 			list_del_rcu(&pa->pa_inode_list);
4010 			list_add(&pa->u.pa_tmp_list, &list);
4011 			continue;
4012 		}
4013 
4014 		/* someone is deleting pa right now */
4015 		spin_unlock(&pa->pa_lock);
4016 		spin_unlock(&ei->i_prealloc_lock);
4017 
4018 		/* we have to wait here because pa_deleted
4019 		 * doesn't mean the pa is already unlinked from
4020 		 * the list. as we might be called from
4021 		 * ->clear_inode(), the inode will get freed
4022 		 * and the concurrent thread which is unlinking
4023 		 * the pa from the inode's list may access already
4024 		 * freed memory, bad-bad-bad */
4025 
4026 		/* XXX: if this happens too often, we can
4027 		 * add a flag to force wait only in case
4028 		 * of ->clear_inode(), but not in case of
4029 		 * regular truncate */
4030 		schedule_timeout_uninterruptible(HZ);
4031 		goto repeat;
4032 	}
4033 	spin_unlock(&ei->i_prealloc_lock);
4034 
4035 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4036 		BUG_ON(pa->pa_linear != 0);
4037 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4038 
4039 		err = ext4_mb_load_buddy(sb, group, &e4b);
4040 		BUG_ON(err != 0); /* error handling here */
4041 
4042 		bitmap_bh = read_block_bitmap(sb, group);
4043 		if (bitmap_bh == NULL) {
4044 			/* error handling here */
4045 			ext4_mb_release_desc(&e4b);
4046 			BUG_ON(bitmap_bh == NULL);
4047 		}
4048 
4049 		ext4_lock_group(sb, group);
4050 		list_del(&pa->pa_group_list);
4051 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4052 		ext4_unlock_group(sb, group);
4053 
4054 		ext4_mb_release_desc(&e4b);
4055 		put_bh(bitmap_bh);
4056 
4057 		list_del(&pa->u.pa_tmp_list);
4058 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4059 	}
4060 }
4061 
4062 /*
4063  * finds all preallocated spaces and returns blocks being freed to them;
4064  * if a preallocated space becomes full again (no block from the space is
4065  * in use) the function frees that space in the buddy
4066  * XXX: at the moment, truncate (which is the only way to free blocks)
4067  * discards all preallocations
4068  */
4069 static void ext4_mb_return_to_preallocation(struct inode *inode,
4070 					struct ext4_buddy *e4b,
4071 					sector_t block, int count)
4072 {
4073 	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
4074 }
4075 #ifdef MB_DEBUG
4076 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4077 {
4078 	struct super_block *sb = ac->ac_sb;
4079 	ext4_group_t i;
4080 
4081 	printk(KERN_ERR "EXT4-fs: Can't allocate:"
4082 			" Allocation context details:\n");
4083 	printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
4084 			ac->ac_status, ac->ac_flags);
4085 	printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
4086 			"best %lu/%lu/%lu@%lu cr %d\n",
4087 			(unsigned long)ac->ac_o_ex.fe_group,
4088 			(unsigned long)ac->ac_o_ex.fe_start,
4089 			(unsigned long)ac->ac_o_ex.fe_len,
4090 			(unsigned long)ac->ac_o_ex.fe_logical,
4091 			(unsigned long)ac->ac_g_ex.fe_group,
4092 			(unsigned long)ac->ac_g_ex.fe_start,
4093 			(unsigned long)ac->ac_g_ex.fe_len,
4094 			(unsigned long)ac->ac_g_ex.fe_logical,
4095 			(unsigned long)ac->ac_b_ex.fe_group,
4096 			(unsigned long)ac->ac_b_ex.fe_start,
4097 			(unsigned long)ac->ac_b_ex.fe_len,
4098 			(unsigned long)ac->ac_b_ex.fe_logical,
4099 			(int)ac->ac_criteria);
4100 	printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
4101 		ac->ac_found);
4102 	printk(KERN_ERR "EXT4-fs: groups: \n");
4103 	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
4104 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4105 		struct ext4_prealloc_space *pa;
4106 		ext4_grpblk_t start;
4107 		struct list_head *cur;
4108 		ext4_lock_group(sb, i);
4109 		list_for_each(cur, &grp->bb_prealloc_list) {
4110 			pa = list_entry(cur, struct ext4_prealloc_space,
4111 					pa_group_list);
4112 			spin_lock(&pa->pa_lock);
4113 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4114 						     NULL, &start);
4115 			spin_unlock(&pa->pa_lock);
4116 			printk(KERN_ERR "PA:%lu:%d:%u \n", i,
4117 							start, pa->pa_len);
4118 		}
4119 		ext4_unlock_group(sb, i);
4120 
4121 		if (grp->bb_free == 0)
4122 			continue;
4123 		printk(KERN_ERR "%lu: %d/%d \n",
4124 		       i, grp->bb_free, grp->bb_fragments);
4125 	}
4126 	printk(KERN_ERR "\n");
4127 }
4128 #else
4129 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4130 {
4131 	return;
4132 }
4133 #endif
4134 
4135 /*
4136  * We use locality group preallocation for small files. The size of the
4137  * file is taken to be the current size or the resulting size after
4138  * allocation, whichever is larger
4139  *
4140  * One can tune this size via /proc/fs/ext4/<partition>/stream_req
4141  */
4142 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4143 {
4144 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4145 	int bsbits = ac->ac_sb->s_blocksize_bits;
4146 	loff_t size, isize;
4147 
4148 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4149 		return;
4150 
4151 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
4152 	isize = i_size_read(ac->ac_inode) >> bsbits;
4153 	size = max(size, isize);
4154 
4155 	/* don't use group allocation for large files */
4156 	if (size >= sbi->s_mb_stream_request)
4157 		return;
4158 
4159 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4160 		return;
4161 
4162 	BUG_ON(ac->ac_lg != NULL);
4163 	/*
4164 	 * locality group prealloc spaces are per-cpu. The reason for having
4165 	 * a per-cpu locality group is to reduce the contention between block
4166 	 * requests from multiple CPUs.
4167 	 */
4168 	ac->ac_lg = &sbi->s_locality_groups[get_cpu()];
4169 	put_cpu();
4170 
4171 	/* we're going to use group allocation */
4172 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4173 
4174 	/* serialize all allocations in the group */
4175 	mutex_lock(&ac->ac_lg->lg_mutex);
4176 }
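
/*
 * Worked example of the policy above (made-up numbers): with 4K blocks
 * (bsbits == 12) and the default s_mb_stream_request of 16, writing 4
 * blocks at logical block 0 of a 20KiB file gives
 * size = max(0 + 4, 20480 >> 12) = 5 < 16, so the file counts as small
 * and the request is served from the per-cpu locality group.
 */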
4177 
4178 static int ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4179 				struct ext4_allocation_request *ar)
4180 {
4181 	struct super_block *sb = ar->inode->i_sb;
4182 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4183 	struct ext4_super_block *es = sbi->s_es;
4184 	ext4_group_t group;
4185 	unsigned long len;
4186 	unsigned long goal;
4187 	ext4_grpblk_t block;
4188 
4189 	/* we can't allocate > group size */
4190 	len = ar->len;
4191 
4192 	/* just a dirty hack to filter too big requests  */
4193 	if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4194 		len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4195 
4196 	/* start searching from the goal */
4197 	goal = ar->goal;
4198 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4199 			goal >= ext4_blocks_count(es))
4200 		goal = le32_to_cpu(es->s_first_data_block);
4201 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4202 
4203 	/* set up allocation goals */
4204 	ac->ac_b_ex.fe_logical = ar->logical;
4205 	ac->ac_b_ex.fe_group = 0;
4206 	ac->ac_b_ex.fe_start = 0;
4207 	ac->ac_b_ex.fe_len = 0;
4208 	ac->ac_status = AC_STATUS_CONTINUE;
4209 	ac->ac_groups_scanned = 0;
4210 	ac->ac_ex_scanned = 0;
4211 	ac->ac_found = 0;
4212 	ac->ac_sb = sb;
4213 	ac->ac_inode = ar->inode;
4214 	ac->ac_o_ex.fe_logical = ar->logical;
4215 	ac->ac_o_ex.fe_group = group;
4216 	ac->ac_o_ex.fe_start = block;
4217 	ac->ac_o_ex.fe_len = len;
4218 	ac->ac_g_ex.fe_logical = ar->logical;
4219 	ac->ac_g_ex.fe_group = group;
4220 	ac->ac_g_ex.fe_start = block;
4221 	ac->ac_g_ex.fe_len = len;
4222 	ac->ac_f_ex.fe_len = 0;
4223 	ac->ac_flags = ar->flags;
4224 	ac->ac_2order = 0;
4225 	ac->ac_criteria = 0;
4226 	ac->ac_pa = NULL;
4227 	ac->ac_bitmap_page = NULL;
4228 	ac->ac_buddy_page = NULL;
4229 	ac->ac_lg = NULL;
4230 
4231 	/* we have to define the context: will we work with a file or a
4232 	 * locality group? this is a policy, actually */
4233 	ext4_mb_group_or_file(ac);
4234 
4235 	mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4236 			"left: %u/%u, right %u/%u to %swritable\n",
4237 			(unsigned) ar->len, (unsigned) ar->logical,
4238 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4239 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4240 			(unsigned) ar->lright, (unsigned) ar->pright,
4241 			atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4242 	return 0;
4243 
4244 }
4245 
4246 /*
4247  * release all resource we used in allocation
4248  */
4249 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4250 {
4251 	if (ac->ac_pa) {
4252 		if (ac->ac_pa->pa_linear) {
4253 			/* see comment in ext4_mb_use_group_pa() */
4254 			spin_lock(&ac->ac_pa->pa_lock);
4255 			ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
4256 			ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
4257 			ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
4258 			ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
4259 			spin_unlock(&ac->ac_pa->pa_lock);
4260 		}
4261 		ext4_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
4262 	}
4263 	if (ac->ac_bitmap_page)
4264 		page_cache_release(ac->ac_bitmap_page);
4265 	if (ac->ac_buddy_page)
4266 		page_cache_release(ac->ac_buddy_page);
4267 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4268 		mutex_unlock(&ac->ac_lg->lg_mutex);
4269 	ext4_mb_collect_stats(ac);
4270 	return 0;
4271 }
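
/*
 * Worked example of the group-pa correction above (made-up numbers): if
 * a group pa covered physical blocks [100, 116) with pa_free == 16 and
 * this allocation consumed 4 blocks from its head, then pa_pstart and
 * pa_lstart advance to 104 while pa_free and pa_len drop to 12, so the
 * next user of the pa continues right behind the blocks just consumed.
 */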
4272 
4273 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4274 {
4275 	ext4_group_t i;
4276 	int ret;
4277 	int freed = 0;
4278 
4279 	for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4280 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4281 		freed += ret;
4282 		needed -= ret;
4283 	}
4284 
4285 	return freed;
4286 }
4287 
4288 /*
4289  * Main entry point into mballoc to allocate blocks
4290  * it tries to use preallocation first, then falls back
4291  * to usual allocation
4292  */
4293 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4294 				 struct ext4_allocation_request *ar, int *errp)
4295 {
4296 	struct ext4_allocation_context *ac = NULL;
4297 	struct ext4_sb_info *sbi;
4298 	struct super_block *sb;
4299 	ext4_fsblk_t block = 0;
4300 	int freed;
4301 	int inquota;
4302 
4303 	sb = ar->inode->i_sb;
4304 	sbi = EXT4_SB(sb);
4305 
4306 	if (!test_opt(sb, MBALLOC)) {
4307 		block = ext4_new_blocks_old(handle, ar->inode, ar->goal,
4308 					    &(ar->len), errp);
4309 		return block;
4310 	}
4311 
4312 	while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4313 		ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4314 		ar->len--;
4315 	}
4316 	if (ar->len == 0) {
4317 		*errp = -EDQUOT;
4318 		return 0;
4319 	}
4320 	inquota = ar->len;
4321 
4322 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4323 	if (!ac) {
4324 		*errp = -ENOMEM;
4325 		return 0;
4326 	}
4327 
4328 	ext4_mb_poll_new_transaction(sb, handle);
4329 
4330 	*errp = ext4_mb_initialize_context(ac, ar);
4331 	if (*errp) {
4332 		ar->len = 0;
4333 		goto out;
4334 	}
4335 
4336 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4337 	if (!ext4_mb_use_preallocated(ac)) {
4338 
4339 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4340 		ext4_mb_normalize_request(ac, ar);
4341 
4342 repeat:
4343 		/* allocate space in core */
4344 		ext4_mb_regular_allocator(ac);
4345 
4346 		/* as we've just preallocated more space than the
4347 		 * user originally requested, we store the allocated
4348 		 * space in a special descriptor */
4349 		if (ac->ac_status == AC_STATUS_FOUND &&
4350 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4351 			ext4_mb_new_preallocation(ac);
4352 	}
4353 
4354 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4355 		ext4_mb_mark_diskspace_used(ac, handle);
4356 		*errp = 0;
4357 		block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4358 		ar->len = ac->ac_b_ex.fe_len;
4359 	} else {
4360 		freed  = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4361 		if (freed)
4362 			goto repeat;
4363 		*errp = -ENOSPC;
4364 		ac->ac_b_ex.fe_len = 0;
4365 		ar->len = 0;
4366 		ext4_mb_show_ac(ac);
4367 	}
4368 
4369 	ext4_mb_release_context(ac);
4370 
4371 out:
4372 	if (ar->len < inquota)
4373 		DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4374 
4375 	kmem_cache_free(ext4_ac_cachep, ac);
4376 	return block;
4377 }
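
/*
 * A sketch of how a caller might drive this entry point (simplified;
 * ext4_find_goal_sketch() is a made-up placeholder for the caller's
 * goal-block heuristic, and all error handling is trimmed):
 */
#if 0
static ext4_fsblk_t ext4_find_goal_sketch(struct inode *inode,
					  ext4_lblk_t logical);	/* assumed */

/* sketch of a caller; simplified, error handling trimmed */
static ext4_fsblk_t alloc_data_blocks(handle_t *handle, struct inode *inode,
				      ext4_lblk_t logical, unsigned long *len)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t block;
	int err = 0;

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.logical = logical;		/* file-relative block we need */
	ar.goal = ext4_find_goal_sketch(inode, logical);
	ar.len = *len;			/* how many blocks we would like */
	ar.flags = EXT4_MB_HINT_DATA;	/* plain file data */

	block = ext4_mb_new_blocks(handle, &ar, &err);
	*len = ar.len;			/* may be less than requested */
	return err ? 0 : block;
}
#endif
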
4378 static void ext4_mb_poll_new_transaction(struct super_block *sb,
4379 						handle_t *handle)
4380 {
4381 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4382 
4383 	if (sbi->s_last_transaction == handle->h_transaction->t_tid)
4384 		return;
4385 
4386 	/* new transaction! time to close the last one and free blocks of
4387 	 * the committed transaction. we know that only one transaction can
4388 	 * be active, so the previous transaction may still be being logged
4389 	 * while the transaction before the previous one is known to be
4390 	 * already logged. this means that now we may free blocks freed in
4391 	 * all transactions before the previous one. hope I'm clear enough ... */
4392 
4393 	spin_lock(&sbi->s_md_lock);
4394 	if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
4395 		mb_debug("new transaction %lu, old %lu\n",
4396 				(unsigned long) handle->h_transaction->t_tid,
4397 				(unsigned long) sbi->s_last_transaction);
4398 		list_splice_init(&sbi->s_closed_transaction,
4399 				&sbi->s_committed_transaction);
4400 		list_splice_init(&sbi->s_active_transaction,
4401 				&sbi->s_closed_transaction);
4402 		sbi->s_last_transaction = handle->h_transaction->t_tid;
4403 	}
4404 	spin_unlock(&sbi->s_md_lock);
4405 
4406 	ext4_mb_free_committed_blocks(sb);
4407 }
4408 
4409 static int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4410 			  ext4_group_t group, ext4_grpblk_t block, int count)
4411 {
4412 	struct ext4_group_info *db = e4b->bd_info;
4413 	struct super_block *sb = e4b->bd_sb;
4414 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4415 	struct ext4_free_metadata *md;
4416 	int i;
4417 
4418 	BUG_ON(e4b->bd_bitmap_page == NULL);
4419 	BUG_ON(e4b->bd_buddy_page == NULL);
4420 
4421 	ext4_lock_group(sb, group);
4422 	for (i = 0; i < count; i++) {
4423 		md = db->bb_md_cur;
4424 		if (md && db->bb_tid != handle->h_transaction->t_tid) {
4425 			db->bb_md_cur = NULL;
4426 			md = NULL;
4427 		}
4428 
4429 		if (md == NULL) {
4430 			ext4_unlock_group(sb, group);
4431 			md = kmalloc(sizeof(*md), GFP_NOFS);
4432 			if (md == NULL)
4433 				return -ENOMEM;
4434 			md->num = 0;
4435 			md->group = group;
4436 
4437 			ext4_lock_group(sb, group);
4438 			if (db->bb_md_cur == NULL) {
4439 				spin_lock(&sbi->s_md_lock);
4440 				list_add(&md->list, &sbi->s_active_transaction);
4441 				spin_unlock(&sbi->s_md_lock);
4442 				/* protect buddy cache from being freed,
4443 				 * otherwise we'll refresh it from
4444 				 * on-disk bitmap and lose not-yet-available
4445 				 * blocks */
4446 				page_cache_get(e4b->bd_buddy_page);
4447 				page_cache_get(e4b->bd_bitmap_page);
4448 				db->bb_md_cur = md;
4449 				db->bb_tid = handle->h_transaction->t_tid;
4450 				mb_debug("new md 0x%p for group %lu\n",
4451 						md, md->group);
4452 			} else {
4453 				kfree(md);
4454 				md = db->bb_md_cur;
4455 			}
4456 		}
4457 
4458 		BUG_ON(md->num >= EXT4_BB_MAX_BLOCKS);
4459 		md->blocks[md->num] = block + i;
4460 		md->num++;
4461 		if (md->num == EXT4_BB_MAX_BLOCKS) {
4462 			/* no more space, put full container on a sb's list */
4463 			db->bb_md_cur = NULL;
4464 		}
4465 	}
4466 	ext4_unlock_group(sb, group);
4467 	return 0;
4468 }
4469 
4470 /*
4471  * Main entry point into mballoc to free blocks
4472  */
4473 void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4474 			unsigned long block, unsigned long count,
4475 			int metadata, unsigned long *freed)
4476 {
4477 	struct buffer_head *bitmap_bh = NULL;
4478 	struct super_block *sb = inode->i_sb;
4479 	struct ext4_allocation_context *ac = NULL;
4480 	struct ext4_group_desc *gdp;
4481 	struct ext4_super_block *es;
4482 	unsigned long overflow;
4483 	ext4_grpblk_t bit;
4484 	struct buffer_head *gd_bh;
4485 	ext4_group_t block_group;
4486 	struct ext4_sb_info *sbi;
4487 	struct ext4_buddy e4b;
4488 	int err = 0;
4489 	int ret;
4490 
4491 	*freed = 0;
4492 
4493 	ext4_mb_poll_new_transaction(sb, handle);
4494 
4495 	sbi = EXT4_SB(sb);
4496 	es = EXT4_SB(sb)->s_es;
4497 	if (block < le32_to_cpu(es->s_first_data_block) ||
4498 	    block + count < block ||
4499 	    block + count > ext4_blocks_count(es)) {
4500 		ext4_error(sb, __FUNCTION__,
4501 			    "Freeing blocks not in datazone - "
4502 			    "block = %lu, count = %lu", block, count);
4503 		goto error_return;
4504 	}
4505 
4506 	ext4_debug("freeing block %lu\n", block);
4507 
4508 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4509 	if (ac) {
4510 		ac->ac_op = EXT4_MB_HISTORY_FREE;
4511 		ac->ac_inode = inode;
4512 		ac->ac_sb = sb;
4513 	}
4514 
4515 do_more:
4516 	overflow = 0;
4517 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4518 
4519 	/*
4520 	 * Check to see if we are freeing blocks across a group
4521 	 * boundary.
4522 	 */
4523 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4524 		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4525 		count -= overflow;
4526 	}
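
	/*
	 * Worked example (made-up numbers): with 32768 blocks per group,
	 * freeing count = 10 blocks at bit 32763 overflows the group by
	 * 32763 + 10 - 32768 = 5 blocks, so we free 5 blocks now and loop
	 * back to do_more for the remaining 5 in the next group.
	 */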
4527 	bitmap_bh = read_block_bitmap(sb, block_group);
4528 	if (!bitmap_bh)
4529 		goto error_return;
4530 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4531 	if (!gdp)
4532 		goto error_return;
4533 
4534 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4535 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4536 	    in_range(block, ext4_inode_table(sb, gdp),
4537 		      EXT4_SB(sb)->s_itb_per_group) ||
4538 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
4539 		      EXT4_SB(sb)->s_itb_per_group)) {
4540 
4541 		ext4_error(sb, __FUNCTION__,
4542 			   "Freeing blocks in system zone - "
4543 			   "Block = %lu, count = %lu", block, count);
4544 	}
4545 
4546 	BUFFER_TRACE(bitmap_bh, "getting write access");
4547 	err = ext4_journal_get_write_access(handle, bitmap_bh);
4548 	if (err)
4549 		goto error_return;
4550 
4551 	/*
4552 	 * We are about to modify some metadata.  Call the journal APIs
4553 	 * to unshare ->b_data if a currently-committing transaction is
4554 	 * using it
4555 	 */
4556 	BUFFER_TRACE(gd_bh, "get_write_access");
4557 	err = ext4_journal_get_write_access(handle, gd_bh);
4558 	if (err)
4559 		goto error_return;
4560 
4561 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
4562 	if (err)
4563 		goto error_return;
4564 
4565 #ifdef AGGRESSIVE_CHECK
4566 	{
4567 		int i;
4568 		for (i = 0; i < count; i++)
4569 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4570 	}
4571 #endif
4572 	mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4573 			bit, count);
4574 
4575 	/* We dirtied the bitmap block */
4576 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4577 	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
4578 
4579 	if (ac) {
4580 		ac->ac_b_ex.fe_group = block_group;
4581 		ac->ac_b_ex.fe_start = bit;
4582 		ac->ac_b_ex.fe_len = count;
4583 		ext4_mb_store_history(ac);
4584 	}
4585 
4586 	if (metadata) {
4587 		/* blocks being freed are metadata. these blocks shouldn't
4588 		 * be used until this transaction is committed */
4589 		ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
4590 	} else {
4591 		ext4_lock_group(sb, block_group);
4592 		err = mb_free_blocks(inode, &e4b, bit, count);
4593 		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4594 		ext4_unlock_group(sb, block_group);
4595 		BUG_ON(err != 0);
4596 	}
4597 
4598 	spin_lock(sb_bgl_lock(sbi, block_group));
4599 	gdp->bg_free_blocks_count =
4600 		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
4601 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4602 	spin_unlock(sb_bgl_lock(sbi, block_group));
4603 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
4604 
4605 	ext4_mb_release_desc(&e4b);
4606 
4607 	*freed += count;
4608 
4609 	/* And the group descriptor block */
4610 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4611 	ret = ext4_journal_dirty_metadata(handle, gd_bh);
4612 	if (!err)
4613 		err = ret;
4614 
4615 	if (overflow && !err) {
4616 		block += count;
4617 		count = overflow;
4618 		put_bh(bitmap_bh);
4619 		goto do_more;
4620 	}
4621 	sb->s_dirt = 1;
4622 error_return:
4623 	brelse(bitmap_bh);
4624 	ext4_std_error(sb, err);
4625 	if (ac)
4626 		kmem_cache_free(ext4_ac_cachep, ac);
4627 	return;
4628 }
4629