// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file is either the resulting file size we
 * would have after allocation or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we use
 * group preallocation. The default value of s_mb_stream_request is 16
 * blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is expressed as a
 * number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to keep small files close together on the disk.
58 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is selected by looking at the _logical_
 * start block. Only if the logical file block falls within the range of
 * a prealloc space do we consume that particular prealloc space. This
 * makes sure that we have contiguous physical blocks representing the
 * file blocks.
 *
 * The important thing to note about inode prealloc space is that we
 * don't modify any of its values except pa_free.
77 *
 * If we are not able to find blocks in the inode prealloc space and we
 * have the group allocation flag set, then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used based on whether it has
 * enough free space (pa_free) left within the prealloc space.
 *
 * If we can't allocate blocks via the inode and/or locality group
 * prealloc, then we look at the buddy cache. The buddy cache is
 * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
 * offsets get mapped to the buddy and bitmap information of the
 * different groups. The buddy information is attached to the buddy cache
 * inode so that we can access it through the page cache. The information
 * for each group is loaded via ext4_mb_load_buddy and consists of the
 * block bitmap and the buddy information, stored in the inode as:
99 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 * One block each is used for the bitmap and the buddy information, so
 * each group takes up 2 blocks. A page can contain blocks_per_page
 * (PAGE_SIZE / blocksize) blocks, so it can hold information for
 * groups_per_page groups, which is blocks_per_page/2.
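 *
 * As a purely illustrative example (assuming 4 KiB pages and a 1 KiB
 * block size, neither of which is implied by the code): blocks_per_page
 * = 4096 / 1024 = 4, so groups_per_page = 2. Page 0 of the buddy cache
 * then holds [group 0 bitmap][group 0 buddy][group 1 bitmap][group 1
 * buddy], and in general group N's bitmap lives at logical block 2N and
 * its buddy at block 2N + 1 of the buddy cache inode.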
108 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for the requested number of blocks in the buddy cache. If we
 * are able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we actually need. The extra
 * blocks we get after allocation are added to the respective prealloc
 * list. For inode preallocation we follow a set of heuristics based on
 * file size; these can be found in ext4_mb_normalize_request. If we are
 * doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc
 * depends on the cluster size; for non-bigalloc file systems it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is expressed as
 * a number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option, the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
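 *
 * For example (illustrative numbers only): with the default
 * mb_group_prealloc of 512 blocks and a hypothetical stripe of 24
 * blocks, the group prealloc request would be normalized up to 528
 * blocks, the smallest multiple of 24 that is greater than 512.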
130 *
 * If the "mb_optimize_scan" mount option is set, we maintain the in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 * Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 * This is an array of lists where the index in the array represents the
 * largest free order in the buddy bitmap of the participating group infos
 * of that list. So there are exactly MB_NUM_ORDERS(sb) (i.e. the total
 * number of possible buddy bitmap orders) lists. Group infos are placed
 * on the appropriate list.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 * Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 * This is an array of lists where the i-th list contains groups with an
 * average fragment size >= 2^i and < 2^(i+1). The average fragment size
 * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 * Note that we don't bother with a special list for completely empty
 * groups, so we only have MB_NUM_ORDERS(sb) lists.
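 *
 * For instance (illustrative numbers only), a group with bb_free = 300
 * and bb_fragments = 6 has an average fragment size of 50 clusters and
 * is therefore kept on the list covering the range [32, 64).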
153 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults these
 * data structures to decide the order in which groups are to be traversed
 * when fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups whose largest_free_order is
 * >= the order of the request. We directly look at the largest free order
 * list in data structure (1) above where largest_free_order = order of
 * the request. If that list is empty, we look at the remaining lists in
 * increasing order of largest_free_order. This allows us to perform the
 * CR_POWER2_ALIGNED lookup in O(1) time.
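 *
 * For example, a hypothetical power-of-two request for 16 clusters
 * (order 4) first consults the order-4 largest_free_order list; if that
 * list is empty, the order-5 list is checked next, and so on up to the
 * highest order, without scanning unrelated groups.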
164 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the average fragment
 * size is greater than the request size. So we look up a group whose
 * average fragment size is just above or equal to the request size using
 * our average fragment size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be
 * satisfied in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no block group with an average
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal
 * length and then use the same fragment lists as CR_GOAL_LEN_FAST to find
 * a block group with a big enough average fragment size. This increases
 * the chance of finding a suitable block group in O(1) time and results in
 * faster allocation at the cost of a reduced allocation size.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses the
 * groups in linear order, which requires O(N) search time in each of the
 * CR_POWER2_ALIGNED and CR_GOAL_LEN_FAST phases.
183 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request length is
 * a power of 2 blocks and the order of the allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal
 * to the stripe size (sbi->s_stripe), we try to search for contiguous
 * blocks of stripe size. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for
 * best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here. min_to_scan indicates how long mballoc __must__ look
 * for a best extent and max_to_scan indicates how long mballoc __can__
 * look for a best extent among the found extents. Searching for blocks
 * starts with the group specified as the goal value in the allocation
 * context via ac_g_ex. Each group is first checked against the criteria
 * for whether it can be used for allocation; ext4_mb_good_group explains
 * how the groups are checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations
 * not being close to each other, and so the underlying device may get
 * filled up in a non-linear fashion. While that may not matter on
 * non-rotational devices, for rotational devices it may result in higher
 * seek times. "mb_linear_limit" tells mballoc how many groups it should
 * search linearly before consulting the above data structures for more
 * efficient lookups. For non-rotational devices this value defaults to 0,
 * and for rotational devices it is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as described above. So the
 * first request will hit the buddy cache, which will result in the
 * prealloc space getting filled. The prealloc space is then used for
 * subsequent requests.
 */
222
223 /*
224 * mballoc operates on the following data:
225 * - on-disk bitmap
226 * - in-core buddy (actually includes buddy and bitmap)
227 * - preallocation descriptors (PAs)
228 *
 * there are two types of preallocations:
 * - inode
 *   assigned to a specific inode and can be used for this inode only.
 *   it describes a part of the inode's space preallocated to specific
 *   physical blocks. any block from that preallocation can be used
 *   independently. the descriptor just tracks the number of blocks left
 *   unused. so, before taking some block from the descriptor, one must
 *   make sure the corresponding logical block isn't allocated yet. this
 *   also means that freeing any block within the descriptor's range
 *   must discard all preallocated blocks.
 * - locality group
 *   assigned to a specific locality group which does not translate to a
 *   permanent set of inodes: an inode can join and leave the group. space
 *   from this type of preallocation can be used for any inode. thus
 *   it's consumed from the beginning to the end.
244 *
 * the relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 * - allocated blocks (persistent)
 * - preallocated blocks (non-persistent)
251 *
252 * consistency in mballoc world means that at any time a block is either
253 * free or used in ALL structures. notice: "any time" should not be read
254 * literally -- time is discrete and delimited by locks.
255 *
256 * to keep it simple, we don't use block numbers, instead we count number of
257 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
258 *
 * all operations can be expressed as:
 * - init buddy: buddy = on-disk + PAs
 * - new PA: buddy += N; PA = N
 * - use inode PA: on-disk += N; PA -= N
 * - discard inode PA: buddy -= on-disk - PA; PA = 0
 * - use locality group PA: on-disk += N; PA -= N
 * - discard locality group PA: buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 * is used in the real operation, because we can't know the actually used
 * bits from the PA, only from the on-disk bitmap (a worked example
 * follows below)
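 *
 * worked example of the bookkeeping above (illustrative numbers): suppose
 * the on-disk bitmap shows 100 clusters used and an inode PA of 20
 * clusters exists. buddy init gives buddy = 100 + 20 = 120 used. using 5
 * clusters from that PA marks 5 more bits on disk (on-disk = 105) and
 * drops pa_free by 5 (PA = 15), while the buddy stays at 120 since it
 * already counted the whole PA. discarding the PA then frees the 15
 * still-unused clusters in the buddy, leaving buddy = on-disk = 105 and
 * PA = 0.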
269 *
270 * if we follow this strict logic, then all operations above should be atomic.
271 * given some of them can block, we'd have to use something like semaphores
272 * killing performance on high-end SMP hardware. let's try to relax it using
273 * the following knowledge:
274 * 1) if buddy is referenced, it's already initialized
275 * 2) while block is used in buddy and the buddy is referenced,
276 * nobody can re-allocate that block
 * 3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 * bitmap has a bit set and a PA claims the same block, it's OK. IOW, one
 * can set a bit in the on-disk bitmap if the buddy has the same bit set
 * and/or a PA covers the corresponding block
281 *
282 * so, now we're building a concurrency table:
283 * - init buddy vs.
284 * - new PA
285 * blocks for PA are allocated in the buddy, buddy must be referenced
286 * until PA is linked to allocation group to avoid concurrent buddy init
287 * - use inode PA
288 * we need to make sure that either on-disk bitmap or PA has uptodate data
289 * given (3) we care that PA-=N operation doesn't interfere with init
290 * - discard inode PA
291 * the simplest way would be to have buddy initialized by the discard
292 * - use locality group PA
293 * again PA-=N must be serialized with init
294 * - discard locality group PA
295 * the simplest way would be to have buddy initialized by the discard
296 * - new PA vs.
297 * - use inode PA
298 * i_data_sem serializes them
299 * - discard inode PA
300 * discard process must wait until PA isn't used by another process
301 * - use locality group PA
302 * some mutex should serialize them
303 * - discard locality group PA
304 * discard process must wait until PA isn't used by another process
305 * - use inode PA
306 * - use inode PA
i_data_sem or another mutex should serialize them
308 * - discard inode PA
309 * discard process must wait until PA isn't used by another process
310 * - use locality group PA
311 * nothing wrong here -- they're different PAs covering different blocks
312 * - discard locality group PA
313 * discard process must wait until PA isn't used by another process
314 *
 * now we're ready to draw a few conclusions:
 * - while a PA is referenced, no discard of it is possible
 * - a PA is referenced until its blocks are marked in the on-disk bitmap
 * - a PA changes only after the on-disk bitmap has been updated
 * - discard must not compete with init. either init is done before
 *   any discard, or they're serialized somehow
 * - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've consumed a PA to emptiness. no need to
 * modify the buddy in this case, but we should care about concurrent init
325 *
326 */
327
328 /*
329 * Logic in few words:
330 *
331 * - allocation:
332 * load group
333 * find blocks
334 * mark bits in on-disk bitmap
335 * release group
336 *
337 * - use preallocation:
338 * find proper PA (per-inode or group)
339 * load group
340 * mark bits in on-disk bitmap
341 * release group
342 * release PA
343 *
344 * - free:
345 * load group
346 * mark bits in on-disk bitmap
347 * release group
348 *
349 * - discard preallocations in group:
350 * mark PAs deleted
351 * move them onto local list
352 * load on-disk bitmap
353 * load group
354 * remove PA from object (inode or locality group)
355 * mark free blocks in-core
356 *
357 * - discard inode's preallocations:
358 */
359
360 /*
361 * Locking rules
362 *
363 * Locks:
364 * - bitlock on a group (group)
365 * - object (inode/locality) (object)
366 * - per-pa lock (pa)
367 * - cr_power2_aligned lists lock (cr_power2_aligned)
368 * - cr_goal_len_fast lists lock (cr_goal_len_fast)
369 *
370 * Paths:
371 * - new pa
372 * object
373 * group
374 *
375 * - find and use pa:
376 * pa
377 *
378 * - release consumed pa:
379 * pa
380 * group
381 * object
382 *
383 * - generate in-core bitmap:
384 * group
385 * pa
386 *
387 * - discard all for given object (inode, locality group):
388 * object
389 * pa
390 * group
391 *
392 * - discard all for given group:
393 * group
394 * pa
395 * group
396 * object
397 *
398 * - allocation path (ext4_mb_regular_allocator)
399 * group
400 * cr_power2_aligned/cr_goal_len_fast
401 */
402 static struct kmem_cache *ext4_pspace_cachep;
403 static struct kmem_cache *ext4_ac_cachep;
404 static struct kmem_cache *ext4_free_data_cachep;
405
406 /* We create slab caches for groupinfo data structures based on the
407 * superblock block size. There will be one per mounted filesystem for
408 * each unique s_blocksize_bits */
409 #define NR_GRPINFO_CACHES 8
410 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
411
412 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
413 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
414 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
415 "ext4_groupinfo_64k", "ext4_groupinfo_128k"
416 };
417
418 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
419 ext4_group_t group);
420 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
421
422 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
423 ext4_group_t group, enum criteria cr);
424
425 static int ext4_try_to_trim_range(struct super_block *sb,
426 struct ext4_buddy *e4b, ext4_grpblk_t start,
427 ext4_grpblk_t max, ext4_grpblk_t minblocks);
428
/*
 * The algorithm using this percpu seq counter is as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks, i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed to discard
 *    those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now, to make sure the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or that the
 * list is empty.
 */
447 static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
449 {
450 int __cpu;
451 u64 __seq = 0;
452
453 for_each_possible_cpu(__cpu)
454 __seq += per_cpu(discard_pa_seq, __cpu);
455 return __seq;
456 }
457
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
459 {
460 #if BITS_PER_LONG == 64
461 *bit += ((unsigned long) addr & 7UL) << 3;
462 addr = (void *) ((unsigned long) addr & ~7UL);
463 #elif BITS_PER_LONG == 32
464 *bit += ((unsigned long) addr & 3UL) << 3;
465 addr = (void *) ((unsigned long) addr & ~3UL);
466 #else
467 #error "how many bits you are?!"
468 #endif
469 return addr;
470 }
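
/*
 * Illustrative example of mb_correct_addr_and_bit() (hypothetical values,
 * 64-bit case): for addr = ...0x1003 and bit = 5, the address is rounded
 * down to the unsigned-long boundary ...0x1000 and the bit index becomes
 * 5 + 3 * 8 = 29, so the same bit is then addressed through an aligned word.
 */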
471
static inline int mb_test_bit(int bit, void *addr)
473 {
/*
 * ext4_test_bit on architectures like powerpc
 * needs an unsigned long aligned address
 */
478 addr = mb_correct_addr_and_bit(&bit, addr);
479 return ext4_test_bit(bit, addr);
480 }
481
static inline void mb_set_bit(int bit, void *addr)
483 {
484 addr = mb_correct_addr_and_bit(&bit, addr);
485 ext4_set_bit(bit, addr);
486 }
487
static inline void mb_clear_bit(int bit, void *addr)
489 {
490 addr = mb_correct_addr_and_bit(&bit, addr);
491 ext4_clear_bit(bit, addr);
492 }
493
static inline int mb_test_and_clear_bit(int bit, void *addr)
495 {
496 addr = mb_correct_addr_and_bit(&bit, addr);
497 return ext4_test_and_clear_bit(bit, addr);
498 }
499
static inline int mb_find_next_zero_bit(void *addr, int max, int start)
501 {
502 int fix = 0, ret, tmpmax;
503 addr = mb_correct_addr_and_bit(&fix, addr);
504 tmpmax = max + fix;
505 start += fix;
506
507 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
508 if (ret > max)
509 return max;
510 return ret;
511 }
512
static inline int mb_find_next_bit(void *addr, int max, int start)
514 {
515 int fix = 0, ret, tmpmax;
516 addr = mb_correct_addr_and_bit(&fix, addr);
517 tmpmax = max + fix;
518 start += fix;
519
520 ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
521 if (ret > max)
522 return max;
523 return ret;
524 }
525
static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
527 {
528 char *bb;
529
530 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
531 BUG_ON(max == NULL);
532
533 if (order > e4b->bd_blkbits + 1) {
534 *max = 0;
535 return NULL;
536 }
537
538 /* at order 0 we see each particular block */
539 if (order == 0) {
540 *max = 1 << (e4b->bd_blkbits + 3);
541 return e4b->bd_bitmap;
542 }
543
544 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
545 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
546
547 return bb;
548 }
549
550 #ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
552 int first, int count)
553 {
554 int i;
555 struct super_block *sb = e4b->bd_sb;
556
557 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
558 return;
559 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
560 for (i = 0; i < count; i++) {
561 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
562 ext4_fsblk_t blocknr;
563
564 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
565 blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
566 ext4_grp_locked_error(sb, e4b->bd_group,
567 inode ? inode->i_ino : 0,
568 blocknr,
569 "freeing block already freed "
570 "(bit %u)",
571 first + i);
572 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
573 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
574 }
575 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
576 }
577 }
578
static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
580 {
581 int i;
582
583 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
584 return;
585 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
586 for (i = 0; i < count; i++) {
587 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
588 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
589 }
590 }
591
static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
593 {
594 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
595 return;
596 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
597 unsigned char *b1, *b2;
598 int i;
599 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
600 b2 = (unsigned char *) bitmap;
601 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
602 if (b1[i] != b2[i]) {
603 ext4_msg(e4b->bd_sb, KERN_ERR,
604 "corruption in group %u "
605 "at byte %u(%u): %x in copy != %x "
606 "on disk/prealloc",
607 e4b->bd_group, i, i * 8, b1[i], b2[i]);
608 BUG();
609 }
610 }
611 }
612 }
613
static void mb_group_bb_bitmap_alloc(struct super_block *sb,
615 struct ext4_group_info *grp, ext4_group_t group)
616 {
617 struct buffer_head *bh;
618
619 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
620 if (!grp->bb_bitmap)
621 return;
622
623 bh = ext4_read_block_bitmap(sb, group);
624 if (IS_ERR_OR_NULL(bh)) {
625 kfree(grp->bb_bitmap);
626 grp->bb_bitmap = NULL;
627 return;
628 }
629
630 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
631 put_bh(bh);
632 }
633
static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
635 {
636 kfree(grp->bb_bitmap);
637 }
638
639 #else
static inline void mb_free_blocks_double(struct inode *inode,
641 struct ext4_buddy *e4b, int first, int count)
642 {
643 return;
644 }
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
646 int first, int count)
647 {
648 return;
649 }
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
651 {
652 return;
653 }
654
static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
656 struct ext4_group_info *grp, ext4_group_t group)
657 {
658 return;
659 }
660
static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
662 {
663 return;
664 }
665 #endif
666
667 #ifdef AGGRESSIVE_CHECK
668
669 #define MB_CHECK_ASSERT(assert) \
670 do { \
671 if (!(assert)) { \
672 printk(KERN_EMERG \
673 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
674 function, file, line, # assert); \
675 BUG(); \
676 } \
677 } while (0)
678
static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
680 const char *function, int line)
681 {
682 struct super_block *sb = e4b->bd_sb;
683 int order = e4b->bd_blkbits + 1;
684 int max;
685 int max2;
686 int i;
687 int j;
688 int k;
689 int count;
690 struct ext4_group_info *grp;
691 int fragments = 0;
692 int fstart;
693 struct list_head *cur;
694 void *buddy;
695 void *buddy2;
696
697 if (e4b->bd_info->bb_check_counter++ % 10)
698 return 0;
699
700 while (order > 1) {
701 buddy = mb_find_buddy(e4b, order, &max);
702 MB_CHECK_ASSERT(buddy);
703 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
704 MB_CHECK_ASSERT(buddy2);
705 MB_CHECK_ASSERT(buddy != buddy2);
706 MB_CHECK_ASSERT(max * 2 == max2);
707
708 count = 0;
709 for (i = 0; i < max; i++) {
710
711 if (mb_test_bit(i, buddy)) {
712 /* only single bit in buddy2 may be 0 */
713 if (!mb_test_bit(i << 1, buddy2)) {
714 MB_CHECK_ASSERT(
715 mb_test_bit((i<<1)+1, buddy2));
716 }
717 continue;
718 }
719
720 /* both bits in buddy2 must be 1 */
721 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
722 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
723
724 for (j = 0; j < (1 << order); j++) {
725 k = (i * (1 << order)) + j;
726 MB_CHECK_ASSERT(
727 !mb_test_bit(k, e4b->bd_bitmap));
728 }
729 count++;
730 }
731 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
732 order--;
733 }
734
735 fstart = -1;
736 buddy = mb_find_buddy(e4b, 0, &max);
737 for (i = 0; i < max; i++) {
738 if (!mb_test_bit(i, buddy)) {
739 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
740 if (fstart == -1) {
741 fragments++;
742 fstart = i;
743 }
744 continue;
745 }
746 fstart = -1;
747 /* check used bits only */
748 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
749 buddy2 = mb_find_buddy(e4b, j, &max2);
750 k = i >> j;
751 MB_CHECK_ASSERT(k < max2);
752 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
753 }
754 }
755 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
756 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
757
758 grp = ext4_get_group_info(sb, e4b->bd_group);
759 if (!grp)
return 0;
761 list_for_each(cur, &grp->bb_prealloc_list) {
762 ext4_group_t groupnr;
763 struct ext4_prealloc_space *pa;
764 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
765 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
766 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
767 for (i = 0; i < pa->pa_len; i++)
768 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
769 }
770 return 0;
771 }
772 #undef MB_CHECK_ASSERT
773 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
774 __FILE__, __func__, __LINE__)
775 #else
776 #define mb_check_buddy(e4b)
777 #endif
778
/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks of power-of-2 blocks.
 * Clear the bits in the bitmap covered by the blocks of the chunk(s),
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
786 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
787 struct ext4_group_info *grp)
788 {
789 struct ext4_sb_info *sbi = EXT4_SB(sb);
790 ext4_grpblk_t min;
791 ext4_grpblk_t max;
792 ext4_grpblk_t chunk;
793 unsigned int border;
794
795 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
796
797 border = 2 << sb->s_blocksize_bits;
798
799 while (len > 0) {
800 /* find how many blocks can be covered since this position */
801 max = ffs(first | border) - 1;
802
803 /* find how many blocks of power 2 we need to mark */
804 min = fls(len) - 1;
805
806 if (max < min)
807 min = max;
808 chunk = 1 << min;
809
810 /* mark multiblock chunks only */
811 grp->bb_counters[min]++;
812 if (min > 0)
813 mb_clear_bit(first >> min,
814 buddy + sbi->s_mb_offsets[min]);
815
816 len -= chunk;
817 first += chunk;
818 }
819 }
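
/*
 * Illustrative walk-through of ext4_mb_mark_free_simple() with hypothetical
 * numbers: for first = 6 and len = 5, the range is split into a chunk of 2
 * clusters at offset 6 (bb_counters[1]++, clear bit 3 in the order-1 buddy),
 * a chunk of 2 at offset 8 (bb_counters[1]++), and a single cluster at
 * offset 10 (bb_counters[0]++) - i.e. always the largest power-of-two chunk
 * permitted by both the remaining length and the alignment of 'first'.
 */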
820
static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
822 {
823 int order;
824
/*
 * We don't bother with special lists for groups with only single-block
 * free extents or for completely empty groups.
 */
829 order = fls(len) - 2;
830 if (order < 0)
831 return 0;
832 if (order == MB_NUM_ORDERS(sb))
833 order--;
834 return order;
835 }
836
837 /* Move group to appropriate avg_fragment_size list */
838 static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
840 {
841 struct ext4_sb_info *sbi = EXT4_SB(sb);
842 int new_order;
843
844 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
845 return;
846
847 new_order = mb_avg_fragment_size_order(sb,
848 grp->bb_free / grp->bb_fragments);
849 if (new_order == grp->bb_avg_fragment_size_order)
850 return;
851
852 if (grp->bb_avg_fragment_size_order != -1) {
853 write_lock(&sbi->s_mb_avg_fragment_size_locks[
854 grp->bb_avg_fragment_size_order]);
855 list_del(&grp->bb_avg_fragment_size_node);
856 write_unlock(&sbi->s_mb_avg_fragment_size_locks[
857 grp->bb_avg_fragment_size_order]);
858 }
859 grp->bb_avg_fragment_size_order = new_order;
860 write_lock(&sbi->s_mb_avg_fragment_size_locks[
861 grp->bb_avg_fragment_size_order]);
862 list_add_tail(&grp->bb_avg_fragment_size_node,
863 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
864 write_unlock(&sbi->s_mb_avg_fragment_size_locks[
865 grp->bb_avg_fragment_size_order]);
866 }
867
868 /*
869 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
870 * cr level needs an update.
871 */
static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
873 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
874 {
875 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
876 struct ext4_group_info *iter;
877 int i;
878
879 if (ac->ac_status == AC_STATUS_FOUND)
880 return;
881
882 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
883 atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
884
885 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
886 if (list_empty(&sbi->s_mb_largest_free_orders[i]))
887 continue;
888 read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
889 if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
890 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
891 continue;
892 }
893 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
894 bb_largest_free_order_node) {
895 if (sbi->s_mb_stats)
896 atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
897 if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
898 *group = iter->bb_group;
899 ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
900 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
901 return;
902 }
903 }
904 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
905 }
906
907 /* Increment cr and search again if no group is found */
908 *new_cr = CR_GOAL_LEN_FAST;
909 }
910
911 /*
912 * Find a suitable group of given order from the average fragments list.
913 */
914 static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
916 {
917 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
918 struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
919 rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
920 struct ext4_group_info *grp = NULL, *iter;
921 enum criteria cr = ac->ac_criteria;
922
923 if (list_empty(frag_list))
924 return NULL;
925 read_lock(frag_list_lock);
926 if (list_empty(frag_list)) {
927 read_unlock(frag_list_lock);
928 return NULL;
929 }
930 list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
931 if (sbi->s_mb_stats)
932 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
933 if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
934 grp = iter;
935 break;
936 }
937 }
938 read_unlock(frag_list_lock);
939 return grp;
940 }
941
942 /*
943 * Choose next group by traversing average fragment size list of suitable
944 * order. Updates *new_cr if cr level needs an update.
945 */
static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
947 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
948 {
949 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
950 struct ext4_group_info *grp = NULL;
951 int i;
952
953 if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
954 if (sbi->s_mb_stats)
955 atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
956 }
957
958 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
959 i < MB_NUM_ORDERS(ac->ac_sb); i++) {
960 grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
961 if (grp) {
962 *group = grp->bb_group;
963 ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
964 return;
965 }
966 }
967
/*
 * CR_BEST_AVAIL_LEN works based on the concept that we have
 * a larger normalized goal len request which can be trimmed to
 * a smaller goal len such that it can still satisfy the original
 * request len. However, allocation requests for non-regular
 * files never get normalized.
 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
 */
976 if (ac->ac_flags & EXT4_MB_HINT_DATA)
977 *new_cr = CR_BEST_AVAIL_LEN;
978 else
979 *new_cr = CR_GOAL_LEN_SLOW;
980 }
981
982 /*
983 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment
984 * order we have and proactively trim the goal request length to that order to
985 * find a suitable group faster.
986 *
987 * This optimizes allocation speed at the cost of slightly reduced
988 * preallocations. However, we make sure that we don't trim the request too
989 * much and fall to CR_GOAL_LEN_SLOW in that case.
990 */
static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
992 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
993 {
994 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
995 struct ext4_group_info *grp = NULL;
996 int i, order, min_order;
997 unsigned long num_stripe_clusters = 0;
998
999 if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
1000 if (sbi->s_mb_stats)
1001 atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
1002 }
1003
1004 /*
1005 * mb_avg_fragment_size_order() returns order in a way that makes
1006 * retrieving back the length using (1 << order) inaccurate. Hence, use
1007 * fls() instead since we need to know the actual length while modifying
1008 * goal length.
1009 */
1010 order = fls(ac->ac_g_ex.fe_len) - 1;
1011 min_order = order - sbi->s_mb_best_avail_max_trim_order;
1012 if (min_order < 0)
1013 min_order = 0;
1014
1015 if (sbi->s_stripe > 0) {
/*
 * We are assuming that the stripe size is always a multiple of
 * the cluster ratio, otherwise __ext4_fill_super exits early.
 */
1020 num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
1021 if (1 << min_order < num_stripe_clusters)
1022 /*
1023 * We consider 1 order less because later we round
1024 * up the goal len to num_stripe_clusters
1025 */
1026 min_order = fls(num_stripe_clusters) - 1;
1027 }
1028
1029 if (1 << min_order < ac->ac_o_ex.fe_len)
1030 min_order = fls(ac->ac_o_ex.fe_len);
1031
1032 for (i = order; i >= min_order; i--) {
1033 int frag_order;
1034 /*
1035 * Scale down goal len to make sure we find something
1036 * in the free fragments list. Basically, reduce
1037 * preallocations.
1038 */
1039 ac->ac_g_ex.fe_len = 1 << i;
1040
1041 if (num_stripe_clusters > 0) {
1042 /*
1043 * Try to round up the adjusted goal length to
1044 * stripe size (in cluster units) multiple for
1045 * efficiency.
1046 */
1047 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
1048 num_stripe_clusters);
1049 }
1050
1051 frag_order = mb_avg_fragment_size_order(ac->ac_sb,
1052 ac->ac_g_ex.fe_len);
1053
1054 grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
1055 if (grp) {
1056 *group = grp->bb_group;
1057 ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
1058 return;
1059 }
1060 }
1061
1062 /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
1063 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
1064 *new_cr = CR_GOAL_LEN_SLOW;
1065 }
1066
static inline int should_optimize_scan(struct ext4_allocation_context *ac)
1068 {
1069 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
1070 return 0;
1071 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
1072 return 0;
1073 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
1074 return 0;
1075 return 1;
1076 }
1077
1078 /*
1079 * Return next linear group for allocation. If linear traversal should not be
1080 * performed, this function just returns the same group
1081 */
1082 static ext4_group_t
next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
1084 ext4_group_t ngroups)
1085 {
1086 if (!should_optimize_scan(ac))
1087 goto inc_and_return;
1088
1089 if (ac->ac_groups_linear_remaining) {
1090 ac->ac_groups_linear_remaining--;
1091 goto inc_and_return;
1092 }
1093
1094 return group;
1095 inc_and_return:
1096 /*
1097 * Artificially restricted ngroups for non-extent
1098 * files makes group > ngroups possible on first loop.
1099 */
1100 return group + 1 >= ngroups ? 0 : group + 1;
1101 }
1102
1103 /*
1104 * ext4_mb_choose_next_group: choose next group for allocation.
1105 *
1106 * @ac Allocation Context
 * @new_cr This is an output parameter. If there is no good group
1108 * available at current CR level, this field is updated to indicate
1109 * the new cr level that should be used.
1110 * @group This is an input / output parameter. As an input it indicates the
1111 * next group that the allocator intends to use for allocation. As
1112 * output, this field indicates the next group that should be used as
1113 * determined by the optimization functions.
1114 * @ngroups Total number of groups
1115 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1117 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1118 {
1119 *new_cr = ac->ac_criteria;
1120
1121 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1122 *group = next_linear_group(ac, *group, ngroups);
1123 return;
1124 }
1125
1126 if (*new_cr == CR_POWER2_ALIGNED) {
1127 ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups);
1128 } else if (*new_cr == CR_GOAL_LEN_FAST) {
1129 ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups);
1130 } else if (*new_cr == CR_BEST_AVAIL_LEN) {
1131 ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups);
1132 } else {
1133 /*
1134 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1135 * bb_free. But until that happens, we should never come here.
1136 */
1137 WARN_ON(1);
1138 }
1139 }
1140
1141 /*
1142 * Cache the order of the largest free extent we have available in this block
1143 * group.
1144 */
1145 static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1147 {
1148 struct ext4_sb_info *sbi = EXT4_SB(sb);
1149 int i;
1150
1151 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1152 if (grp->bb_counters[i] > 0)
1153 break;
1154 /* No need to move between order lists? */
1155 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1156 i == grp->bb_largest_free_order) {
1157 grp->bb_largest_free_order = i;
1158 return;
1159 }
1160
1161 if (grp->bb_largest_free_order >= 0) {
1162 write_lock(&sbi->s_mb_largest_free_orders_locks[
1163 grp->bb_largest_free_order]);
1164 list_del_init(&grp->bb_largest_free_order_node);
1165 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1166 grp->bb_largest_free_order]);
1167 }
1168 grp->bb_largest_free_order = i;
1169 if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1170 write_lock(&sbi->s_mb_largest_free_orders_locks[
1171 grp->bb_largest_free_order]);
1172 list_add_tail(&grp->bb_largest_free_order_node,
1173 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1174 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1175 grp->bb_largest_free_order]);
1176 }
1177 }
1178
1179 static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
1181 void *buddy, void *bitmap, ext4_group_t group,
1182 struct ext4_group_info *grp)
1183 {
1184 struct ext4_sb_info *sbi = EXT4_SB(sb);
1185 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1186 ext4_grpblk_t i = 0;
1187 ext4_grpblk_t first;
1188 ext4_grpblk_t len;
1189 unsigned free = 0;
1190 unsigned fragments = 0;
1191 unsigned long long period = get_cycles();
1192
1193 /* initialize buddy from bitmap which is aggregation
1194 * of on-disk bitmap and preallocations */
1195 i = mb_find_next_zero_bit(bitmap, max, 0);
1196 grp->bb_first_free = i;
1197 while (i < max) {
1198 fragments++;
1199 first = i;
1200 i = mb_find_next_bit(bitmap, max, i);
1201 len = i - first;
1202 free += len;
1203 if (len > 1)
1204 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1205 else
1206 grp->bb_counters[0]++;
1207 if (i < max)
1208 i = mb_find_next_zero_bit(bitmap, max, i);
1209 }
1210 grp->bb_fragments = fragments;
1211
1212 if (free != grp->bb_free) {
1213 ext4_grp_locked_error(sb, group, 0, 0,
1214 "block bitmap and bg descriptor "
1215 "inconsistent: %u vs %u free clusters",
1216 free, grp->bb_free);
1217 /*
1218 * If we intend to continue, we consider group descriptor
1219 * corrupt and update bb_free using bitmap value
1220 */
1221 grp->bb_free = free;
1222 ext4_mark_group_bitmap_corrupted(sb, group,
1223 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1224 }
1225 mb_set_largest_free_order(sb, grp);
1226 mb_update_avg_fragment_size(sb, grp);
1227
1228 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1229
1230 period = get_cycles() - period;
1231 atomic_inc(&sbi->s_mb_buddies_generated);
1232 atomic64_add(period, &sbi->s_mb_generation_time);
1233 }
1234
static void mb_regenerate_buddy(struct ext4_buddy *e4b)
1236 {
1237 int count;
1238 int order = 1;
1239 void *buddy;
1240
1241 while ((buddy = mb_find_buddy(e4b, order++, &count)))
1242 mb_set_bits(buddy, 0, count);
1243
1244 e4b->bd_info->bb_fragments = 0;
1245 memset(e4b->bd_info->bb_counters, 0,
1246 sizeof(*e4b->bd_info->bb_counters) *
1247 (e4b->bd_sb->s_blocksize_bits + 2));
1248
1249 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
1250 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
1251 }
1252
/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. It involves the
 * block bitmap and the buddy information, which are
 * stored in the inode as
1258 *
1259 * { page }
1260 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1261 *
1262 *
1263 * one block each for bitmap and buddy information.
1264 * So for each group we take up 2 blocks. A page can
1265 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1266 * So it can have information regarding groups_per_page which
1267 * is blocks_per_page/2
1268 *
1269 * Locking note: This routine takes the block group lock of all groups
1270 * for this page; do not hold this lock when calling this routine!
1271 */
1272
static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1274 {
1275 ext4_group_t ngroups;
1276 unsigned int blocksize;
1277 int blocks_per_page;
1278 int groups_per_page;
1279 int err = 0;
1280 int i;
1281 ext4_group_t first_group, group;
1282 int first_block;
1283 struct super_block *sb;
1284 struct buffer_head *bhs;
1285 struct buffer_head **bh = NULL;
1286 struct inode *inode;
1287 char *data;
1288 char *bitmap;
1289 struct ext4_group_info *grinfo;
1290
1291 inode = page->mapping->host;
1292 sb = inode->i_sb;
1293 ngroups = ext4_get_groups_count(sb);
1294 blocksize = i_blocksize(inode);
1295 blocks_per_page = PAGE_SIZE / blocksize;
1296
1297 mb_debug(sb, "init page %lu\n", page->index);
1298
1299 groups_per_page = blocks_per_page >> 1;
1300 if (groups_per_page == 0)
1301 groups_per_page = 1;
1302
1303 /* allocate buffer_heads to read bitmaps */
1304 if (groups_per_page > 1) {
1305 i = sizeof(struct buffer_head *) * groups_per_page;
1306 bh = kzalloc(i, gfp);
1307 if (bh == NULL)
1308 return -ENOMEM;
1309 } else
1310 bh = &bhs;
1311
1312 first_group = page->index * blocks_per_page / 2;
1313
1314 /* read all groups the page covers into the cache */
1315 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1316 if (group >= ngroups)
1317 break;
1318
1319 grinfo = ext4_get_group_info(sb, group);
1320 if (!grinfo)
1321 continue;
1322 /*
1323 * If page is uptodate then we came here after online resize
1324 * which added some new uninitialized group info structs, so
1325 * we must skip all initialized uptodate buddies on the page,
1326 * which may be currently in use by an allocating task.
1327 */
1328 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1329 bh[i] = NULL;
1330 continue;
1331 }
1332 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1333 if (IS_ERR(bh[i])) {
1334 err = PTR_ERR(bh[i]);
1335 bh[i] = NULL;
1336 goto out;
1337 }
1338 mb_debug(sb, "read bitmap for group %u\n", group);
1339 }
1340
1341 /* wait for I/O completion */
1342 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1343 int err2;
1344
1345 if (!bh[i])
1346 continue;
1347 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1348 if (!err)
1349 err = err2;
1350 }
1351
1352 first_block = page->index * blocks_per_page;
1353 for (i = 0; i < blocks_per_page; i++) {
1354 group = (first_block + i) >> 1;
1355 if (group >= ngroups)
1356 break;
1357
1358 if (!bh[group - first_group])
1359 /* skip initialized uptodate buddy */
1360 continue;
1361
1362 if (!buffer_verified(bh[group - first_group]))
1363 /* Skip faulty bitmaps */
1364 continue;
1365 err = 0;
1366
/*
 * data carries information regarding this
 * particular group in the format specified
 * above
 */
1373 data = page_address(page) + (i * blocksize);
1374 bitmap = bh[group - first_group]->b_data;
1375
1376 /*
1377 * We place the buddy block and bitmap block
1378 * close together
1379 */
1380 grinfo = ext4_get_group_info(sb, group);
1381 if (!grinfo) {
1382 err = -EFSCORRUPTED;
1383 goto out;
1384 }
1385 if ((first_block + i) & 1) {
1386 /* this is block of buddy */
1387 BUG_ON(incore == NULL);
1388 mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1389 group, page->index, i * blocksize);
1390 trace_ext4_mb_buddy_bitmap_load(sb, group);
1391 grinfo->bb_fragments = 0;
1392 memset(grinfo->bb_counters, 0,
1393 sizeof(*grinfo->bb_counters) *
1394 (MB_NUM_ORDERS(sb)));
1395 /*
1396 * incore got set to the group block bitmap below
1397 */
1398 ext4_lock_group(sb, group);
1399 /* init the buddy */
1400 memset(data, 0xff, blocksize);
1401 ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
1402 ext4_unlock_group(sb, group);
1403 incore = NULL;
1404 } else {
1405 /* this is block of bitmap */
1406 BUG_ON(incore != NULL);
1407 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1408 group, page->index, i * blocksize);
1409 trace_ext4_mb_bitmap_load(sb, group);
1410
1411 /* see comments in ext4_mb_put_pa() */
1412 ext4_lock_group(sb, group);
1413 memcpy(data, bitmap, blocksize);
1414
1415 /* mark all preallocated blks used in in-core bitmap */
1416 ext4_mb_generate_from_pa(sb, data, group);
1417 WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
1418 ext4_unlock_group(sb, group);
1419
1420 /* set incore so that the buddy information can be
1421 * generated using this
1422 */
1423 incore = data;
1424 }
1425 }
1426 SetPageUptodate(page);
1427
1428 out:
1429 if (bh) {
1430 for (i = 0; i < groups_per_page; i++)
1431 brelse(bh[i]);
1432 if (bh != &bhs)
1433 kfree(bh);
1434 }
1435 return err;
1436 }
1437
/*
 * Lock the buddy and bitmap pages. This makes sure that parallel init_group
 * on the same buddy page doesn't happen while holding the buddy page lock.
 * Return locked buddy and bitmap pages in the e4b struct. If buddy and bitmap
 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1445 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1446 {
1447 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1448 int block, pnum, poff;
1449 int blocks_per_page;
1450 struct page *page;
1451
1452 e4b->bd_buddy_page = NULL;
1453 e4b->bd_bitmap_page = NULL;
1454
1455 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1456 /*
1457 * the buddy cache inode stores the block bitmap
1458 * and buddy information in consecutive blocks.
1459 * So for each group we need two blocks.
1460 */
1461 block = group * 2;
1462 pnum = block / blocks_per_page;
1463 poff = block % blocks_per_page;
1464 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1465 if (!page)
1466 return -ENOMEM;
1467 BUG_ON(page->mapping != inode->i_mapping);
1468 e4b->bd_bitmap_page = page;
1469 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1470
1471 if (blocks_per_page >= 2) {
1472 /* buddy and bitmap are on the same page */
1473 return 0;
1474 }
1475
1476 block++;
1477 pnum = block / blocks_per_page;
1478 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1479 if (!page)
1480 return -ENOMEM;
1481 BUG_ON(page->mapping != inode->i_mapping);
1482 e4b->bd_buddy_page = page;
1483 return 0;
1484 }
1485
static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1487 {
1488 if (e4b->bd_bitmap_page) {
1489 unlock_page(e4b->bd_bitmap_page);
1490 put_page(e4b->bd_bitmap_page);
1491 }
1492 if (e4b->bd_buddy_page) {
1493 unlock_page(e4b->bd_buddy_page);
1494 put_page(e4b->bd_buddy_page);
1495 }
1496 }
1497
1498 /*
1499 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1500 * block group lock of all groups for this page; do not hold the BG lock when
1501 * calling this routine!
1502 */
1503 static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1505 {
1506
1507 struct ext4_group_info *this_grp;
1508 struct ext4_buddy e4b;
1509 struct page *page;
1510 int ret = 0;
1511
1512 might_sleep();
1513 mb_debug(sb, "init group %u\n", group);
1514 this_grp = ext4_get_group_info(sb, group);
1515 if (!this_grp)
1516 return -EFSCORRUPTED;
1517
/*
 * This ensures that we don't reinit the buddy cache
 * page which maps to the group from which we are already
 * allocating. If we are looking at the buddy cache we would
 * have taken a reference using ext4_mb_load_buddy and that
 * would have pinned the buddy page to the page cache.
 * The call to ext4_mb_get_buddy_page_lock will mark the
 * page accessed.
 */
1527 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1528 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1529 /*
1530 * somebody initialized the group
1531 * return without doing anything
1532 */
1533 goto err;
1534 }
1535
1536 page = e4b.bd_bitmap_page;
1537 ret = ext4_mb_init_cache(page, NULL, gfp);
1538 if (ret)
1539 goto err;
1540 if (!PageUptodate(page)) {
1541 ret = -EIO;
1542 goto err;
1543 }
1544
1545 if (e4b.bd_buddy_page == NULL) {
1546 /*
1547 * If both the bitmap and buddy are in
1548 * the same page we don't need to force
1549 * init the buddy
1550 */
1551 ret = 0;
1552 goto err;
1553 }
1554 /* init buddy cache */
1555 page = e4b.bd_buddy_page;
1556 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1557 if (ret)
1558 goto err;
1559 if (!PageUptodate(page)) {
1560 ret = -EIO;
1561 goto err;
1562 }
1563 err:
1564 ext4_mb_put_buddy_page_lock(&e4b);
1565 return ret;
1566 }
1567
1568 /*
1569 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1570 * block group lock of all groups for this page; do not hold the BG lock when
1571 * calling this routine!
1572 */
1573 static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1575 struct ext4_buddy *e4b, gfp_t gfp)
1576 {
1577 int blocks_per_page;
1578 int block;
1579 int pnum;
1580 int poff;
1581 struct page *page;
1582 int ret;
1583 struct ext4_group_info *grp;
1584 struct ext4_sb_info *sbi = EXT4_SB(sb);
1585 struct inode *inode = sbi->s_buddy_cache;
1586
1587 might_sleep();
1588 mb_debug(sb, "load group %u\n", group);
1589
1590 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1591 grp = ext4_get_group_info(sb, group);
1592 if (!grp)
1593 return -EFSCORRUPTED;
1594
1595 e4b->bd_blkbits = sb->s_blocksize_bits;
1596 e4b->bd_info = grp;
1597 e4b->bd_sb = sb;
1598 e4b->bd_group = group;
1599 e4b->bd_buddy_page = NULL;
1600 e4b->bd_bitmap_page = NULL;
1601
1602 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1603 /*
1604 * we need full data about the group
1605 * to make a good selection
1606 */
1607 ret = ext4_mb_init_group(sb, group, gfp);
1608 if (ret)
1609 return ret;
1610 }
1611
1612 /*
1613 * the buddy cache inode stores the block bitmap
1614 * and buddy information in consecutive blocks.
1615 * So for each group we need two blocks.
1616 */
1617 block = group * 2;
1618 pnum = block / blocks_per_page;
1619 poff = block % blocks_per_page;
1620
1621 /* we could use find_or_create_page(), but it would lock the
1622 * page, which we'd like to avoid in the fast path ... */
1623 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1624 if (page == NULL || !PageUptodate(page)) {
1625 if (page)
1626 /*
1627 * drop the page reference and try
1628 * to get the page with the lock. If
1629 * the page is not uptodate, that implies
1630 * somebody just created the page but
1631 * has not yet initialized it, so
1632 * wait for it to be initialized.
1633 */
1634 put_page(page);
1635 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1636 if (page) {
1637 if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1638 "ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
1639 /* should never happen */
1640 unlock_page(page);
1641 ret = -EINVAL;
1642 goto err;
1643 }
1644 if (!PageUptodate(page)) {
1645 ret = ext4_mb_init_cache(page, NULL, gfp);
1646 if (ret) {
1647 unlock_page(page);
1648 goto err;
1649 }
1650 mb_cmp_bitmaps(e4b, page_address(page) +
1651 (poff * sb->s_blocksize));
1652 }
1653 unlock_page(page);
1654 }
1655 }
1656 if (page == NULL) {
1657 ret = -ENOMEM;
1658 goto err;
1659 }
1660 if (!PageUptodate(page)) {
1661 ret = -EIO;
1662 goto err;
1663 }
1664
1665 /* Pages marked accessed already */
1666 e4b->bd_bitmap_page = page;
1667 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1668
1669 block++;
1670 pnum = block / blocks_per_page;
1671 poff = block % blocks_per_page;
1672
1673 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1674 if (page == NULL || !PageUptodate(page)) {
1675 if (page)
1676 put_page(page);
1677 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1678 if (page) {
1679 if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
1680 "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
1681 /* should never happen */
1682 unlock_page(page);
1683 ret = -EINVAL;
1684 goto err;
1685 }
1686 if (!PageUptodate(page)) {
1687 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1688 gfp);
1689 if (ret) {
1690 unlock_page(page);
1691 goto err;
1692 }
1693 }
1694 unlock_page(page);
1695 }
1696 }
1697 if (page == NULL) {
1698 ret = -ENOMEM;
1699 goto err;
1700 }
1701 if (!PageUptodate(page)) {
1702 ret = -EIO;
1703 goto err;
1704 }
1705
1706 /* Pages marked accessed already */
1707 e4b->bd_buddy_page = page;
1708 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1709
1710 return 0;
1711
1712 err:
1713 if (page)
1714 put_page(page);
1715 if (e4b->bd_bitmap_page)
1716 put_page(e4b->bd_bitmap_page);
1717
1718 e4b->bd_buddy = NULL;
1719 e4b->bd_bitmap = NULL;
1720 return ret;
1721 }
1722
1723 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1724 struct ext4_buddy *e4b)
1725 {
1726 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1727 }
1728
1729 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1730 {
1731 if (e4b->bd_bitmap_page)
1732 put_page(e4b->bd_bitmap_page);
1733 if (e4b->bd_buddy_page)
1734 put_page(e4b->bd_buddy_page);
1735 }
1736
1737
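/*
 * Return the order of the free buddy chunk that contains @block: in the
 * order-N buddy bitmap a cleared bit means the corresponding 2^N-cluster
 * chunk is entirely free, so scan upward from order 1. Returns 0 when the
 * block is free only as a single (order-0) cluster.
 */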
1738 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1739 {
1740 int order = 1, max;
1741 void *bb;
1742
1743 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1744 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1745
1746 while (order <= e4b->bd_blkbits + 1) {
1747 bb = mb_find_buddy(e4b, order, &max);
1748 if (!mb_test_bit(block >> order, bb)) {
1749 /* this block is part of buddy of order 'order' */
1750 return order;
1751 }
1752 order++;
1753 }
1754 return 0;
1755 }
1756
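/*
 * Clear @len bits starting at @cur in bitmap @bm. Whole 32-bit words are
 * cleared with a single store whenever the position is word aligned and at
 * least 32 bits remain; the remaining bits are cleared one at a time.
 */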
1757 static void mb_clear_bits(void *bm, int cur, int len)
1758 {
1759 __u32 *addr;
1760
1761 len = cur + len;
1762 while (cur < len) {
1763 if ((cur & 31) == 0 && (len - cur) >= 32) {
1764 /* fast path: clear whole word at once */
1765 addr = bm + (cur >> 3);
1766 *addr = 0;
1767 cur += 32;
1768 continue;
1769 }
1770 mb_clear_bit(cur, bm);
1771 cur++;
1772 }
1773 }
1774
1775 /* clear bits in the given range;
1776 * return the index of the first bit found already zero, if any, -1 otherwise
1777 */
1778 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1779 {
1780 __u32 *addr;
1781 int zero_bit = -1;
1782
1783 len = cur + len;
1784 while (cur < len) {
1785 if ((cur & 31) == 0 && (len - cur) >= 32) {
1786 /* fast path: clear whole word at once */
1787 addr = bm + (cur >> 3);
1788 if (*addr != (__u32)(-1) && zero_bit == -1)
1789 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1790 *addr = 0;
1791 cur += 32;
1792 continue;
1793 }
1794 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1795 zero_bit = cur;
1796 cur++;
1797 }
1798
1799 return zero_bit;
1800 }
1801
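/*
 * Set @len bits starting at @cur in bitmap @bm, using whole-word stores for
 * aligned 32-bit spans, analogous to mb_clear_bits() above.
 */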
1802 void mb_set_bits(void *bm, int cur, int len)
1803 {
1804 __u32 *addr;
1805
1806 len = cur + len;
1807 while (cur < len) {
1808 if ((cur & 31) == 0 && (len - cur) >= 32) {
1809 /* fast path: set whole word at once */
1810 addr = bm + (cur >> 3);
1811 *addr = 0xffffffff;
1812 cur += 32;
1813 continue;
1814 }
1815 mb_set_bit(cur, bm);
1816 cur++;
1817 }
1818 }
1819
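/*
 * Fix up one border of a freed range at the current buddy order. If the
 * neighbour on @side is busy, the border bit itself becomes a free chunk at
 * this order (the range shrinks, return +1); if the neighbour is free, it is
 * absorbed so the pair can merge at the next order (the range grows,
 * return -1). The return value is the delta applied to bb_counters[order].
 */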
1820 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1821 {
1822 if (mb_test_bit(*bit + side, bitmap)) {
1823 mb_clear_bit(*bit, bitmap);
1824 (*bit) -= side;
1825 return 1;
1826 }
1827 else {
1828 (*bit) += side;
1829 mb_set_bit(*bit, bitmap);
1830 return -1;
1831 }
1832 }
1833
1834 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1835 {
1836 int max;
1837 int order = 1;
1838 void *buddy = mb_find_buddy(e4b, order, &max);
1839
1840 while (buddy) {
1841 void *buddy2;
1842
1843 /* Bits in range [first; last] are known to be set since
1844 * corresponding blocks were allocated. Bits in range
1845 * (first; last) will stay set because they form buddies on
1846 * upper layer. We just deal with borders if they don't
1847 * align with upper layer and then go up.
1848 * Releasing an entire group is all about clearing
1849 * a single bit of the highest-order buddy.
1850 */
1851
1852 /* Example:
1853 * ---------------------------------
1854 * | 1 | 1 | 1 | 1 |
1855 * ---------------------------------
1856 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1857 * ---------------------------------
1858 * 0 1 2 3 4 5 6 7
1859 * \_____________________/
1860 *
1861 * Neither [1] nor [6] is aligned to above layer.
1862 * Left neighbour [0] is free, so mark it busy,
1863 * decrease bb_counters and extend range to
1864 * [0; 6]
1865 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1866 * mark [6] free, increase bb_counters and shrink range to
1867 * [0; 5].
1868 * Then shift range to [0; 2], go up and do the same.
1869 */
1870
1871
1872 if (first & 1)
1873 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1874 if (!(last & 1))
1875 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1876 if (first > last)
1877 break;
1878 order++;
1879
1880 buddy2 = mb_find_buddy(e4b, order, &max);
1881 if (!buddy2) {
1882 mb_clear_bits(buddy, first, last - first + 1);
1883 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1884 break;
1885 }
1886 first >>= 1;
1887 last >>= 1;
1888 buddy = buddy2;
1889 }
1890 }
1891
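/*
 * Free @count clusters starting at @first in the group's buddy: clear the
 * bits in the block bitmap, update bb_free, bb_first_free and the fragment
 * count, then propagate the freed range up the buddy orders through
 * mb_buddy_mark_free(). If a cluster in the range was already free, the
 * group is marked corrupted. Called with the group spinlock held.
 */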
1892 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1893 int first, int count)
1894 {
1895 int left_is_free = 0;
1896 int right_is_free = 0;
1897 int block;
1898 int last = first + count - 1;
1899 struct super_block *sb = e4b->bd_sb;
1900
1901 if (WARN_ON(count == 0))
1902 return;
1903 BUG_ON(last >= (sb->s_blocksize << 3));
1904 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1905 /* Don't bother if the block group is corrupt. */
1906 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1907 return;
1908
1909 mb_check_buddy(e4b);
1910 mb_free_blocks_double(inode, e4b, first, count);
1911
1912 /* access memory sequentially: check left neighbour,
1913 * clear range and then check right neighbour
1914 */
1915 if (first != 0)
1916 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1917 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1918 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1919 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1920
1921 if (unlikely(block != -1)) {
1922 struct ext4_sb_info *sbi = EXT4_SB(sb);
1923 ext4_fsblk_t blocknr;
1924
1925 /*
1926 * Fastcommit replay can free already freed blocks which
1927 * corrupts allocation info. Regenerate it.
1928 */
1929 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
1930 mb_regenerate_buddy(e4b);
1931 goto check;
1932 }
1933
1934 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1935 blocknr += EXT4_C2B(sbi, block);
1936 ext4_grp_locked_error(sb, e4b->bd_group,
1937 inode ? inode->i_ino : 0, blocknr,
1938 "freeing already freed block (bit %u); block bitmap corrupt.",
1939 block);
1940 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
1941 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1942 return;
1943 }
1944
1945 this_cpu_inc(discard_pa_seq);
1946 e4b->bd_info->bb_free += count;
1947 if (first < e4b->bd_info->bb_first_free)
1948 e4b->bd_info->bb_first_free = first;
1949
1950 /* let's maintain fragments counter */
1951 if (left_is_free && right_is_free)
1952 e4b->bd_info->bb_fragments--;
1953 else if (!left_is_free && !right_is_free)
1954 e4b->bd_info->bb_fragments++;
1955
1956 /* buddy[0] == bd_bitmap is a special case, so handle
1957 * it right away and let mb_buddy_mark_free stay free of
1958 * zero order checks.
1959 * Check if neighbours are to be coalesced,
1960 * adjust bitmap bb_counters and borders appropriately.
1961 */
1962 if (first & 1) {
1963 first += !left_is_free;
1964 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1965 }
1966 if (!(last & 1)) {
1967 last -= !right_is_free;
1968 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1969 }
1970
1971 if (first <= last)
1972 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1973
1974 mb_set_largest_free_order(sb, e4b->bd_info);
1975 mb_update_avg_fragment_size(sb, e4b->bd_info);
1976 check:
1977 mb_check_buddy(e4b);
1978 }
1979
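/*
 * Build the free extent containing the free cluster @block: find the buddy
 * chunk that covers it, trim the extent to begin at @block, and keep
 * appending adjacent free chunks until at least @needed clusters have been
 * gathered or a used cluster is hit. Returns the extent length found
 * (0 if @block is actually in use). Called with the group lock held.
 */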
1980 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1981 int needed, struct ext4_free_extent *ex)
1982 {
1983 int next = block;
1984 int max, order;
1985 void *buddy;
1986
1987 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1988 BUG_ON(ex == NULL);
1989
1990 buddy = mb_find_buddy(e4b, 0, &max);
1991 BUG_ON(buddy == NULL);
1992 BUG_ON(block >= max);
1993 if (mb_test_bit(block, buddy)) {
1994 ex->fe_len = 0;
1995 ex->fe_start = 0;
1996 ex->fe_group = 0;
1997 return 0;
1998 }
1999
2000 /* find actual order */
2001 order = mb_find_order_for_block(e4b, block);
2002 block = block >> order;
2003
2004 ex->fe_len = 1 << order;
2005 ex->fe_start = block << order;
2006 ex->fe_group = e4b->bd_group;
2007
2008 /* calc difference from given start */
2009 next = next - ex->fe_start;
2010 ex->fe_len -= next;
2011 ex->fe_start += next;
2012
2013 while (needed > ex->fe_len &&
2014 mb_find_buddy(e4b, order, &max)) {
2015
2016 if (block + 1 >= max)
2017 break;
2018
2019 next = (block + 1) * (1 << order);
2020 if (mb_test_bit(next, e4b->bd_bitmap))
2021 break;
2022
2023 order = mb_find_order_for_block(e4b, next);
2024
2025 block = next >> order;
2026 ex->fe_len += 1 << order;
2027 }
2028
2029 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
2030 /* Should never happen! (but apparently sometimes does?!?) */
2031 WARN_ON(1);
2032 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
2033 "corruption or bug in mb_find_extent "
2034 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
2035 block, order, needed, ex->fe_group, ex->fe_start,
2036 ex->fe_len, ex->fe_logical);
2037 ex->fe_len = 0;
2038 ex->fe_start = 0;
2039 ex->fe_group = 0;
2040 }
2041 return ex->fe_len;
2042 }
2043
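/*
 * Mark the extent @ex as in use: update bb_free, bb_first_free and the
 * fragment count, then walk the range marking whole buddy chunks used and
 * splitting larger chunks where the range is not aligned, and finally set
 * the corresponding bits in the block bitmap. The return value packs the
 * remaining tail length (low 16 bits) and the order of the first split
 * (high 16 bits) for allocation statistics. Called under the group lock.
 */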
2044 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
2045 {
2046 int ord;
2047 int mlen = 0;
2048 int max = 0;
2049 int cur;
2050 int start = ex->fe_start;
2051 int len = ex->fe_len;
2052 unsigned ret = 0;
2053 int len0 = len;
2054 void *buddy;
2055 bool split = false;
2056
2057 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
2058 BUG_ON(e4b->bd_group != ex->fe_group);
2059 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
2060 mb_check_buddy(e4b);
2061 mb_mark_used_double(e4b, start, len);
2062
2063 this_cpu_inc(discard_pa_seq);
2064 e4b->bd_info->bb_free -= len;
2065 if (e4b->bd_info->bb_first_free == start)
2066 e4b->bd_info->bb_first_free += len;
2067
2068 /* let's maintain fragments counter */
2069 if (start != 0)
2070 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
2071 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
2072 max = !mb_test_bit(start + len, e4b->bd_bitmap);
2073 if (mlen && max)
2074 e4b->bd_info->bb_fragments++;
2075 else if (!mlen && !max)
2076 e4b->bd_info->bb_fragments--;
2077
2078 /* let's maintain buddy itself */
2079 while (len) {
2080 if (!split)
2081 ord = mb_find_order_for_block(e4b, start);
2082
2083 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
2084 /* the whole chunk may be allocated at once! */
2085 mlen = 1 << ord;
2086 if (!split)
2087 buddy = mb_find_buddy(e4b, ord, &max);
2088 else
2089 split = false;
2090 BUG_ON((start >> ord) >= max);
2091 mb_set_bit(start >> ord, buddy);
2092 e4b->bd_info->bb_counters[ord]--;
2093 start += mlen;
2094 len -= mlen;
2095 BUG_ON(len < 0);
2096 continue;
2097 }
2098
2099 /* store for history */
2100 if (ret == 0)
2101 ret = len | (ord << 16);
2102
2103 /* we have to split large buddy */
2104 BUG_ON(ord <= 0);
2105 buddy = mb_find_buddy(e4b, ord, &max);
2106 mb_set_bit(start >> ord, buddy);
2107 e4b->bd_info->bb_counters[ord]--;
2108
2109 ord--;
2110 cur = (start >> ord) & ~1U;
2111 buddy = mb_find_buddy(e4b, ord, &max);
2112 mb_clear_bit(cur, buddy);
2113 mb_clear_bit(cur + 1, buddy);
2114 e4b->bd_info->bb_counters[ord]++;
2115 e4b->bd_info->bb_counters[ord]++;
2116 split = true;
2117 }
2118 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
2119
2120 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
2121 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2122 mb_check_buddy(e4b);
2123
2124 return ret;
2125 }
2126
2127 /*
2128 * Must be called under group lock!
2129 */
2130 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2131 struct ext4_buddy *e4b)
2132 {
2133 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2134 int ret;
2135
2136 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2137 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2138
2139 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2140 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2141 ret = mb_mark_used(e4b, &ac->ac_b_ex);
2142
2143 /* preallocation can change ac_b_ex, thus we store actually
2144 * allocated blocks for history */
2145 ac->ac_f_ex = ac->ac_b_ex;
2146
2147 ac->ac_status = AC_STATUS_FOUND;
2148 ac->ac_tail = ret & 0xffff;
2149 ac->ac_buddy = ret >> 16;
2150
2151 /*
2152 * take the page reference. We want the page to be pinned
2153 * so that we don't get an ext4_mb_init_cache call for this
2154 * group until we update the bitmap. That would mean we
2155 * double allocate blocks. The reference is dropped
2156 * in ext4_mb_release_context
2157 */
2158 ac->ac_bitmap_page = e4b->bd_bitmap_page;
2159 get_page(ac->ac_bitmap_page);
2160 ac->ac_buddy_page = e4b->bd_buddy_page;
2161 get_page(ac->ac_buddy_page);
2162 /* store last allocated for subsequent stream allocation */
2163 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2164 spin_lock(&sbi->s_md_lock);
2165 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2166 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2167 spin_unlock(&sbi->s_md_lock);
2168 }
2169 /*
2170 * As we've just preallocated more space than
2171 * the user originally requested, we store the allocated
2172 * space in a special descriptor.
2173 */
2174 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2175 ext4_mb_new_preallocation(ac);
2176
2177 }
2178
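/*
 * Decide whether scanning should stop: break out of the search entirely once
 * s_mb_max_to_scan extents have been examined, and, once the best extent
 * found is at least as long as the goal, take it after s_mb_min_to_scan
 * extents have been looked at (or right away when the group scan is
 * finishing).
 */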
2179 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2180 struct ext4_buddy *e4b,
2181 int finish_group)
2182 {
2183 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2184 struct ext4_free_extent *bex = &ac->ac_b_ex;
2185 struct ext4_free_extent *gex = &ac->ac_g_ex;
2186
2187 if (ac->ac_status == AC_STATUS_FOUND)
2188 return;
2189 /*
2190 * We don't want to scan for a whole year
2191 */
2192 if (ac->ac_found > sbi->s_mb_max_to_scan &&
2193 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2194 ac->ac_status = AC_STATUS_BREAK;
2195 return;
2196 }
2197
2198 /*
2199 * Haven't found a good chunk so far, let's continue
2200 */
2201 if (bex->fe_len < gex->fe_len)
2202 return;
2203
2204 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2205 ext4_mb_use_best_found(ac, e4b);
2206 }
2207
2208 /*
2209 * The routine checks whether the found extent is good enough. If it is,
2210 * the extent gets marked used and a flag is set in the context
2211 * to stop scanning. Otherwise, the extent is compared with the
2212 * previously found extent and, if the new one is better, it's stored
2213 * in the context. Later, the best found extent will be used if
2214 * mballoc can't find a good enough extent.
2215 *
2216 * The algorithm used is roughly as follows:
2217 *
2218 * * If the free extent found is exactly as big as the goal, then
2219 * stop the scan and use it immediately
2220 *
2221 * * If the free extent found is smaller than the goal, then keep retrying
2222 * up to a max of sbi->s_mb_max_to_scan times (default 200). After
2223 * that stop scanning and use whatever we have.
2224 *
2225 * * If the free extent found is bigger than the goal, then keep retrying
2226 * up to a max of sbi->s_mb_min_to_scan times (default 10) before
2227 * stopping the scan and using the extent.
2228 *
2229 *
2230 * FIXME: real allocation policy is to be designed yet!
2231 */
2232 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2233 struct ext4_free_extent *ex,
2234 struct ext4_buddy *e4b)
2235 {
2236 struct ext4_free_extent *bex = &ac->ac_b_ex;
2237 struct ext4_free_extent *gex = &ac->ac_g_ex;
2238
2239 BUG_ON(ex->fe_len <= 0);
2240 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2241 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2242 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2243
2244 ac->ac_found++;
2245 ac->ac_cX_found[ac->ac_criteria]++;
2246
2247 /*
2248 * The special case - take what you catch first
2249 */
2250 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2251 *bex = *ex;
2252 ext4_mb_use_best_found(ac, e4b);
2253 return;
2254 }
2255
2256 /*
2257 * Let's check whether the chunk is good enough
2258 */
2259 if (ex->fe_len == gex->fe_len) {
2260 *bex = *ex;
2261 ext4_mb_use_best_found(ac, e4b);
2262 return;
2263 }
2264
2265 /*
2266 * If this is first found extent, just store it in the context
2267 */
2268 if (bex->fe_len == 0) {
2269 *bex = *ex;
2270 return;
2271 }
2272
2273 /*
2274 * If new found extent is better, store it in the context
2275 */
2276 if (bex->fe_len < gex->fe_len) {
2277 /* if the request isn't satisfied, any found extent
2278 * larger than the previous best one is better */
2279 if (ex->fe_len > bex->fe_len)
2280 *bex = *ex;
2281 } else if (ex->fe_len > gex->fe_len) {
2282 /* if the request is satisfied, then we try to find
2283 * an extent that still satisfies the request, but is
2284 * smaller than the previous one */
2285 if (ex->fe_len < bex->fe_len)
2286 *bex = *ex;
2287 }
2288
2289 ext4_mb_check_limits(ac, e4b, 0);
2290 }
2291
2292 static noinline_for_stack
2293 void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2294 struct ext4_buddy *e4b)
2295 {
2296 struct ext4_free_extent ex = ac->ac_b_ex;
2297 ext4_group_t group = ex.fe_group;
2298 int max;
2299 int err;
2300
2301 BUG_ON(ex.fe_len <= 0);
2302 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2303 if (err)
2304 return;
2305
2306 ext4_lock_group(ac->ac_sb, group);
2307 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2308 goto out;
2309
2310 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2311
2312 if (max > 0) {
2313 ac->ac_b_ex = ex;
2314 ext4_mb_use_best_found(ac, e4b);
2315 }
2316
2317 out:
2318 ext4_unlock_group(ac->ac_sb, group);
2319 ext4_mb_unload_buddy(e4b);
2320 }
2321
2322 static noinline_for_stack
2323 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2324 struct ext4_buddy *e4b)
2325 {
2326 ext4_group_t group = ac->ac_g_ex.fe_group;
2327 int max;
2328 int err;
2329 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2330 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2331 struct ext4_free_extent ex;
2332
2333 if (!grp)
2334 return -EFSCORRUPTED;
2335 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
2336 return 0;
2337 if (grp->bb_free == 0)
2338 return 0;
2339
2340 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2341 if (err)
2342 return err;
2343
2344 ext4_lock_group(ac->ac_sb, group);
2345 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
2346 goto out;
2347
2348 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2349 ac->ac_g_ex.fe_len, &ex);
2350 ex.fe_logical = 0xDEADFA11; /* debug value */
2351
2352 if (max >= ac->ac_g_ex.fe_len &&
2353 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
2354 ext4_fsblk_t start;
2355
2356 start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2357 /* use do_div to get remainder (would be 64-bit modulo) */
2358 if (do_div(start, sbi->s_stripe) == 0) {
2359 ac->ac_found++;
2360 ac->ac_b_ex = ex;
2361 ext4_mb_use_best_found(ac, e4b);
2362 }
2363 } else if (max >= ac->ac_g_ex.fe_len) {
2364 BUG_ON(ex.fe_len <= 0);
2365 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2366 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2367 ac->ac_found++;
2368 ac->ac_b_ex = ex;
2369 ext4_mb_use_best_found(ac, e4b);
2370 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2371 /* Sometimes, the caller may want to merge even a small
2372 * number of blocks into an existing extent */
2373 BUG_ON(ex.fe_len <= 0);
2374 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2375 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2376 ac->ac_found++;
2377 ac->ac_b_ex = ex;
2378 ext4_mb_use_best_found(ac, e4b);
2379 }
2380 out:
2381 ext4_unlock_group(ac->ac_sb, group);
2382 ext4_mb_unload_buddy(e4b);
2383
2384 return 0;
2385 }
2386
2387 /*
2388 * The routine scans buddy structures (not the bitmap!) from the given order
2389 * up to the max order and tries to find a chunk big enough to satisfy the request
2390 */
2391 static noinline_for_stack
2392 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2393 struct ext4_buddy *e4b)
2394 {
2395 struct super_block *sb = ac->ac_sb;
2396 struct ext4_group_info *grp = e4b->bd_info;
2397 void *buddy;
2398 int i;
2399 int k;
2400 int max;
2401
2402 BUG_ON(ac->ac_2order <= 0);
2403 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2404 if (grp->bb_counters[i] == 0)
2405 continue;
2406
2407 buddy = mb_find_buddy(e4b, i, &max);
2408 if (WARN_RATELIMIT(buddy == NULL,
2409 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
2410 continue;
2411
2412 k = mb_find_next_zero_bit(buddy, max, 0);
2413 if (k >= max) {
2414 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2415 "%d free clusters of order %d. But found 0",
2416 grp->bb_counters[i], i);
2417 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2418 e4b->bd_group,
2419 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2420 break;
2421 }
2422 ac->ac_found++;
2423 ac->ac_cX_found[ac->ac_criteria]++;
2424
2425 ac->ac_b_ex.fe_len = 1 << i;
2426 ac->ac_b_ex.fe_start = k << i;
2427 ac->ac_b_ex.fe_group = e4b->bd_group;
2428
2429 ext4_mb_use_best_found(ac, e4b);
2430
2431 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2432
2433 if (EXT4_SB(sb)->s_mb_stats)
2434 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2435
2436 break;
2437 }
2438 }
2439
2440 /*
2441 * The routine scans the group and measures all found extents.
2442 * In order to optimize scanning, the caller must pass the number of
2443 * free blocks in the group, so the routine knows the upper limit.
2444 */
2445 static noinline_for_stack
2446 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2447 struct ext4_buddy *e4b)
2448 {
2449 struct super_block *sb = ac->ac_sb;
2450 void *bitmap = e4b->bd_bitmap;
2451 struct ext4_free_extent ex;
2452 int i, j, freelen;
2453 int free;
2454
2455 free = e4b->bd_info->bb_free;
2456 if (WARN_ON(free <= 0))
2457 return;
2458
2459 i = e4b->bd_info->bb_first_free;
2460
2461 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2462 i = mb_find_next_zero_bit(bitmap,
2463 EXT4_CLUSTERS_PER_GROUP(sb), i);
2464 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2465 /*
2466 * If we have a corrupt bitmap, we won't find any
2467 * free blocks even though the group info says we
2468 * have free blocks
2469 */
2470 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2471 "%d free clusters as per "
2472 "group info. But bitmap says 0",
2473 free);
2474 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2475 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2476 break;
2477 }
2478
2479 if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
2480 /*
2481 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
2482 * sure that this group will have a large enough
2483 * continuous free extent, so skip over the smaller free
2484 * extents
2485 */
2486 j = mb_find_next_bit(bitmap,
2487 EXT4_CLUSTERS_PER_GROUP(sb), i);
2488 freelen = j - i;
2489
2490 if (freelen < ac->ac_g_ex.fe_len) {
2491 i = j;
2492 free -= freelen;
2493 continue;
2494 }
2495 }
2496
2497 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2498 if (WARN_ON(ex.fe_len <= 0))
2499 break;
2500 if (free < ex.fe_len) {
2501 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2502 "%d free clusters as per "
2503 "group info. But got %d blocks",
2504 free, ex.fe_len);
2505 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2506 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2507 /*
2508 * The number of free blocks differs. This mostly
2509 * indicates that the bitmap is corrupt. So exit
2510 * without claiming the space.
2511 */
2512 break;
2513 }
2514 ex.fe_logical = 0xDEADC0DE; /* debug value */
2515 ext4_mb_measure_extent(ac, &ex, e4b);
2516
2517 i += ex.fe_len;
2518 free -= ex.fe_len;
2519 }
2520
2521 ext4_mb_check_limits(ac, e4b, 1);
2522 }
2523
2524 /*
2525 * This is a special case for storage like RAID5: we try to find
2526 * stripe-aligned chunks for stripe-size-multiple requests
2527 */
2528 static noinline_for_stack
2529 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2530 struct ext4_buddy *e4b)
2531 {
2532 struct super_block *sb = ac->ac_sb;
2533 struct ext4_sb_info *sbi = EXT4_SB(sb);
2534 void *bitmap = e4b->bd_bitmap;
2535 struct ext4_free_extent ex;
2536 ext4_fsblk_t first_group_block;
2537 ext4_fsblk_t a;
2538 ext4_grpblk_t i, stripe;
2539 int max;
2540
2541 BUG_ON(sbi->s_stripe == 0);
2542
2543 /* find first stripe-aligned block in group */
2544 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2545
2546 a = first_group_block + sbi->s_stripe - 1;
2547 do_div(a, sbi->s_stripe);
2548 i = (a * sbi->s_stripe) - first_group_block;
2549
2550 stripe = EXT4_B2C(sbi, sbi->s_stripe);
2551 i = EXT4_B2C(sbi, i);
2552 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2553 if (!mb_test_bit(i, bitmap)) {
2554 max = mb_find_extent(e4b, i, stripe, &ex);
2555 if (max >= stripe) {
2556 ac->ac_found++;
2557 ac->ac_cX_found[ac->ac_criteria]++;
2558 ex.fe_logical = 0xDEADF00D; /* debug value */
2559 ac->ac_b_ex = ex;
2560 ext4_mb_use_best_found(ac, e4b);
2561 break;
2562 }
2563 }
2564 i += stripe;
2565 }
2566 }
2567
2568 /*
2569 * This is also called BEFORE we load the buddy bitmap.
2570 * Returns either true or false, indicating whether the group is
2571 * suitable for the allocation or not.
2572 */
2573 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2574 ext4_group_t group, enum criteria cr)
2575 {
2576 ext4_grpblk_t free, fragments;
2577 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2578 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2579
2580 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);
2581
2582 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2583 return false;
2584
2585 free = grp->bb_free;
2586 if (free == 0)
2587 return false;
2588
2589 fragments = grp->bb_fragments;
2590 if (fragments == 0)
2591 return false;
2592
2593 switch (cr) {
2594 case CR_POWER2_ALIGNED:
2595 BUG_ON(ac->ac_2order == 0);
2596
2597 /* Avoid using the first bg of a flexgroup for data files */
2598 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2599 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2600 ((group % flex_size) == 0))
2601 return false;
2602
2603 if (free < ac->ac_g_ex.fe_len)
2604 return false;
2605
2606 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2607 return true;
2608
2609 if (grp->bb_largest_free_order < ac->ac_2order)
2610 return false;
2611
2612 return true;
2613 case CR_GOAL_LEN_FAST:
2614 case CR_BEST_AVAIL_LEN:
2615 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2616 return true;
2617 break;
2618 case CR_GOAL_LEN_SLOW:
2619 if (free >= ac->ac_g_ex.fe_len)
2620 return true;
2621 break;
2622 case CR_ANY_FREE:
2623 return true;
2624 default:
2625 BUG();
2626 }
2627
2628 return false;
2629 }
2630
2631 /*
2632 * This could return negative error code if something goes wrong
2633 * during ext4_mb_init_group(). This should not be called with
2634 * ext4_lock_group() held.
2635 *
2636 * Note: because we are conditionally operating with the group lock in
2637 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2638 * function using __acquire and __release. This means we need to be
2639 * super careful before messing with the error path handling via "goto
2640 * out"!
2641 */
2642 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2643 ext4_group_t group, enum criteria cr)
2644 {
2645 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2646 struct super_block *sb = ac->ac_sb;
2647 struct ext4_sb_info *sbi = EXT4_SB(sb);
2648 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2649 ext4_grpblk_t free;
2650 int ret = 0;
2651
2652 if (!grp)
2653 return -EFSCORRUPTED;
2654 if (sbi->s_mb_stats)
2655 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2656 if (should_lock) {
2657 ext4_lock_group(sb, group);
2658 __release(ext4_group_lock_ptr(sb, group));
2659 }
2660 free = grp->bb_free;
2661 if (free == 0)
2662 goto out;
2663 /*
2664 * In all criteria except CR_ANY_FREE we try to avoid groups that
2665 * can't possibly satisfy the full goal request due to insufficient
2666 * free blocks.
2667 */
2668 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
2669 goto out;
2670 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2671 goto out;
2672 if (should_lock) {
2673 __acquire(ext4_group_lock_ptr(sb, group));
2674 ext4_unlock_group(sb, group);
2675 }
2676
2677 /* We only do this if the grp has never been initialized */
2678 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2679 struct ext4_group_desc *gdp =
2680 ext4_get_group_desc(sb, group, NULL);
2681 int ret;
2682
2683 /*
2684 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
2685 * search to find large good chunks almost for free. If buddy
2686 * data is not ready, then this optimization makes no sense. But
2687 * we never skip the first block group in a flex_bg, since this
2688 * gets used for metadata block allocation, and we want to make
2689 * sure we locate metadata blocks in the first block group in
2690 * the flex_bg if possible.
2691 */
2692 if (!ext4_mb_cr_expensive(cr) &&
2693 (!sbi->s_log_groups_per_flex ||
2694 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2695 !(ext4_has_group_desc_csum(sb) &&
2696 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2697 return 0;
2698 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2699 if (ret)
2700 return ret;
2701 }
2702
2703 if (should_lock) {
2704 ext4_lock_group(sb, group);
2705 __release(ext4_group_lock_ptr(sb, group));
2706 }
2707 ret = ext4_mb_good_group(ac, group, cr);
2708 out:
2709 if (should_lock) {
2710 __acquire(ext4_group_lock_ptr(sb, group));
2711 ext4_unlock_group(sb, group);
2712 }
2713 return ret;
2714 }
2715
2716 /*
2717 * Start prefetching @nr block bitmaps starting at @group.
2718 * Return the next group which needs to be prefetched.
2719 */
2720 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2721 unsigned int nr, int *cnt)
2722 {
2723 ext4_group_t ngroups = ext4_get_groups_count(sb);
2724 struct buffer_head *bh;
2725 struct blk_plug plug;
2726
2727 blk_start_plug(&plug);
2728 while (nr-- > 0) {
2729 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2730 NULL);
2731 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2732
2733 /*
2734 * Prefetch block groups with free blocks; but don't
2735 * bother if it is marked uninitialized on disk, since
2736 * it won't require I/O to read. Also only try to
2737 * prefetch once, so we avoid a getblk() call, which can
2738 * be expensive.
2739 */
2740 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2741 EXT4_MB_GRP_NEED_INIT(grp) &&
2742 ext4_free_group_clusters(sb, gdp) > 0) {
2743 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2744 if (bh && !IS_ERR(bh)) {
2745 if (!buffer_uptodate(bh) && cnt)
2746 (*cnt)++;
2747 brelse(bh);
2748 }
2749 }
2750 if (++group >= ngroups)
2751 group = 0;
2752 }
2753 blk_finish_plug(&plug);
2754 return group;
2755 }
2756
2757 /*
2758 * Prefetching reads the block bitmap into the buffer cache; but we
2759 * need to make sure that the buddy bitmap in the page cache has been
2760 * initialized. Note that ext4_mb_init_group() will block if the I/O
2761 * is not yet completed, or will have to issue the read itself if
2762 * ext4_mb_prefetch did not start the I/O.
2763 *
2764 * TODO: We should actually kick off the buddy bitmap setup in a work
2765 * queue when the buffer I/O is completed, so that we don't block
2766 * waiting for the block allocation bitmap read to finish when
2767 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2768 */
2769 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2770 unsigned int nr)
2771 {
2772 struct ext4_group_desc *gdp;
2773 struct ext4_group_info *grp;
2774
2775 while (nr-- > 0) {
2776 if (!group)
2777 group = ext4_get_groups_count(sb);
2778 group--;
2779 gdp = ext4_get_group_desc(sb, group, NULL);
2780 grp = ext4_get_group_info(sb, group);
2781
2782 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2783 ext4_free_group_clusters(sb, gdp) > 0) {
2784 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2785 break;
2786 }
2787 }
2788 }
2789
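/*
 * Main scanning loop of the allocator: try the goal extent first, then scan
 * candidate groups, starting from CR_POWER2_ALIGNED for power-of-two
 * requests or CR_GOAL_LEN_FAST otherwise, and falling back through
 * progressively more exhaustive criteria up to CR_ANY_FREE until an extent
 * is found or every criterion has been exhausted.
 */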
2790 static noinline_for_stack int
2791 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2792 {
2793 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2794 enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2795 int err = 0, first_err = 0;
2796 unsigned int nr = 0, prefetch_ios = 0;
2797 struct ext4_sb_info *sbi;
2798 struct super_block *sb;
2799 struct ext4_buddy e4b;
2800 int lost;
2801
2802 sb = ac->ac_sb;
2803 sbi = EXT4_SB(sb);
2804 ngroups = ext4_get_groups_count(sb);
2805 /* non-extent files are limited to low blocks/groups */
2806 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2807 ngroups = sbi->s_blockfile_groups;
2808
2809 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2810
2811 /* first, try the goal */
2812 err = ext4_mb_find_by_goal(ac, &e4b);
2813 if (err || ac->ac_status == AC_STATUS_FOUND)
2814 goto out;
2815
2816 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2817 goto out;
2818
2819 /*
2820 * ac->ac_2order is set only if the fe_len is a power of 2;
2821 * if ac->ac_2order is set we also set the criterion to CR_POWER2_ALIGNED
2822 * so that we try an exact allocation using the buddy.
2823 */
2824 i = fls(ac->ac_g_ex.fe_len);
2825 ac->ac_2order = 0;
2826 /*
2827 * We search using buddy data only if the order of the request
2828 * is greater than or equal to sbi->s_mb_order2_reqs.
2829 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
2830 * We also support searching for power-of-two requests only for
2831 * requests up to the maximum buddy size we have constructed.
2832 */
2833 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2834 if (is_power_of_2(ac->ac_g_ex.fe_len))
2835 ac->ac_2order = array_index_nospec(i - 1,
2836 MB_NUM_ORDERS(sb));
2837 }
2838
2839 /* if stream allocation is enabled, use global goal */
2840 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2841 /* TBD: may be hot point */
2842 spin_lock(&sbi->s_md_lock);
2843 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2844 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2845 spin_unlock(&sbi->s_md_lock);
2846 }
2847
2848 /*
2849 * Let's just scan groups to find more or less suitable blocks. We
2850 * start with CR_GOAL_LEN_FAST, unless the request is power-of-2
2851 * aligned, in which case let's do that faster approach first.
2852 */
2853 if (ac->ac_2order)
2854 cr = CR_POWER2_ALIGNED;
2855 repeat:
2856 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2857 ac->ac_criteria = cr;
2858 /*
2859 * start searching for the right group
2860 * from the specified goal value
2861 */
2862 group = ac->ac_g_ex.fe_group;
2863 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2864 prefetch_grp = group;
2865
2866 for (i = 0, new_cr = cr; i < ngroups; i++,
2867 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2868 int ret = 0;
2869
2870 cond_resched();
2871 if (new_cr != cr) {
2872 cr = new_cr;
2873 goto repeat;
2874 }
2875
2876 /*
2877 * Batch reads of the block allocation bitmaps
2878 * to get multiple READs in flight; limit
2879 * prefetching at inexpensive CR, otherwise mballoc
2880 * can spend a lot of time loading imperfect groups
2881 */
2882 if ((prefetch_grp == group) &&
2883 (ext4_mb_cr_expensive(cr) ||
2884 prefetch_ios < sbi->s_mb_prefetch_limit)) {
2885 nr = sbi->s_mb_prefetch;
2886 if (ext4_has_feature_flex_bg(sb)) {
2887 nr = 1 << sbi->s_log_groups_per_flex;
2888 nr -= group & (nr - 1);
2889 nr = min(nr, sbi->s_mb_prefetch);
2890 }
2891 prefetch_grp = ext4_mb_prefetch(sb, group,
2892 nr, &prefetch_ios);
2893 }
2894
2895 /* This now checks without needing the buddy page */
2896 ret = ext4_mb_good_group_nolock(ac, group, cr);
2897 if (ret <= 0) {
2898 if (!first_err)
2899 first_err = ret;
2900 continue;
2901 }
2902
2903 err = ext4_mb_load_buddy(sb, group, &e4b);
2904 if (err)
2905 goto out;
2906
2907 ext4_lock_group(sb, group);
2908
2909 /*
2910 * We need to check again after locking the
2911 * block group
2912 */
2913 ret = ext4_mb_good_group(ac, group, cr);
2914 if (ret == 0) {
2915 ext4_unlock_group(sb, group);
2916 ext4_mb_unload_buddy(&e4b);
2917 continue;
2918 }
2919
2920 ac->ac_groups_scanned++;
2921 if (cr == CR_POWER2_ALIGNED)
2922 ext4_mb_simple_scan_group(ac, &e4b);
2923 else if ((cr == CR_GOAL_LEN_FAST ||
2924 cr == CR_BEST_AVAIL_LEN) &&
2925 sbi->s_stripe &&
2926 !(ac->ac_g_ex.fe_len %
2927 EXT4_B2C(sbi, sbi->s_stripe)))
2928 ext4_mb_scan_aligned(ac, &e4b);
2929 else
2930 ext4_mb_complex_scan_group(ac, &e4b);
2931
2932 ext4_unlock_group(sb, group);
2933 ext4_mb_unload_buddy(&e4b);
2934
2935 if (ac->ac_status != AC_STATUS_CONTINUE)
2936 break;
2937 }
2938 /* Processed all groups and haven't found blocks */
2939 if (sbi->s_mb_stats && i == ngroups)
2940 atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2941
2942 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
2943 /* Reset goal length to original goal length before
2944 * falling into CR_GOAL_LEN_SLOW */
2945 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
2946 }
2947
2948 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2949 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2950 /*
2951 * We've been searching too long. Let's try to allocate
2952 * the best chunk we've found so far
2953 */
2954 ext4_mb_try_best_found(ac, &e4b);
2955 if (ac->ac_status != AC_STATUS_FOUND) {
2956 /*
2957 * Someone luckier has already allocated it.
2958 * The only thing we can do is just take the first
2959 * found block(s)
2960 */
2961 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2962 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2963 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2964 ac->ac_b_ex.fe_len, lost);
2965
2966 ac->ac_b_ex.fe_group = 0;
2967 ac->ac_b_ex.fe_start = 0;
2968 ac->ac_b_ex.fe_len = 0;
2969 ac->ac_status = AC_STATUS_CONTINUE;
2970 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2971 cr = CR_ANY_FREE;
2972 goto repeat;
2973 }
2974 }
2975
2976 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2977 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2978 out:
2979 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2980 err = first_err;
2981
2982 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2983 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2984 ac->ac_flags, cr, err);
2985
2986 if (nr)
2987 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2988
2989 return err;
2990 }
2991
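/*
 * The mb_groups iterators encode the group number as (group + 1) cast to a
 * pointer, so that group 0 is never confused with the NULL end-of-sequence
 * marker; ->show() subtracts the offset again.
 */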
2992 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2993 {
2994 struct super_block *sb = pde_data(file_inode(seq->file));
2995 ext4_group_t group;
2996
2997 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2998 return NULL;
2999 group = *pos + 1;
3000 return (void *) ((unsigned long) group);
3001 }
3002
3003 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
3004 {
3005 struct super_block *sb = pde_data(file_inode(seq->file));
3006 ext4_group_t group;
3007
3008 ++*pos;
3009 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
3010 return NULL;
3011 group = *pos + 1;
3012 return (void *) ((unsigned long) group);
3013 }
3014
3015 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
3016 {
3017 struct super_block *sb = pde_data(file_inode(seq->file));
3018 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
3019 int i;
3020 int err, buddy_loaded = 0;
3021 struct ext4_buddy e4b;
3022 struct ext4_group_info *grinfo;
3023 unsigned char blocksize_bits = min_t(unsigned char,
3024 sb->s_blocksize_bits,
3025 EXT4_MAX_BLOCK_LOG_SIZE);
3026 struct sg {
3027 struct ext4_group_info info;
3028 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
3029 } sg;
3030
3031 group--;
3032 if (group == 0)
3033 seq_puts(seq, "#group: free frags first ["
3034 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
3035 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
3036
3037 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
3038 sizeof(struct ext4_group_info);
3039
3040 grinfo = ext4_get_group_info(sb, group);
3041 if (!grinfo)
3042 return 0;
3043 /* Load the group info in memory only if not already loaded. */
3044 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
3045 err = ext4_mb_load_buddy(sb, group, &e4b);
3046 if (err) {
3047 seq_printf(seq, "#%-5u: I/O error\n", group);
3048 return 0;
3049 }
3050 buddy_loaded = 1;
3051 }
3052
3053 memcpy(&sg, grinfo, i);
3054
3055 if (buddy_loaded)
3056 ext4_mb_unload_buddy(&e4b);
3057
3058 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
3059 sg.info.bb_fragments, sg.info.bb_first_free);
3060 for (i = 0; i <= 13; i++)
3061 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
3062 sg.info.bb_counters[i] : 0);
3063 seq_puts(seq, " ]");
3064 if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
3065 seq_puts(seq, " Block bitmap corrupted!");
3066 seq_puts(seq, "\n");
3067
3068 return 0;
3069 }
3070
3071 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
3072 {
3073 }
3074
3075 const struct seq_operations ext4_mb_seq_groups_ops = {
3076 .start = ext4_mb_seq_groups_start,
3077 .next = ext4_mb_seq_groups_next,
3078 .stop = ext4_mb_seq_groups_stop,
3079 .show = ext4_mb_seq_groups_show,
3080 };
3081
3082 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3083 {
3084 struct super_block *sb = seq->private;
3085 struct ext4_sb_info *sbi = EXT4_SB(sb);
3086
3087 seq_puts(seq, "mballoc:\n");
3088 if (!sbi->s_mb_stats) {
3089 seq_puts(seq, "\tmb stats collection turned off.\n");
3090 seq_puts(
3091 seq,
3092 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
3093 return 0;
3094 }
3095 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
3096 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
3097
3098 seq_printf(seq, "\tgroups_scanned: %u\n",
3099 atomic_read(&sbi->s_bal_groups_scanned));
3100
3101 /* CR_POWER2_ALIGNED stats */
3102 seq_puts(seq, "\tcr_p2_aligned_stats:\n");
3103 seq_printf(seq, "\t\thits: %llu\n",
3104 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
3105 seq_printf(
3106 seq, "\t\tgroups_considered: %llu\n",
3107 atomic64_read(
3108 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
3109 seq_printf(seq, "\t\textents_scanned: %u\n",
3110 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
3111 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3112 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
3113 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3114 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
3115
3116 /* CR_GOAL_LEN_FAST stats */
3117 seq_puts(seq, "\tcr_goal_fast_stats:\n");
3118 seq_printf(seq, "\t\thits: %llu\n",
3119 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
3120 seq_printf(seq, "\t\tgroups_considered: %llu\n",
3121 atomic64_read(
3122 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
3123 seq_printf(seq, "\t\textents_scanned: %u\n",
3124 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
3125 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3126 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
3127 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3128 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
3129
3130 /* CR_BEST_AVAIL_LEN stats */
3131 seq_puts(seq, "\tcr_best_avail_stats:\n");
3132 seq_printf(seq, "\t\thits: %llu\n",
3133 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
3134 seq_printf(
3135 seq, "\t\tgroups_considered: %llu\n",
3136 atomic64_read(
3137 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
3138 seq_printf(seq, "\t\textents_scanned: %u\n",
3139 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
3140 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3141 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
3142 seq_printf(seq, "\t\tbad_suggestions: %u\n",
3143 atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
3144
3145 /* CR_GOAL_LEN_SLOW stats */
3146 seq_puts(seq, "\tcr_goal_slow_stats:\n");
3147 seq_printf(seq, "\t\thits: %llu\n",
3148 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
3149 seq_printf(seq, "\t\tgroups_considered: %llu\n",
3150 atomic64_read(
3151 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
3152 seq_printf(seq, "\t\textents_scanned: %u\n",
3153 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
3154 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3155 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));
3156
3157 /* CR_ANY_FREE stats */
3158 seq_puts(seq, "\tcr_any_free_stats:\n");
3159 seq_printf(seq, "\t\thits: %llu\n",
3160 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
3161 seq_printf(
3162 seq, "\t\tgroups_considered: %llu\n",
3163 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
3164 seq_printf(seq, "\t\textents_scanned: %u\n",
3165 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
3166 seq_printf(seq, "\t\tuseless_loops: %llu\n",
3167 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));
3168
3169 /* Aggregates */
3170 seq_printf(seq, "\textents_scanned: %u\n",
3171 atomic_read(&sbi->s_bal_ex_scanned));
3172 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
3173 seq_printf(seq, "\t\tlen_goal_hits: %u\n",
3174 atomic_read(&sbi->s_bal_len_goals));
3175 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
3176 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
3177 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
3178 seq_printf(seq, "\tbuddies_generated: %u/%u\n",
3179 atomic_read(&sbi->s_mb_buddies_generated),
3180 ext4_get_groups_count(sb));
3181 seq_printf(seq, "\tbuddies_time_used: %llu\n",
3182 atomic64_read(&sbi->s_mb_generation_time));
3183 seq_printf(seq, "\tpreallocated: %u\n",
3184 atomic_read(&sbi->s_mb_preallocated));
3185 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
3186 return 0;
3187 }
3188
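/*
 * The mb_structs_summary iterators walk 2 * MB_NUM_ORDERS(sb) positions
 * (again offset by one to avoid NULL): the first half reports the
 * largest-free-order lists, the second half the average-fragment-size lists.
 */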
3189 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
3190 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
3191 {
3192 struct super_block *sb = pde_data(file_inode(seq->file));
3193 unsigned long position;
3194
3195 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3196 return NULL;
3197 position = *pos + 1;
3198 return (void *) ((unsigned long) position);
3199 }
3200
3201 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
3202 {
3203 struct super_block *sb = pde_data(file_inode(seq->file));
3204 unsigned long position;
3205
3206 ++*pos;
3207 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
3208 return NULL;
3209 position = *pos + 1;
3210 return (void *) ((unsigned long) position);
3211 }
3212
3213 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
3214 {
3215 struct super_block *sb = pde_data(file_inode(seq->file));
3216 struct ext4_sb_info *sbi = EXT4_SB(sb);
3217 unsigned long position = ((unsigned long) v);
3218 struct ext4_group_info *grp;
3219 unsigned int count;
3220
3221 position--;
3222 if (position >= MB_NUM_ORDERS(sb)) {
3223 position -= MB_NUM_ORDERS(sb);
3224 if (position == 0)
3225 seq_puts(seq, "avg_fragment_size_lists:\n");
3226
3227 count = 0;
3228 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
3229 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
3230 bb_avg_fragment_size_node)
3231 count++;
3232 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
3233 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3234 (unsigned int)position, count);
3235 return 0;
3236 }
3237
3238 if (position == 0) {
3239 seq_printf(seq, "optimize_scan: %d\n",
3240 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
3241 seq_puts(seq, "max_free_order_lists:\n");
3242 }
3243 count = 0;
3244 read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
3245 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
3246 bb_largest_free_order_node)
3247 count++;
3248 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
3249 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
3250 (unsigned int)position, count);
3251
3252 return 0;
3253 }
3254
3255 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3256 {
3257 }
3258
3259 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3260 .start = ext4_mb_seq_structs_summary_start,
3261 .next = ext4_mb_seq_structs_summary_next,
3262 .stop = ext4_mb_seq_structs_summary_stop,
3263 .show = ext4_mb_seq_structs_summary_show,
3264 };
3265
3266 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3267 {
3268 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3269 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3270
3271 BUG_ON(!cachep);
3272 return cachep;
3273 }
3274
3275 /*
3276 * Allocate the top-level s_group_info array for the specified number
3277 * of groups
3278 */
3279 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3280 {
3281 struct ext4_sb_info *sbi = EXT4_SB(sb);
3282 unsigned size;
3283 struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3284
3285 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3286 EXT4_DESC_PER_BLOCK_BITS(sb);
3287 if (size <= sbi->s_group_info_size)
3288 return 0;
3289
3290 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3291 new_groupinfo = kvzalloc(size, GFP_KERNEL);
3292 if (!new_groupinfo) {
3293 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3294 return -ENOMEM;
3295 }
3296 rcu_read_lock();
3297 old_groupinfo = rcu_dereference(sbi->s_group_info);
3298 if (old_groupinfo)
3299 memcpy(new_groupinfo, old_groupinfo,
3300 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3301 rcu_read_unlock();
3302 rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3303 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3304 if (old_groupinfo)
3305 ext4_kvfree_array_rcu(old_groupinfo);
3306 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3307 sbi->s_group_info_size);
3308 return 0;
3309 }
3310
3311 /* Create and initialize ext4_group_info data for the given group. */
3312 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3313 struct ext4_group_desc *desc)
3314 {
3315 int i;
3316 int metalen = 0;
3317 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3318 struct ext4_sb_info *sbi = EXT4_SB(sb);
3319 struct ext4_group_info **meta_group_info;
3320 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3321
3322 /*
3323 * First check if this group is the first of a reserved block.
3324 * If so, we have to allocate a new table of pointers
3325 * to ext4_group_info structures
3326 */
3327 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3328 metalen = sizeof(*meta_group_info) <<
3329 EXT4_DESC_PER_BLOCK_BITS(sb);
3330 meta_group_info = kmalloc(metalen, GFP_NOFS);
3331 if (meta_group_info == NULL) {
3332 ext4_msg(sb, KERN_ERR, "can't allocate mem "
3333 "for a buddy group");
3334 return -ENOMEM;
3335 }
3336 rcu_read_lock();
3337 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3338 rcu_read_unlock();
3339 }
3340
3341 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3342 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3343
3344 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3345 if (meta_group_info[i] == NULL) {
3346 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3347 goto exit_group_info;
3348 }
3349 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3350 &(meta_group_info[i]->bb_state));
3351
3352 /*
3353 * initialize bb_free to be able to skip
3354 * empty groups without initialization
3355 */
3356 if (ext4_has_group_desc_csum(sb) &&
3357 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3358 meta_group_info[i]->bb_free =
3359 ext4_free_clusters_after_init(sb, group, desc);
3360 } else {
3361 meta_group_info[i]->bb_free =
3362 ext4_free_group_clusters(sb, desc);
3363 }
3364
3365 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3366 init_rwsem(&meta_group_info[i]->alloc_sem);
3367 meta_group_info[i]->bb_free_root = RB_ROOT;
3368 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3369 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3370 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
3371 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
3372 meta_group_info[i]->bb_group = group;
3373
3374 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3375 return 0;
3376
3377 exit_group_info:
3378 /* If a meta_group_info table has been allocated, release it now */
3379 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3380 struct ext4_group_info ***group_info;
3381
3382 rcu_read_lock();
3383 group_info = rcu_dereference(sbi->s_group_info);
3384 kfree(group_info[idx]);
3385 group_info[idx] = NULL;
3386 rcu_read_unlock();
3387 }
3388 return -ENOMEM;
3389 } /* ext4_mb_add_groupinfo */
3390
3391 static int ext4_mb_init_backend(struct super_block *sb)
3392 {
3393 ext4_group_t ngroups = ext4_get_groups_count(sb);
3394 ext4_group_t i;
3395 struct ext4_sb_info *sbi = EXT4_SB(sb);
3396 int err;
3397 struct ext4_group_desc *desc;
3398 struct ext4_group_info ***group_info;
3399 struct kmem_cache *cachep;
3400
3401 err = ext4_mb_alloc_groupinfo(sb, ngroups);
3402 if (err)
3403 return err;
3404
3405 sbi->s_buddy_cache = new_inode(sb);
3406 if (sbi->s_buddy_cache == NULL) {
3407 ext4_msg(sb, KERN_ERR, "can't get new inode");
3408 goto err_freesgi;
3409 }
3410 /* To avoid potentially colliding with a valid on-disk inode number,
3411 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
3412 * not in the inode hash, so it should never be found by iget(), but
3413 * this will avoid confusion if it ever shows up during debugging. */
3414 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3415 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3416 for (i = 0; i < ngroups; i++) {
3417 cond_resched();
3418 desc = ext4_get_group_desc(sb, i, NULL);
3419 if (desc == NULL) {
3420 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3421 goto err_freebuddy;
3422 }
3423 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3424 goto err_freebuddy;
3425 }
3426
3427 if (ext4_has_feature_flex_bg(sb)) {
3428 /* a single flex group is supposed to be read by a single IO.
3429 * s_mb_prefetch is an unsigned int, so a shift of 32 or more by
3430 * s_log_groups_per_flex would overflow it; reject such values.
3431 */
3432 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3433 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3434 goto err_freebuddy;
3435 }
3436 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3437 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3438 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3439 } else {
3440 sbi->s_mb_prefetch = 32;
3441 }
3442 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3443 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3444 /* How many real IOs to prefetch within a single allocation at cr=0.
3445 * Given cr=0 is a CPU-related optimization we shouldn't try to
3446 * load too many groups; at some point we should start to use what
3447 * we've got in memory.
3448 * With an average random access time of 5ms, it'd take a second to
3449 * get 200 groups (* N with flex_bg), so let's make this limit 4.
3450 */
3451 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3452 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3453 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3454
3455 return 0;
3456
3457 err_freebuddy:
3458 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3459 while (i-- > 0) {
3460 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3461
3462 if (grp)
3463 kmem_cache_free(cachep, grp);
3464 }
3465 i = sbi->s_group_info_size;
3466 rcu_read_lock();
3467 group_info = rcu_dereference(sbi->s_group_info);
3468 while (i-- > 0)
3469 kfree(group_info[i]);
3470 rcu_read_unlock();
3471 iput(sbi->s_buddy_cache);
3472 err_freesgi:
3473 rcu_read_lock();
3474 kvfree(rcu_dereference(sbi->s_group_info));
3475 rcu_read_unlock();
3476 return -ENOMEM;
3477 }
3478
3479 static void ext4_groupinfo_destroy_slabs(void)
3480 {
3481 int i;
3482
3483 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3484 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3485 ext4_groupinfo_caches[i] = NULL;
3486 }
3487 }
3488
3489 static int ext4_groupinfo_create_slab(size_t size)
3490 {
3491 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3492 int slab_size;
3493 int blocksize_bits = order_base_2(size);
3494 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3495 struct kmem_cache *cachep;
3496
3497 if (cache_index >= NR_GRPINFO_CACHES)
3498 return -EINVAL;
3499
3500 if (unlikely(cache_index < 0))
3501 cache_index = 0;
3502
3503 mutex_lock(&ext4_grpinfo_slab_create_mutex);
3504 if (ext4_groupinfo_caches[cache_index]) {
3505 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3506 return 0; /* Already created */
3507 }
3508
3509 slab_size = offsetof(struct ext4_group_info,
3510 bb_counters[blocksize_bits + 2]);
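/*
 * The slab size depends on the block size because bb_counters[] holds
 * one counter per buddy order, i.e. blocksize_bits + 2 entries.
 */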
3511
3512 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3513 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3514 NULL);
3515
3516 ext4_groupinfo_caches[cache_index] = cachep;
3517
3518 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3519 if (!cachep) {
3520 printk(KERN_EMERG
3521 "EXT4-fs: no memory for groupinfo slab cache\n");
3522 return -ENOMEM;
3523 }
3524
3525 return 0;
3526 }
3527
3528 static void ext4_discard_work(struct work_struct *work)
3529 {
3530 struct ext4_sb_info *sbi = container_of(work,
3531 struct ext4_sb_info, s_discard_work);
3532 struct super_block *sb = sbi->s_sb;
3533 struct ext4_free_data *fd, *nfd;
3534 struct ext4_buddy e4b;
3535 LIST_HEAD(discard_list);
3536 ext4_group_t grp, load_grp;
3537 int err = 0;
3538
3539 spin_lock(&sbi->s_md_lock);
3540 list_splice_init(&sbi->s_discard_list, &discard_list);
3541 spin_unlock(&sbi->s_md_lock);
3542
3543 load_grp = UINT_MAX;
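/*
 * UINT_MAX means no buddy is currently loaded; we keep the buddy
 * loaded across consecutive entries for the same group to avoid
 * reloading it for every extent.
 */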
3544 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3545 /*
3546 * If the filesystem is unmounting, we are out of memory, or
3547 * suffering from no space, give up the discard
3548 */
3549 if ((sb->s_flags & SB_ACTIVE) && !err &&
3550 !atomic_read(&sbi->s_retry_alloc_pending)) {
3551 grp = fd->efd_group;
3552 if (grp != load_grp) {
3553 if (load_grp != UINT_MAX)
3554 ext4_mb_unload_buddy(&e4b);
3555
3556 err = ext4_mb_load_buddy(sb, grp, &e4b);
3557 if (err) {
3558 kmem_cache_free(ext4_free_data_cachep, fd);
3559 load_grp = UINT_MAX;
3560 continue;
3561 } else {
3562 load_grp = grp;
3563 }
3564 }
3565
3566 ext4_lock_group(sb, grp);
3567 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3568 fd->efd_start_cluster + fd->efd_count - 1, 1);
3569 ext4_unlock_group(sb, grp);
3570 }
3571 kmem_cache_free(ext4_free_data_cachep, fd);
3572 }
3573
3574 if (load_grp != UINT_MAX)
3575 ext4_mb_unload_buddy(&e4b);
3576 }
3577
3578 int ext4_mb_init(struct super_block *sb)
3579 {
3580 struct ext4_sb_info *sbi = EXT4_SB(sb);
3581 unsigned i, j;
3582 unsigned offset, offset_incr;
3583 unsigned max;
3584 int ret;
3585
3586 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3587
3588 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3589 if (sbi->s_mb_offsets == NULL) {
3590 ret = -ENOMEM;
3591 goto out;
3592 }
3593
3594 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3595 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3596 if (sbi->s_mb_maxs == NULL) {
3597 ret = -ENOMEM;
3598 goto out;
3599 }
3600
3601 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3602 if (ret < 0)
3603 goto out;
3604
3605 /* order 0 is regular bitmap */
3606 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3607 sbi->s_mb_offsets[0] = 0;
3608
3609 i = 1;
3610 offset = 0;
3611 offset_incr = 1 << (sb->s_blocksize_bits - 1);
3612 max = sb->s_blocksize << 2;
3613 do {
3614 sbi->s_mb_offsets[i] = offset;
3615 sbi->s_mb_maxs[i] = max;
3616 offset += offset_incr;
3617 offset_incr = offset_incr >> 1;
3618 max = max >> 1;
3619 i++;
3620 } while (i < MB_NUM_ORDERS(sb));
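/*
 * For example, with a 4k block size (blocksize_bits = 12) this yields:
 * order 1 at byte offset 0 of the buddy block with 16384 bits, order 2
 * at offset 2048 with 8192 bits, order 3 at offset 3072 with 4096 bits,
 * and so on; each order's bitmap is half the size of the previous one.
 */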
3621
3622 sbi->s_mb_avg_fragment_size =
3623 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3624 GFP_KERNEL);
3625 if (!sbi->s_mb_avg_fragment_size) {
3626 ret = -ENOMEM;
3627 goto out;
3628 }
3629 sbi->s_mb_avg_fragment_size_locks =
3630 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3631 GFP_KERNEL);
3632 if (!sbi->s_mb_avg_fragment_size_locks) {
3633 ret = -ENOMEM;
3634 goto out;
3635 }
3636 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3637 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3638 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3639 }
3640 sbi->s_mb_largest_free_orders =
3641 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3642 GFP_KERNEL);
3643 if (!sbi->s_mb_largest_free_orders) {
3644 ret = -ENOMEM;
3645 goto out;
3646 }
3647 sbi->s_mb_largest_free_orders_locks =
3648 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3649 GFP_KERNEL);
3650 if (!sbi->s_mb_largest_free_orders_locks) {
3651 ret = -ENOMEM;
3652 goto out;
3653 }
3654 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3655 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3656 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3657 }
3658
3659 spin_lock_init(&sbi->s_md_lock);
3660 sbi->s_mb_free_pending = 0;
3661 INIT_LIST_HEAD(&sbi->s_freed_data_list);
3662 INIT_LIST_HEAD(&sbi->s_discard_list);
3663 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3664 atomic_set(&sbi->s_retry_alloc_pending, 0);
3665
3666 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3667 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3668 sbi->s_mb_stats = MB_DEFAULT_STATS;
3669 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3670 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3671 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3672
3673 /*
3674 * The default group preallocation is 512, which for 4k block
3675 * sizes translates to 2 megabytes. However for bigalloc file
3676 * systems, this is probably too big (i.e., if the cluster size
3677 * is 1 megabyte, then group preallocation size becomes half a
3678 * gigabyte!). As a default, we will keep a two megabyte
3679 * group prealloc size for cluster sizes up to 64k, and after
3680 * that, we will force a minimum group preallocation size of
3681 * 32 clusters. This translates to 8 megs when the cluster
3682 * size is 256k, and 32 megs when the cluster size is 1 meg,
3683 * which seems reasonable as a default.
3684 */
3685 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3686 sbi->s_cluster_bits, 32);
3687 /*
3688 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3689 * to the lowest multiple of s_stripe which is bigger than
3690 * the s_mb_group_prealloc as determined above. We want
3691 * the preallocation size to be an exact multiple of the
3692 * RAID stripe size so that preallocations don't fragment
3693 * the stripes.
3694 */
3695 if (sbi->s_stripe > 1) {
3696 sbi->s_mb_group_prealloc = roundup(
3697 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
3698 }
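/*
 * For example, with the default of 512 clusters and a stripe of 16
 * blocks (no bigalloc) the value stays 512; a stripe of 384 blocks
 * would round it up to 768 clusters.
 */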
3699
3700 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3701 if (sbi->s_locality_groups == NULL) {
3702 ret = -ENOMEM;
3703 goto out;
3704 }
3705 for_each_possible_cpu(i) {
3706 struct ext4_locality_group *lg;
3707 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3708 mutex_init(&lg->lg_mutex);
3709 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3710 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3711 spin_lock_init(&lg->lg_prealloc_lock);
3712 }
3713
3714 if (bdev_nonrot(sb->s_bdev))
3715 sbi->s_mb_max_linear_groups = 0;
3716 else
3717 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3718 /* init file for buddy data */
3719 ret = ext4_mb_init_backend(sb);
3720 if (ret != 0)
3721 goto out_free_locality_groups;
3722
3723 return 0;
3724
3725 out_free_locality_groups:
3726 free_percpu(sbi->s_locality_groups);
3727 sbi->s_locality_groups = NULL;
3728 out:
3729 kfree(sbi->s_mb_avg_fragment_size);
3730 kfree(sbi->s_mb_avg_fragment_size_locks);
3731 kfree(sbi->s_mb_largest_free_orders);
3732 kfree(sbi->s_mb_largest_free_orders_locks);
3733 kfree(sbi->s_mb_offsets);
3734 sbi->s_mb_offsets = NULL;
3735 kfree(sbi->s_mb_maxs);
3736 sbi->s_mb_maxs = NULL;
3737 return ret;
3738 }
3739
3740 /* needs to be called with the ext4 group lock held */
3741 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3742 {
3743 struct ext4_prealloc_space *pa;
3744 struct list_head *cur, *tmp;
3745 int count = 0;
3746
3747 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3748 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3749 list_del(&pa->pa_group_list);
3750 count++;
3751 kmem_cache_free(ext4_pspace_cachep, pa);
3752 }
3753 return count;
3754 }
3755
3756 int ext4_mb_release(struct super_block *sb)
3757 {
3758 ext4_group_t ngroups = ext4_get_groups_count(sb);
3759 ext4_group_t i;
3760 int num_meta_group_infos;
3761 struct ext4_group_info *grinfo, ***group_info;
3762 struct ext4_sb_info *sbi = EXT4_SB(sb);
3763 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3764 int count;
3765
3766 if (test_opt(sb, DISCARD)) {
3767 /*
3768 * wait for the discard work to drain all of the ext4_free_data entries
3769 */
3770 flush_work(&sbi->s_discard_work);
3771 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3772 }
3773
3774 if (sbi->s_group_info) {
3775 for (i = 0; i < ngroups; i++) {
3776 cond_resched();
3777 grinfo = ext4_get_group_info(sb, i);
3778 if (!grinfo)
3779 continue;
3780 mb_group_bb_bitmap_free(grinfo);
3781 ext4_lock_group(sb, i);
3782 count = ext4_mb_cleanup_pa(grinfo);
3783 if (count)
3784 mb_debug(sb, "mballoc: %d PAs left\n",
3785 count);
3786 ext4_unlock_group(sb, i);
3787 kmem_cache_free(cachep, grinfo);
3788 }
3789 num_meta_group_infos = (ngroups +
3790 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3791 EXT4_DESC_PER_BLOCK_BITS(sb);
3792 rcu_read_lock();
3793 group_info = rcu_dereference(sbi->s_group_info);
3794 for (i = 0; i < num_meta_group_infos; i++)
3795 kfree(group_info[i]);
3796 kvfree(group_info);
3797 rcu_read_unlock();
3798 }
3799 kfree(sbi->s_mb_avg_fragment_size);
3800 kfree(sbi->s_mb_avg_fragment_size_locks);
3801 kfree(sbi->s_mb_largest_free_orders);
3802 kfree(sbi->s_mb_largest_free_orders_locks);
3803 kfree(sbi->s_mb_offsets);
3804 kfree(sbi->s_mb_maxs);
3805 iput(sbi->s_buddy_cache);
3806 if (sbi->s_mb_stats) {
3807 ext4_msg(sb, KERN_INFO,
3808 "mballoc: %u blocks %u reqs (%u success)",
3809 atomic_read(&sbi->s_bal_allocated),
3810 atomic_read(&sbi->s_bal_reqs),
3811 atomic_read(&sbi->s_bal_success));
3812 ext4_msg(sb, KERN_INFO,
3813 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3814 "%u 2^N hits, %u breaks, %u lost",
3815 atomic_read(&sbi->s_bal_ex_scanned),
3816 atomic_read(&sbi->s_bal_groups_scanned),
3817 atomic_read(&sbi->s_bal_goals),
3818 atomic_read(&sbi->s_bal_2orders),
3819 atomic_read(&sbi->s_bal_breaks),
3820 atomic_read(&sbi->s_mb_lost_chunks));
3821 ext4_msg(sb, KERN_INFO,
3822 "mballoc: %u generated and it took %llu",
3823 atomic_read(&sbi->s_mb_buddies_generated),
3824 atomic64_read(&sbi->s_mb_generation_time));
3825 ext4_msg(sb, KERN_INFO,
3826 "mballoc: %u preallocated, %u discarded",
3827 atomic_read(&sbi->s_mb_preallocated),
3828 atomic_read(&sbi->s_mb_discarded));
3829 }
3830
3831 free_percpu(sbi->s_locality_groups);
3832
3833 return 0;
3834 }
3835
3836 static inline int ext4_issue_discard(struct super_block *sb,
3837 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3838 struct bio **biop)
3839 {
3840 ext4_fsblk_t discard_block;
3841
3842 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3843 ext4_group_first_block_no(sb, block_group));
3844 count = EXT4_C2B(EXT4_SB(sb), count);
3845 trace_ext4_discard_blocks(sb,
3846 (unsigned long long) discard_block, count);
3847 if (biop) {
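/*
 * __blkdev_issue_discard() works in 512-byte sectors, so convert
 * filesystem blocks with a shift of (blocksize_bits - 9); e.g. a 4k
 * block is 8 sectors.
 */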
3848 return __blkdev_issue_discard(sb->s_bdev,
3849 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3850 (sector_t)count << (sb->s_blocksize_bits - 9),
3851 GFP_NOFS, biop);
3852 } else
3853 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3854 }
3855
3856 static void ext4_free_data_in_buddy(struct super_block *sb,
3857 struct ext4_free_data *entry)
3858 {
3859 struct ext4_buddy e4b;
3860 struct ext4_group_info *db;
3861 int err, count = 0;
3862
3863 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3864 entry->efd_count, entry->efd_group, entry);
3865
3866 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3867 /* we expect to find existing buddy because it's pinned */
3868 BUG_ON(err != 0);
3869
3870 spin_lock(&EXT4_SB(sb)->s_md_lock);
3871 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3872 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3873
3874 db = e4b.bd_info;
3875 /* there are blocks to put in buddy to make them really free */
3876 count += entry->efd_count;
3877 ext4_lock_group(sb, entry->efd_group);
3878 /* Take it out of per group rb tree */
3879 rb_erase(&entry->efd_node, &(db->bb_free_root));
3880 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3881
3882 /*
3883 * Clear the trimmed flag for the group so that the next
3884 * ext4_trim_fs can trim it.
3885 * If the volume is mounted with -o discard, online discard
3886 * is supported and the free blocks will be trimmed online.
3887 */
3888 if (!test_opt(sb, DISCARD))
3889 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3890
3891 if (!db->bb_free_root.rb_node) {
3892 /* No more items in the per group rb tree
3893 * balance refcounts from ext4_mb_free_metadata()
3894 */
3895 put_page(e4b.bd_buddy_page);
3896 put_page(e4b.bd_bitmap_page);
3897 }
3898 ext4_unlock_group(sb, entry->efd_group);
3899 ext4_mb_unload_buddy(&e4b);
3900
3901 mb_debug(sb, "freed %d blocks in 1 structures\n", count);
3902 }
3903
3904 /*
3905 * This function is called by the jbd2 layer once the commit has finished,
3906 * so we know we can free the blocks that were released with that commit.
3907 */
3908 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3909 {
3910 struct ext4_sb_info *sbi = EXT4_SB(sb);
3911 struct ext4_free_data *entry, *tmp;
3912 LIST_HEAD(freed_data_list);
3913 struct list_head *cut_pos = NULL;
3914 bool wake;
3915
3916 spin_lock(&sbi->s_md_lock);
3917 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3918 if (entry->efd_tid != commit_tid)
3919 break;
3920 cut_pos = &entry->efd_list;
3921 }
3922 if (cut_pos)
3923 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3924 cut_pos);
3925 spin_unlock(&sbi->s_md_lock);
3926
3927 list_for_each_entry(entry, &freed_data_list, efd_list)
3928 ext4_free_data_in_buddy(sb, entry);
3929
3930 if (test_opt(sb, DISCARD)) {
3931 spin_lock(&sbi->s_md_lock);
3932 wake = list_empty(&sbi->s_discard_list);
3933 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3934 spin_unlock(&sbi->s_md_lock);
3935 if (wake)
3936 queue_work(system_unbound_wq, &sbi->s_discard_work);
3937 } else {
3938 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3939 kmem_cache_free(ext4_free_data_cachep, entry);
3940 }
3941 }
3942
3943 int __init ext4_init_mballoc(void)
3944 {
3945 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3946 SLAB_RECLAIM_ACCOUNT);
3947 if (ext4_pspace_cachep == NULL)
3948 goto out;
3949
3950 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3951 SLAB_RECLAIM_ACCOUNT);
3952 if (ext4_ac_cachep == NULL)
3953 goto out_pa_free;
3954
3955 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3956 SLAB_RECLAIM_ACCOUNT);
3957 if (ext4_free_data_cachep == NULL)
3958 goto out_ac_free;
3959
3960 return 0;
3961
3962 out_ac_free:
3963 kmem_cache_destroy(ext4_ac_cachep);
3964 out_pa_free:
3965 kmem_cache_destroy(ext4_pspace_cachep);
3966 out:
3967 return -ENOMEM;
3968 }
3969
3970 void ext4_exit_mballoc(void)
3971 {
3972 /*
3973 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3974 * before destroying the slab cache.
3975 */
3976 rcu_barrier();
3977 kmem_cache_destroy(ext4_pspace_cachep);
3978 kmem_cache_destroy(ext4_ac_cachep);
3979 kmem_cache_destroy(ext4_free_data_cachep);
3980 ext4_groupinfo_destroy_slabs();
3981 }
3982
3983
3984 /*
3985 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
3986 * Returns 0 if success or error code
3987 */
3988 static noinline_for_stack int
3989 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3990 handle_t *handle, unsigned int reserv_clstrs)
3991 {
3992 struct buffer_head *bitmap_bh = NULL;
3993 struct ext4_group_desc *gdp;
3994 struct buffer_head *gdp_bh;
3995 struct ext4_sb_info *sbi;
3996 struct super_block *sb;
3997 ext4_fsblk_t block;
3998 int err, len;
3999
4000 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4001 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4002
4003 sb = ac->ac_sb;
4004 sbi = EXT4_SB(sb);
4005
4006 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
4007 if (IS_ERR(bitmap_bh)) {
4008 return PTR_ERR(bitmap_bh);
4009 }
4010
4011 BUFFER_TRACE(bitmap_bh, "getting write access");
4012 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
4013 EXT4_JTR_NONE);
4014 if (err)
4015 goto out_err;
4016
4017 err = -EIO;
4018 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
4019 if (!gdp)
4020 goto out_err;
4021
4022 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4023 ext4_free_group_clusters(sb, gdp));
4024
4025 BUFFER_TRACE(gdp_bh, "get_write_access");
4026 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
4027 if (err)
4028 goto out_err;
4029
4030 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4031
4032 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
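/*
 * block/len are the physical start and length of the chosen extent in
 * filesystem blocks; validate them against fs metadata before touching
 * the on-disk bitmap.
 */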
4033 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4034 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4035 "fs metadata", block, block+len);
4036 /* The file system is mounted not to panic on error, so fix
4037 * the bitmap and return EFSCORRUPTED.
4038 * We leak some of the blocks here.
4039 */
4040 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
4041 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
4042 ac->ac_b_ex.fe_len);
4043 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
4044 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4045 if (!err)
4046 err = -EFSCORRUPTED;
4047 goto out_err;
4048 }
4049
4050 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
4051 #ifdef AGGRESSIVE_CHECK
4052 {
4053 int i;
4054 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
4055 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
4056 bitmap_bh->b_data));
4057 }
4058 }
4059 #endif
4060 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
4061 ac->ac_b_ex.fe_len);
4062 if (ext4_has_group_desc_csum(sb) &&
4063 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4064 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4065 ext4_free_group_clusters_set(sb, gdp,
4066 ext4_free_clusters_after_init(sb,
4067 ac->ac_b_ex.fe_group, gdp));
4068 }
4069 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
4070 ext4_free_group_clusters_set(sb, gdp, len);
4071 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4072 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
4073
4074 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
4075 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4076 /*
4077 * Now reduce the dirty block count also. Should not go negative
4078 */
4079 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4080 /* release all the reserved blocks if non delalloc */
4081 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4082 reserv_clstrs);
4083
4084 if (sbi->s_log_groups_per_flex) {
4085 ext4_group_t flex_group = ext4_flex_group(sbi,
4086 ac->ac_b_ex.fe_group);
4087 atomic64_sub(ac->ac_b_ex.fe_len,
4088 &sbi_array_rcu_deref(sbi, s_flex_groups,
4089 flex_group)->free_clusters);
4090 }
4091
4092 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4093 if (err)
4094 goto out_err;
4095 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
4096
4097 out_err:
4098 brelse(bitmap_bh);
4099 return err;
4100 }
4101
4102 /*
4103 * Idempotent helper for Ext4 fast commit replay path to set the state of
4104 * blocks in bitmaps and update counters.
4105 */
4106 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4107 int len, int state)
4108 {
4109 struct buffer_head *bitmap_bh = NULL;
4110 struct ext4_group_desc *gdp;
4111 struct buffer_head *gdp_bh;
4112 struct ext4_sb_info *sbi = EXT4_SB(sb);
4113 ext4_group_t group;
4114 ext4_grpblk_t blkoff;
4115 int i, err = 0;
4116 int already;
4117 unsigned int clen, clen_changed, thisgrp_len;
4118
4119 while (len > 0) {
4120 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4121
4122 /*
4123 * Check to see if we are freeing blocks across a group
4124 * boundary.
4125 * In the case of flex_bg, it can happen that (block, len)
4126 * spans more than one group. In that case we need to
4127 * get the corresponding group metadata to work with,
4128 * which is why we loop over the range one group at a time.
4129 */
4130 thisgrp_len = min_t(unsigned int, (unsigned int)len,
4131 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4132 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
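/*
 * thisgrp_len is the part of (block, len) that fits in this group, in
 * filesystem blocks; clen is the same length in clusters, which is the
 * unit the block bitmap tracks.
 */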
4133
4134 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4135 ext4_error(sb, "Marking blocks in system zone - "
4136 "Block = %llu, len = %u",
4137 block, thisgrp_len);
4138 bitmap_bh = NULL;
4139 break;
4140 }
4141
4142 bitmap_bh = ext4_read_block_bitmap(sb, group);
4143 if (IS_ERR(bitmap_bh)) {
4144 err = PTR_ERR(bitmap_bh);
4145 bitmap_bh = NULL;
4146 break;
4147 }
4148
4149 err = -EIO;
4150 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
4151 if (!gdp)
4152 break;
4153
4154 ext4_lock_group(sb, group);
4155 already = 0;
4156 for (i = 0; i < clen; i++)
4157 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4158 !state)
4159 already++;
4160
4161 clen_changed = clen - already;
4162 if (state)
4163 mb_set_bits(bitmap_bh->b_data, blkoff, clen);
4164 else
4165 mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
4166 if (ext4_has_group_desc_csum(sb) &&
4167 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4168 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4169 ext4_free_group_clusters_set(sb, gdp,
4170 ext4_free_clusters_after_init(sb, group, gdp));
4171 }
4172 if (state)
4173 clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
4174 else
4175 clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
4176
4177 ext4_free_group_clusters_set(sb, gdp, clen);
4178 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
4179 ext4_group_desc_csum_set(sb, group, gdp);
4180
4181 ext4_unlock_group(sb, group);
4182
4183 if (sbi->s_log_groups_per_flex) {
4184 ext4_group_t flex_group = ext4_flex_group(sbi, group);
4185 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
4186 s_flex_groups, flex_group);
4187
4188 if (state)
4189 atomic64_sub(clen_changed, &fg->free_clusters);
4190 else
4191 atomic64_add(clen_changed, &fg->free_clusters);
4192
4193 }
4194
4195 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
4196 if (err)
4197 break;
4198 sync_dirty_buffer(bitmap_bh);
4199 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
4200 sync_dirty_buffer(gdp_bh);
4201 if (err)
4202 break;
4203
4204 block += thisgrp_len;
4205 len -= thisgrp_len;
4206 brelse(bitmap_bh);
4207 BUG_ON(len < 0);
4208 }
4209
4210 if (err)
4211 brelse(bitmap_bh);
4212 }
4213
4214 /*
4215 * Here we normalize the request for a locality group.
4216 * Group requests are normalized to s_mb_group_prealloc, which is
4217 * rounded to a multiple of s_stripe if a stripe size is set via mount option.
4218 * s_mb_group_prealloc can be configured via
4219 * /sys/fs/ext4/<partition>/mb_group_prealloc
4220 *
4221 * XXX: should we try to preallocate more than the group has now?
4222 */
4223 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4224 {
4225 struct super_block *sb = ac->ac_sb;
4226 struct ext4_locality_group *lg = ac->ac_lg;
4227
4228 BUG_ON(lg == NULL);
4229 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4230 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4231 }
4232
4233 /*
4234 * This function returns the next element to look at during inode
4235 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
4236 * (ei->i_prealloc_lock)
4237 *
4238 * new_start The start of the range we want to compare
4239 * cur_start The existing start that we are comparing against
4240 * node The node of the rb_tree
4241 */
4242 static inline struct rb_node*
4243 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4244 {
4245 if (new_start < cur_start)
4246 return node->rb_left;
4247 else
4248 return node->rb_right;
4249 }
4250
4251 static inline void
4252 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4253 ext4_lblk_t start, loff_t end)
4254 {
4255 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4256 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4257 struct ext4_prealloc_space *tmp_pa;
4258 ext4_lblk_t tmp_pa_start;
4259 loff_t tmp_pa_end;
4260 struct rb_node *iter;
4261
4262 read_lock(&ei->i_prealloc_lock);
4263 for (iter = ei->i_prealloc_node.rb_node; iter;
4264 iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4265 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4266 pa_node.inode_node);
4267 tmp_pa_start = tmp_pa->pa_lstart;
4268 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4269
4270 spin_lock(&tmp_pa->pa_lock);
4271 if (tmp_pa->pa_deleted == 0)
4272 BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4273 spin_unlock(&tmp_pa->pa_lock);
4274 }
4275 read_unlock(&ei->i_prealloc_lock);
4276 }
4277
4278 /*
4279 * Given an allocation context "ac" and a range "start", "end", check
4280 * and adjust boundaries if the range overlaps with any of the existing
4281 * preallocations stored in the corresponding inode of the allocation context.
4282 *
4283 * Parameters:
4284 * ac allocation context
4285 * start start of the new range
4286 * end end of the new range
4287 */
4288 static inline void
4289 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4290 ext4_lblk_t *start, loff_t *end)
4291 {
4292 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4293 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4294 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4295 struct rb_node *iter;
4296 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4297 loff_t new_end, tmp_pa_end, left_pa_end = -1;
4298
4299 new_start = *start;
4300 new_end = *end;
4301
4302 /*
4303 * Adjust the normalized range so that it doesn't overlap with any
4304 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
4305 * so it doesn't change underneath us.
4306 */
4307 read_lock(&ei->i_prealloc_lock);
4308
4309 /* Step 1: find any one immediate neighboring PA of the normalized range */
4310 for (iter = ei->i_prealloc_node.rb_node; iter;
4311 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4312 tmp_pa_start, iter)) {
4313 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4314 pa_node.inode_node);
4315 tmp_pa_start = tmp_pa->pa_lstart;
4316 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4317
4318 /* PA must not overlap original request */
4319 spin_lock(&tmp_pa->pa_lock);
4320 if (tmp_pa->pa_deleted == 0)
4321 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4322 ac->ac_o_ex.fe_logical < tmp_pa_start));
4323 spin_unlock(&tmp_pa->pa_lock);
4324 }
4325
4326 /*
4327 * Step 2: check if the found PA is left or right neighbor and
4328 * get the other neighbor
4329 */
4330 if (tmp_pa) {
4331 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4332 struct rb_node *tmp;
4333
4334 left_pa = tmp_pa;
4335 tmp = rb_next(&left_pa->pa_node.inode_node);
4336 if (tmp) {
4337 right_pa = rb_entry(tmp,
4338 struct ext4_prealloc_space,
4339 pa_node.inode_node);
4340 }
4341 } else {
4342 struct rb_node *tmp;
4343
4344 right_pa = tmp_pa;
4345 tmp = rb_prev(&right_pa->pa_node.inode_node);
4346 if (tmp) {
4347 left_pa = rb_entry(tmp,
4348 struct ext4_prealloc_space,
4349 pa_node.inode_node);
4350 }
4351 }
4352 }
4353
4354 /* Step 3: get the non deleted neighbors */
4355 if (left_pa) {
4356 for (iter = &left_pa->pa_node.inode_node;;
4357 iter = rb_prev(iter)) {
4358 if (!iter) {
4359 left_pa = NULL;
4360 break;
4361 }
4362
4363 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4364 pa_node.inode_node);
4365 left_pa = tmp_pa;
4366 spin_lock(&tmp_pa->pa_lock);
4367 if (tmp_pa->pa_deleted == 0) {
4368 spin_unlock(&tmp_pa->pa_lock);
4369 break;
4370 }
4371 spin_unlock(&tmp_pa->pa_lock);
4372 }
4373 }
4374
4375 if (right_pa) {
4376 for (iter = &right_pa->pa_node.inode_node;;
4377 iter = rb_next(iter)) {
4378 if (!iter) {
4379 right_pa = NULL;
4380 break;
4381 }
4382
4383 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4384 pa_node.inode_node);
4385 right_pa = tmp_pa;
4386 spin_lock(&tmp_pa->pa_lock);
4387 if (tmp_pa->pa_deleted == 0) {
4388 spin_unlock(&tmp_pa->pa_lock);
4389 break;
4390 }
4391 spin_unlock(&tmp_pa->pa_lock);
4392 }
4393 }
4394
4395 if (left_pa) {
4396 left_pa_end = pa_logical_end(sbi, left_pa);
4397 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4398 }
4399
4400 if (right_pa) {
4401 right_pa_start = right_pa->pa_lstart;
4402 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4403 }
4404
4405 /* Step 4: trim our normalized range to not overlap with the neighbors */
4406 if (left_pa) {
4407 if (left_pa_end > new_start)
4408 new_start = left_pa_end;
4409 }
4410
4411 if (right_pa) {
4412 if (right_pa_start < new_end)
4413 new_end = right_pa_start;
4414 }
4415 read_unlock(&ei->i_prealloc_lock);
4416
4417 /* XXX: extra loop to check we really don't overlap preallocations */
4418 ext4_mb_pa_assert_overlap(ac, new_start, new_end);
4419
4420 *start = new_start;
4421 *end = new_end;
4422 }
4423
4424 /*
4425 * Normalization means making request better in terms of
4426 * size and alignment
4427 */
4428 static noinline_for_stack void
4429 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4430 struct ext4_allocation_request *ar)
4431 {
4432 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4433 struct ext4_super_block *es = sbi->s_es;
4434 int bsbits, max;
4435 loff_t size, start_off, end;
4436 loff_t orig_size __maybe_unused;
4437 ext4_lblk_t start;
4438
4439 /* only normalize data requests; metadata requests
4440 do not need preallocation */
4441 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4442 return;
4443
4444 /* sometimes the caller may want exact blocks */
4445 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4446 return;
4447
4448 /* caller may indicate that preallocation isn't
4449 * required (it's a tail, for example) */
4450 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4451 return;
4452
4453 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4454 ext4_mb_normalize_group_request(ac);
4455 return ;
4456 }
4457
4458 bsbits = ac->ac_sb->s_blocksize_bits;
4459
4460 /* first, let's learn the actual file size
4461 * assuming the current request is allocated */
4462 size = extent_logical_end(sbi, &ac->ac_o_ex);
4463 size = size << bsbits;
4464 if (size < i_size_read(ac->ac_inode))
4465 size = i_size_read(ac->ac_inode);
4466 orig_size = size;
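/*
 * At this point "size" is the file size in bytes the file would have
 * once this request is allocated (or the current i_size if that is
 * larger); the table below rounds it up to pick a preallocation window.
 */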
4467
4468 /* max size of free chunks */
4469 max = 2 << bsbits;
4470
4471 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
4472 (req <= (size) || max <= (chunk_size))
4473
4474 /* first, try to predict filesize */
4475 /* XXX: should this table be tunable? */
4476 start_off = 0;
4477 if (size <= 16 * 1024) {
4478 size = 16 * 1024;
4479 } else if (size <= 32 * 1024) {
4480 size = 32 * 1024;
4481 } else if (size <= 64 * 1024) {
4482 size = 64 * 1024;
4483 } else if (size <= 128 * 1024) {
4484 size = 128 * 1024;
4485 } else if (size <= 256 * 1024) {
4486 size = 256 * 1024;
4487 } else if (size <= 512 * 1024) {
4488 size = 512 * 1024;
4489 } else if (size <= 1024 * 1024) {
4490 size = 1024 * 1024;
4491 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4492 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4493 (21 - bsbits)) << 21;
4494 size = 2 * 1024 * 1024;
4495 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4496 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4497 (22 - bsbits)) << 22;
4498 size = 4 * 1024 * 1024;
4499 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4500 (8<<20)>>bsbits, max, 8 * 1024)) {
4501 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4502 (23 - bsbits)) << 23;
4503 size = 8 * 1024 * 1024;
4504 } else {
4505 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4506 size = (loff_t) EXT4_C2B(sbi,
4507 ac->ac_o_ex.fe_len) << bsbits;
4508 }
4509 size = size >> bsbits;
4510 start = start_off >> bsbits;
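/*
 * Example: a request that would leave the file at around 100k falls
 * into the 128k bucket above, so the goal becomes a 128k preallocation;
 * the 2M/4M/8M buckets additionally align start_off to a matching
 * 2M/4M/8M boundary.
 */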
4511
4512 /*
4513 * For tiny groups (smaller than 8MB) the chosen allocation
4514 * alignment may be larger than group size. Make sure the
4515 * alignment does not move allocation to a different group which
4516 * makes mballoc fail assertions later.
4517 */
4518 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4519 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4520
4521 /* avoid unnecessary preallocation that may trigger assertions */
4522 if (start + size > EXT_MAX_BLOCKS)
4523 size = EXT_MAX_BLOCKS - start;
4524
4525 /* don't cover already allocated blocks in selected range */
4526 if (ar->pleft && start <= ar->lleft) {
4527 size -= ar->lleft + 1 - start;
4528 start = ar->lleft + 1;
4529 }
4530 if (ar->pright && start + size - 1 >= ar->lright)
4531 size -= start + size - ar->lright;
4532
4533 /*
4534 * Trim allocation request for filesystems with artificially small
4535 * groups.
4536 */
4537 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4538 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4539
4540 end = start + size;
4541
4542 ext4_mb_pa_adjust_overlap(ac, &start, &end);
4543
4544 size = end - start;
4545
4546 /*
4547 * In this function "start" and "size" are normalized for better
4548 * alignment and length such that we could preallocate more blocks.
4549 * This normalization is done such that original request of
4550 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4551 * "size" boundaries.
4552 * (Note fe_len can be relaxed since the FS block allocation API does not
4553 * guarantee the number of contiguous blocks allocated, since that
4554 * depends upon the free space left, etc).
4555 * In case of inode pa, later we use the allocated blocks
4556 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4557 * range of goal/best blocks [start, size] to put it at the
4558 * ac_o_ex.fe_logical extent of this inode.
4559 * (See ext4_mb_use_inode_pa() for more details)
4560 */
4561 if (start + size <= ac->ac_o_ex.fe_logical ||
4562 start > ac->ac_o_ex.fe_logical) {
4563 ext4_msg(ac->ac_sb, KERN_ERR,
4564 "start %lu, size %lu, fe_logical %lu",
4565 (unsigned long) start, (unsigned long) size,
4566 (unsigned long) ac->ac_o_ex.fe_logical);
4567 BUG();
4568 }
4569 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4570
4571 /* now prepare goal request */
4572
4573 /* XXX: is it better to align blocks with respect to logical
4574 * placement or to satisfy a big request as is */
4575 ac->ac_g_ex.fe_logical = start;
4576 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4577 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4578
4579 /* define goal start in order to merge */
4580 if (ar->pright && (ar->lright == (start + size)) &&
4581 ar->pright >= size &&
4582 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4583 /* merge to the right */
4584 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4585 &ac->ac_g_ex.fe_group,
4586 &ac->ac_g_ex.fe_start);
4587 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4588 }
4589 if (ar->pleft && (ar->lleft + 1 == start) &&
4590 ar->pleft + 1 < ext4_blocks_count(es)) {
4591 /* merge to the left */
4592 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4593 &ac->ac_g_ex.fe_group,
4594 &ac->ac_g_ex.fe_start);
4595 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4596 }
4597
4598 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4599 orig_size, start);
4600 }
4601
4602 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4603 {
4604 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4605
4606 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4607 atomic_inc(&sbi->s_bal_reqs);
4608 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4609 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4610 atomic_inc(&sbi->s_bal_success);
4611
4612 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4613 for (int i=0; i<EXT4_MB_NUM_CRS; i++) {
4614 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4615 }
4616
4617 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4618 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4619 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4620 atomic_inc(&sbi->s_bal_goals);
4621 /* did we allocate as much as normalizer originally wanted? */
4622 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4623 atomic_inc(&sbi->s_bal_len_goals);
4624
4625 if (ac->ac_found > sbi->s_mb_max_to_scan)
4626 atomic_inc(&sbi->s_bal_breaks);
4627 }
4628
4629 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4630 trace_ext4_mballoc_alloc(ac);
4631 else
4632 trace_ext4_mballoc_prealloc(ac);
4633 }
4634
4635 /*
4636 * Called on failure; free up any blocks from the inode PA for this
4637 * context. We don't need this for MB_GROUP_PA because we only change
4638 * pa_free in ext4_mb_release_context(), but on failure, we've already
4639 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4640 */
4641 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4642 {
4643 struct ext4_prealloc_space *pa = ac->ac_pa;
4644 struct ext4_buddy e4b;
4645 int err;
4646
4647 if (pa == NULL) {
4648 if (ac->ac_f_ex.fe_len == 0)
4649 return;
4650 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4651 if (WARN_RATELIMIT(err,
4652 "ext4: mb_load_buddy failed (%d)", err))
4653 /*
4654 * This should never happen since we pin the
4655 * pages in the ext4_allocation_context so
4656 * ext4_mb_load_buddy() should never fail.
4657 */
4658 return;
4659 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4660 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4661 ac->ac_f_ex.fe_len);
4662 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4663 ext4_mb_unload_buddy(&e4b);
4664 return;
4665 }
4666 if (pa->pa_type == MB_INODE_PA) {
4667 spin_lock(&pa->pa_lock);
4668 pa->pa_free += ac->ac_b_ex.fe_len;
4669 spin_unlock(&pa->pa_lock);
4670 }
4671 }
4672
4673 /*
4674 * use blocks preallocated to inode
4675 */
4676 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4677 struct ext4_prealloc_space *pa)
4678 {
4679 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4680 ext4_fsblk_t start;
4681 ext4_fsblk_t end;
4682 int len;
4683
4684 /* found preallocated blocks, use them */
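/*
 * The physical start is offset into the pa by the same amount the
 * requested logical block is offset from pa_lstart, and the length is
 * capped by both the request and the end of the preallocated range.
 */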
4685 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4686 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4687 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4688 len = EXT4_NUM_B2C(sbi, end - start);
4689 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4690 &ac->ac_b_ex.fe_start);
4691 ac->ac_b_ex.fe_len = len;
4692 ac->ac_status = AC_STATUS_FOUND;
4693 ac->ac_pa = pa;
4694
4695 BUG_ON(start < pa->pa_pstart);
4696 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4697 BUG_ON(pa->pa_free < len);
4698 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4699 pa->pa_free -= len;
4700
4701 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4702 }
4703
4704 /*
4705 * use blocks preallocated to locality group
4706 */
4707 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4708 struct ext4_prealloc_space *pa)
4709 {
4710 unsigned int len = ac->ac_o_ex.fe_len;
4711
4712 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4713 &ac->ac_b_ex.fe_group,
4714 &ac->ac_b_ex.fe_start);
4715 ac->ac_b_ex.fe_len = len;
4716 ac->ac_status = AC_STATUS_FOUND;
4717 ac->ac_pa = pa;
4718
4719 /* we don't correct pa_pstart or pa_len here to avoid a
4720 * possible race when the group is being loaded concurrently;
4721 * instead we correct the pa later, after blocks are marked
4722 * in on-disk bitmap -- see ext4_mb_release_context()
4723 * Other CPUs are prevented from allocating from this pa by lg_mutex
4724 */
4725 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4726 pa->pa_lstart, len, pa);
4727 }
4728
4729 /*
4730 * Return the prealloc space that has the minimal distance
4731 * from the goal block. @cpa is the prealloc
4732 * space with the currently known minimal distance
4733 * from the goal block.
4734 */
4735 static struct ext4_prealloc_space *
4736 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4737 struct ext4_prealloc_space *pa,
4738 struct ext4_prealloc_space *cpa)
4739 {
4740 ext4_fsblk_t cur_distance, new_distance;
4741
4742 if (cpa == NULL) {
4743 atomic_inc(&pa->pa_count);
4744 return pa;
4745 }
4746 cur_distance = abs(goal_block - cpa->pa_pstart);
4747 new_distance = abs(goal_block - pa->pa_pstart);
4748
4749 if (cur_distance <= new_distance)
4750 return cpa;
4751
4752 /* drop the previous reference */
4753 atomic_dec(&cpa->pa_count);
4754 atomic_inc(&pa->pa_count);
4755 return pa;
4756 }
4757
4758 /*
4759 * check if the found pa meets EXT4_MB_HINT_GOAL_ONLY
4760 */
4761 static bool
4762 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4763 struct ext4_prealloc_space *pa)
4764 {
4765 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4766 ext4_fsblk_t start;
4767
4768 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4769 return true;
4770
4771 /*
4772 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4773 * in ext4_mb_normalize_request and will keep same with ac_o_ex
4774 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4775 * consistent with ext4_mb_find_by_goal.
4776 */
4777 start = pa->pa_pstart +
4778 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4779 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4780 return false;
4781
4782 if (ac->ac_g_ex.fe_len > pa->pa_len -
4783 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4784 return false;
4785
4786 return true;
4787 }
4788
4789 /*
4790 * search goal blocks in preallocated space
4791 */
4792 static noinline_for_stack bool
4793 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4794 {
4795 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4796 int order, i;
4797 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4798 struct ext4_locality_group *lg;
4799 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4800 struct rb_node *iter;
4801 ext4_fsblk_t goal_block;
4802
4803 /* only data can be preallocated */
4804 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4805 return false;
4806
4807 /*
4808 * first, try per-file preallocation by searching the inode pa rbtree.
4809 *
4810 * Here, we can't do a direct traversal of the tree because
4811 * ext4_mb_discard_group_preallocation() can concurrently mark the pa
4812 * deleted and that can cause direct traversal to skip some entries.
4813 */
4814 read_lock(&ei->i_prealloc_lock);
4815
4816 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4817 goto try_group_pa;
4818 }
4819
4820 /*
4821 * Step 1: Find a pa with logical start immediately adjacent to the
4822 * original logical start. This could be on the left or right.
4823 *
4824 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4825 */
4826 for (iter = ei->i_prealloc_node.rb_node; iter;
4827 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4828 tmp_pa->pa_lstart, iter)) {
4829 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4830 pa_node.inode_node);
4831 }
4832
4833 /*
4834 * Step 2: The adjacent pa might be to the right of logical start, find
4835 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4836 * logical start is towards the left of original request's logical start
4837 */
4838 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4839 struct rb_node *tmp;
4840 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4841
4842 if (tmp) {
4843 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4844 pa_node.inode_node);
4845 } else {
4846 /*
4847 * If there is no adjacent pa to the left then finding
4848 * an overlapping pa is not possible hence stop searching
4849 * inode pa tree
4850 */
4851 goto try_group_pa;
4852 }
4853 }
4854
4855 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4856
4857 /*
4858 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4859 * the first non deleted adjacent pa. After this step we should have a
4860 * valid tmp_pa which is guaranteed to be non deleted.
4861 */
4862 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4863 if (!iter) {
4864 /*
4865 * no non deleted left adjacent pa, so stop searching
4866 * inode pa tree
4867 */
4868 goto try_group_pa;
4869 }
4870 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4871 pa_node.inode_node);
4872 spin_lock(&tmp_pa->pa_lock);
4873 if (tmp_pa->pa_deleted == 0) {
4874 /*
4875 * We will keep holding the pa_lock from
4876 * this point on because we don't want group discard
4877 * to delete this pa underneath us. Since group
4878 * discard is anyway an ENOSPC operation it
4879 * should be okay for it to wait a few more cycles.
4880 */
4881 break;
4882 } else {
4883 spin_unlock(&tmp_pa->pa_lock);
4884 }
4885 }
4886
4887 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4888 BUG_ON(tmp_pa->pa_deleted == 1);
4889
4890 /*
4891 * Step 4: We now have the non deleted left adjacent pa. Only this
4892 * pa can possibly satisfy the request hence check if it overlaps
4893 * original logical start and stop searching if it doesn't.
4894 */
4895 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4896 spin_unlock(&tmp_pa->pa_lock);
4897 goto try_group_pa;
4898 }
4899
4900 /* non-extent files can't have physical blocks past 2^32 */
4901 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4902 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4903 EXT4_MAX_BLOCK_FILE_PHYS)) {
4904 /*
4905 * Since PAs don't overlap, we won't find any other PA to
4906 * satisfy this.
4907 */
4908 spin_unlock(&tmp_pa->pa_lock);
4909 goto try_group_pa;
4910 }
4911
4912 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4913 atomic_inc(&tmp_pa->pa_count);
4914 ext4_mb_use_inode_pa(ac, tmp_pa);
4915 spin_unlock(&tmp_pa->pa_lock);
4916 read_unlock(&ei->i_prealloc_lock);
4917 return true;
4918 } else {
4919 /*
4920 * We found a valid overlapping pa but couldn't use it because
4921 * it had no free blocks. This should ideally never happen
4922 * because:
4923 *
4924 * 1. When a new inode pa is added to rbtree it must have
4925 * pa_free > 0 since otherwise we won't actually need
4926 * preallocation.
4927 *
4928 * 2. An inode pa that is in the rbtree can only have its
4929 * pa_free become zero when another thread calls:
4930 * ext4_mb_new_blocks
4931 * ext4_mb_use_preallocated
4932 * ext4_mb_use_inode_pa
4933 *
4934 * 3. Further, after the above calls make pa_free == 0, we will
4935 * immediately remove it from the rbtree in:
4936 * ext4_mb_new_blocks
4937 * ext4_mb_release_context
4938 * ext4_mb_put_pa
4939 *
4940 * 4. Since the pa_free becoming 0 and pa_free getting removed
4941 * from tree both happen in ext4_mb_new_blocks, which is always
4942 * called with i_data_sem held for data allocations, we can be
4943 * sure that another process will never see a pa in rbtree with
4944 * pa_free == 0.
4945 */
4946 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4947 }
4948 spin_unlock(&tmp_pa->pa_lock);
4949 try_group_pa:
4950 read_unlock(&ei->i_prealloc_lock);
4951
4952 /* can we use group allocation? */
4953 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4954 return false;
4955
4956 /* inode may have no locality group for some reason */
4957 lg = ac->ac_lg;
4958 if (lg == NULL)
4959 return false;
4960 order = fls(ac->ac_o_ex.fe_len) - 1;
4961 if (order > PREALLOC_TB_SIZE - 1)
4962 /* The max size of hash table is PREALLOC_TB_SIZE */
4963 order = PREALLOC_TB_SIZE - 1;
4964
4965 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4966 /*
4967 * search for the prealloc space that is having
4968 * minimal distance from the goal block.
4969 */
4970 for (i = order; i < PREALLOC_TB_SIZE; i++) {
4971 rcu_read_lock();
4972 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4973 pa_node.lg_list) {
4974 spin_lock(&tmp_pa->pa_lock);
4975 if (tmp_pa->pa_deleted == 0 &&
4976 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4977
4978 cpa = ext4_mb_check_group_pa(goal_block,
4979 tmp_pa, cpa);
4980 }
4981 spin_unlock(&tmp_pa->pa_lock);
4982 }
4983 rcu_read_unlock();
4984 }
4985 if (cpa) {
4986 ext4_mb_use_group_pa(ac, cpa);
4987 return true;
4988 }
4989 return false;
4990 }
4991
4992 /*
4993 * The function goes through all preallocations in this group and marks them
4994 * used in the in-core bitmap. The buddy must be generated from this bitmap.
4995 * Needs to be called with the ext4 group lock held.
4996 */
4997 static noinline_for_stack
4998 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4999 ext4_group_t group)
5000 {
5001 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5002 struct ext4_prealloc_space *pa;
5003 struct list_head *cur;
5004 ext4_group_t groupnr;
5005 ext4_grpblk_t start;
5006 int preallocated = 0;
5007 int len;
5008
5009 if (!grp)
5010 return;
5011
5012 /* All forms of preallocation discard first load the group,
5013 * so the only competing code is preallocation use.
5014 * We don't need any locking here.
5015 * Notice we do NOT ignore preallocations with pa_deleted set;
5016 * otherwise we could leave used blocks available for
5017 * allocation in the buddy when a concurrent ext4_mb_put_pa()
5018 * is dropping the preallocation.
5019 */
5020 list_for_each(cur, &grp->bb_prealloc_list) {
5021 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5022 spin_lock(&pa->pa_lock);
5023 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5024 &groupnr, &start);
5025 len = pa->pa_len;
5026 spin_unlock(&pa->pa_lock);
5027 if (unlikely(len == 0))
5028 continue;
5029 BUG_ON(groupnr != group);
5030 mb_set_bits(bitmap, start, len);
5031 preallocated += len;
5032 }
5033 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
5034 }
5035
5036 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
5037 struct ext4_prealloc_space *pa)
5038 {
5039 struct ext4_inode_info *ei;
5040
5041 if (pa->pa_deleted) {
5042 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5043 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5044 pa->pa_len);
5045 return;
5046 }
5047
5048 pa->pa_deleted = 1;
5049
5050 if (pa->pa_type == MB_INODE_PA) {
5051 ei = EXT4_I(pa->pa_inode);
5052 atomic_dec(&ei->i_prealloc_active);
5053 }
5054 }
5055
5056 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5057 {
5058 BUG_ON(!pa);
5059 BUG_ON(atomic_read(&pa->pa_count));
5060 BUG_ON(pa->pa_deleted == 0);
5061 kmem_cache_free(ext4_pspace_cachep, pa);
5062 }
5063
5064 static void ext4_mb_pa_callback(struct rcu_head *head)
5065 {
5066 struct ext4_prealloc_space *pa;
5067
5068 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5069 ext4_mb_pa_free(pa);
5070 }
5071
5072 /*
5073 * drops a reference to preallocated space descriptor
5074 * if this was the last reference and the space is consumed
5075 */
5076 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5077 struct super_block *sb, struct ext4_prealloc_space *pa)
5078 {
5079 ext4_group_t grp;
5080 ext4_fsblk_t grp_blk;
5081 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5082
5083 /* in this short window concurrent discard can set pa_deleted */
5084 spin_lock(&pa->pa_lock);
5085 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5086 spin_unlock(&pa->pa_lock);
5087 return;
5088 }
5089
5090 if (pa->pa_deleted == 1) {
5091 spin_unlock(&pa->pa_lock);
5092 return;
5093 }
5094
5095 ext4_mb_mark_pa_deleted(sb, pa);
5096 spin_unlock(&pa->pa_lock);
5097
5098 grp_blk = pa->pa_pstart;
5099 /*
5100 * If doing group-based preallocation, pa_pstart may be in the
5101 * next group when pa is used up
5102 */
5103 if (pa->pa_type == MB_GROUP_PA)
5104 grp_blk--;
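/*
 * A group pa is consumed by advancing pa_pstart (see
 * ext4_mb_release_context()), so once it is fully used pa_pstart can point
 * at the first block of the next group; stepping back one block keeps us in
 * the group that actually holds this pa on its bb_prealloc_list.
 */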
5105
5106 grp = ext4_get_group_number(sb, grp_blk);
5107
5108 /*
5109 * possible race:
5110 *
5111 * P1 (buddy init) P2 (regular allocation)
5112 * find block B in PA
5113 * copy on-disk bitmap to buddy
5114 * mark B in on-disk bitmap
5115 * drop PA from group
5116 * mark all PAs in buddy
5117 *
5118 * thus, P1 initializes buddy with B available. to prevent this
5119 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5120 * against that pair
5121 */
5122 ext4_lock_group(sb, grp);
5123 list_del(&pa->pa_group_list);
5124 ext4_unlock_group(sb, grp);
5125
5126 if (pa->pa_type == MB_INODE_PA) {
5127 write_lock(pa->pa_node_lock.inode_lock);
5128 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5129 write_unlock(pa->pa_node_lock.inode_lock);
5130 ext4_mb_pa_free(pa);
5131 } else {
5132 spin_lock(pa->pa_node_lock.lg_lock);
5133 list_del_rcu(&pa->pa_node.lg_list);
5134 spin_unlock(pa->pa_node_lock.lg_lock);
5135 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5136 }
5137 }
5138
5139 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5140 {
5141 struct rb_node **iter = &root->rb_node, *parent = NULL;
5142 struct ext4_prealloc_space *iter_pa, *new_pa;
5143 ext4_lblk_t iter_start, new_start;
5144
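/*
 * Plain rbtree descent keyed on pa_lstart: smaller keys go left, equal or
 * larger keys go right, so pas sharing the same logical start are allowed
 * and simply end up in the right subtree.
 */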
5145 while (*iter) {
5146 iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5147 pa_node.inode_node);
5148 new_pa = rb_entry(new, struct ext4_prealloc_space,
5149 pa_node.inode_node);
5150 iter_start = iter_pa->pa_lstart;
5151 new_start = new_pa->pa_lstart;
5152
5153 parent = *iter;
5154 if (new_start < iter_start)
5155 iter = &((*iter)->rb_left);
5156 else
5157 iter = &((*iter)->rb_right);
5158 }
5159
5160 rb_link_node(new, parent, iter);
5161 rb_insert_color(new, root);
5162 }
5163
5164 /*
5165 * creates new preallocated space for given inode
5166 */
5167 static noinline_for_stack void
5168 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
5169 {
5170 struct super_block *sb = ac->ac_sb;
5171 struct ext4_sb_info *sbi = EXT4_SB(sb);
5172 struct ext4_prealloc_space *pa;
5173 struct ext4_group_info *grp;
5174 struct ext4_inode_info *ei;
5175
5176 /* preallocate only when found space is larger than requested */
5177 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5178 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5179 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5180 BUG_ON(ac->ac_pa == NULL);
5181
5182 pa = ac->ac_pa;
5183
5184 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5185 struct ext4_free_extent ex = {
5186 .fe_logical = ac->ac_g_ex.fe_logical,
5187 .fe_len = ac->ac_orig_goal_len,
5188 };
5189 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
5190 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
5191
5192 /*
5193 * We can't allocate as much as normalizer wants, so we try
5194 * to get proper lstart to cover the original request, except
5195 * when the goal doesn't cover the original request as below:
5196 *
5197 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
5198 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
5199 */
5200 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5201 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5202
5203 /*
5204 * Use the below logic for adjusting best extent as it keeps
5205 * fragmentation in check while ensuring logical range of best
5206 * extent doesn't overflow out of goal extent:
5207 *
5208 * 1. Check if best ex can be kept at end of goal (before
5209 * cr_best_avail trimmed it) and still cover original start
5210 * 2. Else, check if best ex can be kept at start of goal and
5211 * still cover original end
5212 * 3. Else, keep the best ex at start of original request.
5213 */
5214 ex.fe_len = ac->ac_b_ex.fe_len;
5215
5216 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5217 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5218 goto adjust_bex;
5219
5220 ex.fe_logical = ac->ac_g_ex.fe_logical;
5221 if (o_ex_end <= extent_logical_end(sbi, &ex))
5222 goto adjust_bex;
5223
5224 ex.fe_logical = ac->ac_o_ex.fe_logical;
5225 adjust_bex:
5226 ac->ac_b_ex.fe_logical = ex.fe_logical;
5227
5228 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5229 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
5230 }
5231
5232 pa->pa_lstart = ac->ac_b_ex.fe_logical;
5233 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5234 pa->pa_len = ac->ac_b_ex.fe_len;
5235 pa->pa_free = pa->pa_len;
5236 spin_lock_init(&pa->pa_lock);
5237 INIT_LIST_HEAD(&pa->pa_group_list);
5238 pa->pa_deleted = 0;
5239 pa->pa_type = MB_INODE_PA;
5240
5241 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5242 pa->pa_len, pa->pa_lstart);
5243 trace_ext4_mb_new_inode_pa(ac, pa);
5244
5245 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5246 ext4_mb_use_inode_pa(ac, pa);
5247
5248 ei = EXT4_I(ac->ac_inode);
5249 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5250 if (!grp)
5251 return;
5252
5253 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5254 pa->pa_inode = ac->ac_inode;
5255
5256 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5257
5258 write_lock(pa->pa_node_lock.inode_lock);
5259 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5260 write_unlock(pa->pa_node_lock.inode_lock);
5261 atomic_inc(&ei->i_prealloc_active);
5262 }
5263
5264 /*
5265 * creates new preallocated space for the locality group the inode belongs to
5266 */
5267 static noinline_for_stack void
5268 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
5269 {
5270 struct super_block *sb = ac->ac_sb;
5271 struct ext4_locality_group *lg;
5272 struct ext4_prealloc_space *pa;
5273 struct ext4_group_info *grp;
5274
5275 /* preallocate only when found space is larger than requested */
5276 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5277 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5278 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5279 BUG_ON(ac->ac_pa == NULL);
5280
5281 pa = ac->ac_pa;
5282
5283 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5284 pa->pa_lstart = pa->pa_pstart;
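/*
 * A group pa is not tied to any file offset, so its logical start simply
 * mirrors the physical start; both are advanced together as the pa is
 * consumed (see ext4_mb_release_context()).
 */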
5285 pa->pa_len = ac->ac_b_ex.fe_len;
5286 pa->pa_free = pa->pa_len;
5287 spin_lock_init(&pa->pa_lock);
5288 INIT_LIST_HEAD(&pa->pa_node.lg_list);
5289 INIT_LIST_HEAD(&pa->pa_group_list);
5290 pa->pa_deleted = 0;
5291 pa->pa_type = MB_GROUP_PA;
5292
5293 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5294 pa->pa_len, pa->pa_lstart);
5295 trace_ext4_mb_new_group_pa(ac, pa);
5296
5297 ext4_mb_use_group_pa(ac, pa);
5298 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5299
5300 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5301 if (!grp)
5302 return;
5303 lg = ac->ac_lg;
5304 BUG_ON(lg == NULL);
5305
5306 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5307 pa->pa_inode = NULL;
5308
5309 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5310
5311 /*
5312 * We will later add the new pa to the right bucket
5313 * after updating the pa_free in ext4_mb_release_context
5314 */
5315 }
5316
5317 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
5318 {
5319 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5320 ext4_mb_new_group_pa(ac);
5321 else
5322 ext4_mb_new_inode_pa(ac);
5323 }
5324
5325 /*
5326 * finds all unused blocks in on-disk bitmap, frees them in
5327 * in-core bitmap and buddy.
5328 * @pa must be unlinked from inode and group lists, so that
5329 * nobody else can find/use it.
5330 * the caller MUST hold group/inode locks.
5331 * TODO: optimize the case when there are no in-core structures yet
5332 */
5333 static noinline_for_stack int
5334 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
5335 struct ext4_prealloc_space *pa)
5336 {
5337 struct super_block *sb = e4b->bd_sb;
5338 struct ext4_sb_info *sbi = EXT4_SB(sb);
5339 unsigned int end;
5340 unsigned int next;
5341 ext4_group_t group;
5342 ext4_grpblk_t bit;
5343 unsigned long long grp_blk_start;
5344 int free = 0;
5345
5346 BUG_ON(pa->pa_deleted == 0);
5347 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5348 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5349 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5350 end = bit + pa->pa_len;
5351
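/*
 * Walk the on-disk bitmap between bit and end: every run of zero bits is
 * preallocated space that was never actually allocated on disk, so it is
 * handed back to the buddy via mb_free_blocks() below.
 */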
5352 while (bit < end) {
5353 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5354 if (bit >= end)
5355 break;
5356 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5357 mb_debug(sb, "free preallocated %u/%u in group %u\n",
5358 (unsigned) ext4_group_first_block_no(sb, group) + bit,
5359 (unsigned) next - bit, (unsigned) group);
5360 free += next - bit;
5361
5362 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5363 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5364 EXT4_C2B(sbi, bit)),
5365 next - bit);
5366 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5367 bit = next + 1;
5368 }
5369 if (free != pa->pa_free) {
5370 ext4_msg(e4b->bd_sb, KERN_CRIT,
5371 "pa %p: logic %lu, phys. %lu, len %d",
5372 pa, (unsigned long) pa->pa_lstart,
5373 (unsigned long) pa->pa_pstart,
5374 pa->pa_len);
5375 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5376 free, pa->pa_free);
5377 /*
5378 * pa is already deleted so we use the value obtained
5379 * from the bitmap and continue.
5380 */
5381 }
5382 atomic_add(free, &sbi->s_mb_discarded);
5383
5384 return 0;
5385 }
5386
5387 static noinline_for_stack int
5388 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
5389 struct ext4_prealloc_space *pa)
5390 {
5391 struct super_block *sb = e4b->bd_sb;
5392 ext4_group_t group;
5393 ext4_grpblk_t bit;
5394
5395 trace_ext4_mb_release_group_pa(sb, pa);
5396 BUG_ON(pa->pa_deleted == 0);
5397 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5398 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5399 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5400 e4b->bd_group, group, pa->pa_pstart);
5401 return 0;
5402 }
5403 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5404 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5405 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
5406
5407 return 0;
5408 }
5409
5410 /*
5411 * releases all preallocations in given group
5412 *
5413 * first, we need to decide discard policy:
5414 * - when do we discard
5415 * 1) ENOSPC
5416 * - how many do we discard
5417 * 1) how many requested
5418 */
5419 static noinline_for_stack int
5420 ext4_mb_discard_group_preallocations(struct super_block *sb,
5421 ext4_group_t group, int *busy)
5422 {
5423 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5424 struct buffer_head *bitmap_bh = NULL;
5425 struct ext4_prealloc_space *pa, *tmp;
5426 LIST_HEAD(list);
5427 struct ext4_buddy e4b;
5428 struct ext4_inode_info *ei;
5429 int err;
5430 int free = 0;
5431
5432 if (!grp)
5433 return 0;
5434 mb_debug(sb, "discard preallocation for group %u\n", group);
5435 if (list_empty(&grp->bb_prealloc_list))
5436 goto out_dbg;
5437
5438 bitmap_bh = ext4_read_block_bitmap(sb, group);
5439 if (IS_ERR(bitmap_bh)) {
5440 err = PTR_ERR(bitmap_bh);
5441 ext4_error_err(sb, -err,
5442 "Error %d reading block bitmap for %u",
5443 err, group);
5444 goto out_dbg;
5445 }
5446
5447 err = ext4_mb_load_buddy(sb, group, &e4b);
5448 if (err) {
5449 ext4_warning(sb, "Error %d loading buddy information for %u",
5450 err, group);
5451 put_bh(bitmap_bh);
5452 goto out_dbg;
5453 }
5454
5455 ext4_lock_group(sb, group);
5456 list_for_each_entry_safe(pa, tmp,
5457 &grp->bb_prealloc_list, pa_group_list) {
5458 spin_lock(&pa->pa_lock);
5459 if (atomic_read(&pa->pa_count)) {
5460 spin_unlock(&pa->pa_lock);
5461 *busy = 1;
5462 continue;
5463 }
5464 if (pa->pa_deleted) {
5465 spin_unlock(&pa->pa_lock);
5466 continue;
5467 }
5468
5469 /* seems this one can be freed ... */
5470 ext4_mb_mark_pa_deleted(sb, pa);
5471
5472 if (!free)
5473 this_cpu_inc(discard_pa_seq);
5474
5475 /* we can trust pa_free ... */
5476 free += pa->pa_free;
5477
5478 spin_unlock(&pa->pa_lock);
5479
5480 list_del(&pa->pa_group_list);
5481 list_add(&pa->u.pa_tmp_list, &list);
5482 }
5483
5484 /* now free all selected PAs */
5485 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5486
5487 /* remove from object (inode or locality group) */
5488 if (pa->pa_type == MB_GROUP_PA) {
5489 spin_lock(pa->pa_node_lock.lg_lock);
5490 list_del_rcu(&pa->pa_node.lg_list);
5491 spin_unlock(pa->pa_node_lock.lg_lock);
5492 } else {
5493 write_lock(pa->pa_node_lock.inode_lock);
5494 ei = EXT4_I(pa->pa_inode);
5495 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5496 write_unlock(pa->pa_node_lock.inode_lock);
5497 }
5498
5499 list_del(&pa->u.pa_tmp_list);
5500
5501 if (pa->pa_type == MB_GROUP_PA) {
5502 ext4_mb_release_group_pa(&e4b, pa);
5503 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5504 } else {
5505 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5506 ext4_mb_pa_free(pa);
5507 }
5508 }
5509
5510 ext4_unlock_group(sb, group);
5511 ext4_mb_unload_buddy(&e4b);
5512 put_bh(bitmap_bh);
5513 out_dbg:
5514 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5515 free, group, grp->bb_free);
5516 return free;
5517 }
5518
5519 /*
5520 * releases all unused preallocated blocks for the given inode
5521 *
5522 * It's important to discard preallocations under i_data_sem
5523 * We don't want another block to be served from the prealloc
5524 * space when we are discarding the inode prealloc space.
5525 *
5526 * FIXME!! Make sure it is valid at all the call sites
5527 */
5528 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
5529 {
5530 struct ext4_inode_info *ei = EXT4_I(inode);
5531 struct super_block *sb = inode->i_sb;
5532 struct buffer_head *bitmap_bh = NULL;
5533 struct ext4_prealloc_space *pa, *tmp;
5534 ext4_group_t group = 0;
5535 LIST_HEAD(list);
5536 struct ext4_buddy e4b;
5537 struct rb_node *iter;
5538 int err;
5539
5540 if (!S_ISREG(inode->i_mode)) {
5541 return;
5542 }
5543
5544 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5545 return;
5546
5547 mb_debug(sb, "discard preallocation for inode %lu\n",
5548 inode->i_ino);
5549 trace_ext4_discard_preallocations(inode,
5550 atomic_read(&ei->i_prealloc_active), needed);
5551
5552 if (needed == 0)
5553 needed = UINT_MAX;
5554
5555 repeat:
5556 /* first, collect all pa's in the inode */
5557 write_lock(&ei->i_prealloc_lock);
5558 for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
5559 iter = rb_next(iter)) {
5560 pa = rb_entry(iter, struct ext4_prealloc_space,
5561 pa_node.inode_node);
5562 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5563
5564 spin_lock(&pa->pa_lock);
5565 if (atomic_read(&pa->pa_count)) {
5566 /* this shouldn't happen often - nobody should
5567 * use preallocation while we're discarding it */
5568 spin_unlock(&pa->pa_lock);
5569 write_unlock(&ei->i_prealloc_lock);
5570 ext4_msg(sb, KERN_ERR,
5571 "uh-oh! used pa while discarding");
5572 WARN_ON(1);
5573 schedule_timeout_uninterruptible(HZ);
5574 goto repeat;
5575
5576 }
5577 if (pa->pa_deleted == 0) {
5578 ext4_mb_mark_pa_deleted(sb, pa);
5579 spin_unlock(&pa->pa_lock);
5580 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5581 list_add(&pa->u.pa_tmp_list, &list);
5582 needed--;
5583 continue;
5584 }
5585
5586 /* someone is deleting pa right now */
5587 spin_unlock(&pa->pa_lock);
5588 write_unlock(&ei->i_prealloc_lock);
5589
5590 /* we have to wait here because pa_deleted
5591 * doesn't mean pa is already unlinked from
5592 * the list. as we might be called from
5593 * ->clear_inode() the inode will get freed
5594 * and concurrent thread which is unlinking
5595 * pa from inode's list may access already
5596 * freed memory, bad-bad-bad */
5597
5598 /* XXX: if this happens too often, we can
5599 * add a flag to force wait only in case
5600 * of ->clear_inode(), but not in case of
5601 * regular truncate */
5602 schedule_timeout_uninterruptible(HZ);
5603 goto repeat;
5604 }
5605 write_unlock(&ei->i_prealloc_lock);
5606
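/*
 * Every pa collected above is already marked deleted and unlinked from the
 * inode's rbtree; now release its unused blocks group by group and free the
 * descriptor itself.
 */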
5607 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5608 BUG_ON(pa->pa_type != MB_INODE_PA);
5609 group = ext4_get_group_number(sb, pa->pa_pstart);
5610
5611 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5612 GFP_NOFS|__GFP_NOFAIL);
5613 if (err) {
5614 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5615 err, group);
5616 continue;
5617 }
5618
5619 bitmap_bh = ext4_read_block_bitmap(sb, group);
5620 if (IS_ERR(bitmap_bh)) {
5621 err = PTR_ERR(bitmap_bh);
5622 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5623 err, group);
5624 ext4_mb_unload_buddy(&e4b);
5625 continue;
5626 }
5627
5628 ext4_lock_group(sb, group);
5629 list_del(&pa->pa_group_list);
5630 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5631 ext4_unlock_group(sb, group);
5632
5633 ext4_mb_unload_buddy(&e4b);
5634 put_bh(bitmap_bh);
5635
5636 list_del(&pa->u.pa_tmp_list);
5637 ext4_mb_pa_free(pa);
5638 }
5639 }
5640
5641 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5642 {
5643 struct ext4_prealloc_space *pa;
5644
5645 BUG_ON(ext4_pspace_cachep == NULL);
5646 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5647 if (!pa)
5648 return -ENOMEM;
5649 atomic_set(&pa->pa_count, 1);
5650 ac->ac_pa = pa;
5651 return 0;
5652 }
5653
5654 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5655 {
5656 struct ext4_prealloc_space *pa = ac->ac_pa;
5657
5658 BUG_ON(!pa);
5659 ac->ac_pa = NULL;
5660 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5661 /*
5662 * This function is only called due to an error or because the
5663 * len of found blocks < len of requested blocks, hence the PA has not
5664 * been added to grp->bb_prealloc_list, so we don't need to lock it.
5665 */
5666 pa->pa_deleted = 1;
5667 ext4_mb_pa_free(pa);
5668 }
5669
5670 #ifdef CONFIG_EXT4_DEBUG
5671 static inline void ext4_mb_show_pa(struct super_block *sb)
5672 {
5673 ext4_group_t i, ngroups;
5674
5675 if (ext4_forced_shutdown(sb))
5676 return;
5677
5678 ngroups = ext4_get_groups_count(sb);
5679 mb_debug(sb, "groups: ");
5680 for (i = 0; i < ngroups; i++) {
5681 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5682 struct ext4_prealloc_space *pa;
5683 ext4_grpblk_t start;
5684 struct list_head *cur;
5685
5686 if (!grp)
5687 continue;
5688 ext4_lock_group(sb, i);
5689 list_for_each(cur, &grp->bb_prealloc_list) {
5690 pa = list_entry(cur, struct ext4_prealloc_space,
5691 pa_group_list);
5692 spin_lock(&pa->pa_lock);
5693 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5694 NULL, &start);
5695 spin_unlock(&pa->pa_lock);
5696 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5697 pa->pa_len);
5698 }
5699 ext4_unlock_group(sb, i);
5700 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5701 grp->bb_fragments);
5702 }
5703 }
5704
5705 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5706 {
5707 struct super_block *sb = ac->ac_sb;
5708
5709 if (ext4_forced_shutdown(sb))
5710 return;
5711
5712 mb_debug(sb, "Can't allocate:"
5713 " Allocation context details:");
5714 mb_debug(sb, "status %u flags 0x%x",
5715 ac->ac_status, ac->ac_flags);
5716 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5717 "goal %lu/%lu/%lu@%lu, "
5718 "best %lu/%lu/%lu@%lu cr %d",
5719 (unsigned long)ac->ac_o_ex.fe_group,
5720 (unsigned long)ac->ac_o_ex.fe_start,
5721 (unsigned long)ac->ac_o_ex.fe_len,
5722 (unsigned long)ac->ac_o_ex.fe_logical,
5723 (unsigned long)ac->ac_g_ex.fe_group,
5724 (unsigned long)ac->ac_g_ex.fe_start,
5725 (unsigned long)ac->ac_g_ex.fe_len,
5726 (unsigned long)ac->ac_g_ex.fe_logical,
5727 (unsigned long)ac->ac_b_ex.fe_group,
5728 (unsigned long)ac->ac_b_ex.fe_start,
5729 (unsigned long)ac->ac_b_ex.fe_len,
5730 (unsigned long)ac->ac_b_ex.fe_logical,
5731 (int)ac->ac_criteria);
5732 mb_debug(sb, "%u found", ac->ac_found);
5733 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5734 if (ac->ac_pa)
5735 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5736 "group pa" : "inode pa");
5737 ext4_mb_show_pa(sb);
5738 }
5739 #else
5740 static inline void ext4_mb_show_pa(struct super_block *sb)
5741 {
5742 }
5743 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5744 {
5745 ext4_mb_show_pa(ac->ac_sb);
5746 }
5747 #endif
5748
5749 /*
5750 * We use locality group preallocation for small files. The size of the
5751 * file is determined by the current size or the resulting size after
5752 * allocation, whichever is larger.
5753 *
5754 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5755 */
5756 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5757 {
5758 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5759 int bsbits = ac->ac_sb->s_blocksize_bits;
5760 loff_t size, isize;
5761 bool inode_pa_eligible, group_pa_eligible;
5762
5763 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5764 return;
5765
5766 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5767 return;
5768
5769 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5770 inode_pa_eligible = true;
5771 size = extent_logical_end(sbi, &ac->ac_o_ex);
5772 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5773 >> bsbits;
5774
5775 /* No point in using inode preallocation for closed files */
5776 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5777 !inode_is_open_for_write(ac->ac_inode))
5778 inode_pa_eligible = false;
5779
5780 size = max(size, isize);
5781 /* Don't use group allocation for large files */
5782 if (size > sbi->s_mb_stream_request)
5783 group_pa_eligible = false;
5784
5785 if (!group_pa_eligible) {
5786 if (inode_pa_eligible)
5787 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5788 else
5789 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5790 return;
5791 }
5792
5793 BUG_ON(ac->ac_lg != NULL);
5794 /*
5795 * locality group prealloc space is per cpu. The reason for having
5796 * a per-cpu locality group is to reduce contention between block
5797 * requests from multiple CPUs.
5798 */
5799 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5800
5801 /* we're going to use group allocation */
5802 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5803
5804 /* serialize all allocations in the group */
5805 mutex_lock(&ac->ac_lg->lg_mutex);
5806 }
5807
5808 static noinline_for_stack void
5809 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5810 struct ext4_allocation_request *ar)
5811 {
5812 struct super_block *sb = ar->inode->i_sb;
5813 struct ext4_sb_info *sbi = EXT4_SB(sb);
5814 struct ext4_super_block *es = sbi->s_es;
5815 ext4_group_t group;
5816 unsigned int len;
5817 ext4_fsblk_t goal;
5818 ext4_grpblk_t block;
5819
5820 /* we can't allocate > group size */
5821 len = ar->len;
5822
5823 /* just a dirty hack to filter too big requests */
5824 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5825 len = EXT4_CLUSTERS_PER_GROUP(sb);
5826
5827 /* start searching from the goal */
5828 goal = ar->goal;
5829 if (goal < le32_to_cpu(es->s_first_data_block) ||
5830 goal >= ext4_blocks_count(es))
5831 goal = le32_to_cpu(es->s_first_data_block);
5832 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5833
5834 /* set up allocation goals */
5835 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5836 ac->ac_status = AC_STATUS_CONTINUE;
5837 ac->ac_sb = sb;
5838 ac->ac_inode = ar->inode;
5839 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5840 ac->ac_o_ex.fe_group = group;
5841 ac->ac_o_ex.fe_start = block;
5842 ac->ac_o_ex.fe_len = len;
5843 ac->ac_g_ex = ac->ac_o_ex;
5844 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5845 ac->ac_flags = ar->flags;
5846
5847 /* we have to define context: we'll work with a file or
5848 * locality group. this is a policy, actually */
5849 ext4_mb_group_or_file(ac);
5850
5851 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5852 "left: %u/%u, right %u/%u to %swritable\n",
5853 (unsigned) ar->len, (unsigned) ar->logical,
5854 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5855 (unsigned) ar->lleft, (unsigned) ar->pleft,
5856 (unsigned) ar->lright, (unsigned) ar->pright,
5857 inode_is_open_for_write(ar->inode) ? "" : "non-");
5858 }
5859
5860 static noinline_for_stack void
5861 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5862 struct ext4_locality_group *lg,
5863 int order, int total_entries)
5864 {
5865 ext4_group_t group = 0;
5866 struct ext4_buddy e4b;
5867 LIST_HEAD(discard_list);
5868 struct ext4_prealloc_space *pa, *tmp;
5869
5870 mb_debug(sb, "discard locality group preallocation\n");
5871
5872 spin_lock(&lg->lg_prealloc_lock);
5873 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5874 pa_node.lg_list,
5875 lockdep_is_held(&lg->lg_prealloc_lock)) {
5876 spin_lock(&pa->pa_lock);
5877 if (atomic_read(&pa->pa_count)) {
5878 /*
5879 * This is the pa that we just used
5880 * for block allocation, so don't
5881 * free it.
5882 */
5883 spin_unlock(&pa->pa_lock);
5884 continue;
5885 }
5886 if (pa->pa_deleted) {
5887 spin_unlock(&pa->pa_lock);
5888 continue;
5889 }
5890 /* only lg prealloc space */
5891 BUG_ON(pa->pa_type != MB_GROUP_PA);
5892
5893 /* seems this one can be freed ... */
5894 ext4_mb_mark_pa_deleted(sb, pa);
5895 spin_unlock(&pa->pa_lock);
5896
5897 list_del_rcu(&pa->pa_node.lg_list);
5898 list_add(&pa->u.pa_tmp_list, &discard_list);
5899
5900 total_entries--;
5901 if (total_entries <= 5) {
5902 /*
5903 * we want to keep only 5 entries,
5904 * allowing it to grow to 8. This
5905 * makes sure we don't call discard
5906 * again soon for this list.
5907 */
5908 break;
5909 }
5910 }
5911 spin_unlock(&lg->lg_prealloc_lock);
5912
5913 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5914 int err;
5915
5916 group = ext4_get_group_number(sb, pa->pa_pstart);
5917 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5918 GFP_NOFS|__GFP_NOFAIL);
5919 if (err) {
5920 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5921 err, group);
5922 continue;
5923 }
5924 ext4_lock_group(sb, group);
5925 list_del(&pa->pa_group_list);
5926 ext4_mb_release_group_pa(&e4b, pa);
5927 ext4_unlock_group(sb, group);
5928
5929 ext4_mb_unload_buddy(&e4b);
5930 list_del(&pa->u.pa_tmp_list);
5931 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5932 }
5933 }
5934
5935 /*
5936 * We have incremented pa_count. So it cannot be freed at this
5937 * point. Also we hold lg_mutex. So no parallel allocation is
5938 * possible from this lg. That means pa_free cannot be updated.
5939 *
5940 * A parallel ext4_mb_discard_group_preallocations is possible,
5941 * which can cause the lg_prealloc_list to be updated.
5942 */
5943
5944 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5945 {
5946 int order, added = 0, lg_prealloc_count = 1;
5947 struct super_block *sb = ac->ac_sb;
5948 struct ext4_locality_group *lg = ac->ac_lg;
5949 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5950
5951 order = fls(pa->pa_free) - 1;
5952 if (order > PREALLOC_TB_SIZE - 1)
5953 /* The max size of hash table is PREALLOC_TB_SIZE */
5954 order = PREALLOC_TB_SIZE - 1;
5955 /* Add the prealloc space to lg */
5956 spin_lock(&lg->lg_prealloc_lock);
5957 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5958 pa_node.lg_list,
5959 lockdep_is_held(&lg->lg_prealloc_lock)) {
5960 spin_lock(&tmp_pa->pa_lock);
5961 if (tmp_pa->pa_deleted) {
5962 spin_unlock(&tmp_pa->pa_lock);
5963 continue;
5964 }
5965 if (!added && pa->pa_free < tmp_pa->pa_free) {
5966 /* Add to the tail of the previous entry */
5967 list_add_tail_rcu(&pa->pa_node.lg_list,
5968 &tmp_pa->pa_node.lg_list);
5969 added = 1;
5970 /*
5971 * we want to count the total
5972 * number of entries in the list
5973 */
5974 }
5975 spin_unlock(&tmp_pa->pa_lock);
5976 lg_prealloc_count++;
5977 }
5978 if (!added)
5979 list_add_tail_rcu(&pa->pa_node.lg_list,
5980 &lg->lg_prealloc_list[order]);
5981 spin_unlock(&lg->lg_prealloc_lock);
5982
5983 /* Now trim the list to be not more than 8 elements */
5984 if (lg_prealloc_count > 8)
5985 ext4_mb_discard_lg_preallocations(sb, lg,
5986 order, lg_prealloc_count);
5987 }
5988
5989 /*
5990 * release all resource we used in allocation
5991 */
5992 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5993 {
5994 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5995 struct ext4_prealloc_space *pa = ac->ac_pa;
5996 if (pa) {
5997 if (pa->pa_type == MB_GROUP_PA) {
5998 /* see comment in ext4_mb_use_group_pa() */
5999 spin_lock(&pa->pa_lock);
6000 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6001 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6002 pa->pa_free -= ac->ac_b_ex.fe_len;
6003 pa->pa_len -= ac->ac_b_ex.fe_len;
6004 spin_unlock(&pa->pa_lock);
6005
6006 /*
6007 * We want to add the pa to the right bucket.
6008 * Remove it from the list and while adding
6009 * make sure the list to which we are adding
6010 * doesn't grow big.
6011 */
6012 if (likely(pa->pa_free)) {
6013 spin_lock(pa->pa_node_lock.lg_lock);
6014 list_del_rcu(&pa->pa_node.lg_list);
6015 spin_unlock(pa->pa_node_lock.lg_lock);
6016 ext4_mb_add_n_trim(ac);
6017 }
6018 }
6019
6020 ext4_mb_put_pa(ac, ac->ac_sb, pa);
6021 }
6022 if (ac->ac_bitmap_page)
6023 put_page(ac->ac_bitmap_page);
6024 if (ac->ac_buddy_page)
6025 put_page(ac->ac_buddy_page);
6026 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
6027 mutex_unlock(&ac->ac_lg->lg_mutex);
6028 ext4_mb_collect_stats(ac);
6029 return 0;
6030 }
6031
6032 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
6033 {
6034 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
6035 int ret;
6036 int freed = 0, busy = 0;
6037 int retry = 0;
6038
6039 trace_ext4_mb_discard_preallocations(sb, needed);
6040
6041 if (needed == 0)
6042 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
6043 repeat:
6044 for (i = 0; i < ngroups && needed > 0; i++) {
6045 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
6046 freed += ret;
6047 needed -= ret;
6048 cond_resched();
6049 }
6050
6051 if (needed > 0 && busy && ++retry < 3) {
6052 busy = 0;
6053 goto repeat;
6054 }
6055
6056 return freed;
6057 }
6058
6059 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
6060 struct ext4_allocation_context *ac, u64 *seq)
6061 {
6062 int freed;
6063 u64 seq_retry = 0;
6064 bool ret = false;
6065
6066 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6067 if (freed) {
6068 ret = true;
6069 goto out_dbg;
6070 }
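/*
 * Nothing was freed here, but if the global discard sequence has moved since
 * the caller sampled it, some other context has discarded preallocations in
 * the meantime, so a strict-mode retry of the allocation may still succeed.
 */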
6071 seq_retry = ext4_get_discard_pa_seq_sum();
6072 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6073 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
6074 *seq = seq_retry;
6075 ret = true;
6076 }
6077
6078 out_dbg:
6079 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
6080 return ret;
6081 }
6082
6083 /*
6084 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6085 * linearly starting at the goal block and also excludes the blocks which
6086 * are going to be in use after fast commit replay.
6087 */
6088 static ext4_fsblk_t
6089 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6090 {
6091 struct buffer_head *bitmap_bh;
6092 struct super_block *sb = ar->inode->i_sb;
6093 struct ext4_sb_info *sbi = EXT4_SB(sb);
6094 ext4_group_t group, nr;
6095 ext4_grpblk_t blkoff;
6096 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6097 ext4_grpblk_t i = 0;
6098 ext4_fsblk_t goal, block;
6099 struct ext4_super_block *es = sbi->s_es;
6100
6101 goal = ar->goal;
6102 if (goal < le32_to_cpu(es->s_first_data_block) ||
6103 goal >= ext4_blocks_count(es))
6104 goal = le32_to_cpu(es->s_first_data_block);
6105
6106 ar->len = 0;
6107 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6108 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6109 bitmap_bh = ext4_read_block_bitmap(sb, group);
6110 if (IS_ERR(bitmap_bh)) {
6111 *errp = PTR_ERR(bitmap_bh);
6112 pr_warn("Failed to read block bitmap\n");
6113 return 0;
6114 }
6115
6116 while (1) {
6117 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6118 blkoff);
6119 if (i >= max)
6120 break;
6121 if (ext4_fc_replay_check_excluded(sb,
6122 ext4_group_first_block_no(sb, group) +
6123 EXT4_C2B(sbi, i))) {
6124 blkoff = i + 1;
6125 } else
6126 break;
6127 }
6128 brelse(bitmap_bh);
6129 if (i < max)
6130 break;
6131
6132 if (++group >= ext4_get_groups_count(sb))
6133 group = 0;
6134
6135 blkoff = 0;
6136 }
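/*
 * The scan starts at the goal's group and wraps around to group 0, visiting
 * each group at most once (nr counts them down); if every group is exhausted
 * or excluded we fall through with i >= max and report ENOSPC.
 */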
6137
6138 if (i >= max) {
6139 *errp = -ENOSPC;
6140 return 0;
6141 }
6142
6143 block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6144 ext4_mb_mark_bb(sb, block, 1, 1);
6145 ar->len = 1;
6146
6147 return block;
6148 }
6149
6150 /*
6151 * Main entry point into mballoc to allocate blocks.
6152 * It tries to use preallocation first, then falls back
6153 * to the usual allocation.
6154 */
6155 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6156 struct ext4_allocation_request *ar, int *errp)
6157 {
6158 struct ext4_allocation_context *ac = NULL;
6159 struct ext4_sb_info *sbi;
6160 struct super_block *sb;
6161 ext4_fsblk_t block = 0;
6162 unsigned int inquota = 0;
6163 unsigned int reserv_clstrs = 0;
6164 int retries = 0;
6165 u64 seq;
6166
6167 might_sleep();
6168 sb = ar->inode->i_sb;
6169 sbi = EXT4_SB(sb);
6170
6171 trace_ext4_request_blocks(ar);
6172 if (sbi->s_mount_state & EXT4_FC_REPLAY)
6173 return ext4_mb_new_blocks_simple(ar, errp);
6174
6175 /* Allow to use superuser reservation for quota file */
6176 if (ext4_is_quota_file(ar->inode))
6177 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6178
6179 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6180 /* Without delayed allocation we need to verify
6181 * there are enough free blocks to do block allocation
6182 * and verify allocation doesn't exceed the quota limits.
6183 */
6184 while (ar->len &&
6185 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6186
6187 /* let others free the space */
6188 cond_resched();
6189 ar->len = ar->len >> 1;
6190 }
6191 if (!ar->len) {
6192 ext4_mb_show_pa(sb);
6193 *errp = -ENOSPC;
6194 return 0;
6195 }
6196 reserv_clstrs = ar->len;
6197 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6198 dquot_alloc_block_nofail(ar->inode,
6199 EXT4_C2B(sbi, ar->len));
6200 } else {
6201 while (ar->len &&
6202 dquot_alloc_block(ar->inode,
6203 EXT4_C2B(sbi, ar->len))) {
6204
6205 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6206 ar->len--;
6207 }
6208 }
6209 inquota = ar->len;
6210 if (ar->len == 0) {
6211 *errp = -EDQUOT;
6212 goto out;
6213 }
6214 }
6215
6216 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6217 if (!ac) {
6218 ar->len = 0;
6219 *errp = -ENOMEM;
6220 goto out;
6221 }
6222
6223 ext4_mb_initialize_context(ac, ar);
6224
6225 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6226 seq = this_cpu_read(discard_pa_seq);
6227 if (!ext4_mb_use_preallocated(ac)) {
6228 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6229 ext4_mb_normalize_request(ac, ar);
6230
6231 *errp = ext4_mb_pa_alloc(ac);
6232 if (*errp)
6233 goto errout;
6234 repeat:
6235 /* allocate space in core */
6236 *errp = ext4_mb_regular_allocator(ac);
6237 /*
6238 * The pa allocated above is added to grp->bb_prealloc_list only
6239 * when we were able to allocate some blocks, i.e. when
6240 * ac->ac_status == AC_STATUS_FOUND.
6241 * An error from above means ac->ac_status != AC_STATUS_FOUND,
6242 * so we have to free this pa here.
6243 */
6244 if (*errp) {
6245 ext4_mb_pa_put_free(ac);
6246 ext4_discard_allocated_blocks(ac);
6247 goto errout;
6248 }
6249 if (ac->ac_status == AC_STATUS_FOUND &&
6250 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6251 ext4_mb_pa_put_free(ac);
6252 }
6253 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6254 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6255 if (*errp) {
6256 ext4_discard_allocated_blocks(ac);
6257 goto errout;
6258 } else {
6259 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6260 ar->len = ac->ac_b_ex.fe_len;
6261 }
6262 } else {
6263 if (++retries < 3 &&
6264 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6265 goto repeat;
6266 /*
6267 * If block allocation fails then the pa allocated above
6268 * needs to be freed here as well.
6269 */
6270 ext4_mb_pa_put_free(ac);
6271 *errp = -ENOSPC;
6272 }
6273
6274 if (*errp) {
6275 errout:
6276 ac->ac_b_ex.fe_len = 0;
6277 ar->len = 0;
6278 ext4_mb_show_ac(ac);
6279 }
6280 ext4_mb_release_context(ac);
6281 kmem_cache_free(ext4_ac_cachep, ac);
6282 out:
6283 if (inquota && ar->len < inquota)
6284 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6285 if (!ar->len) {
6286 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6287 /* release all the reserved blocks if non delalloc */
6288 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6289 reserv_clstrs);
6290 }
6291
6292 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6293
6294 return block;
6295 }
6296
6297 /*
6298 * We can merge two free data extents only if the physical blocks
6299 * are contiguous, AND the extents were freed by the same transaction,
6300 * AND the blocks are associated with the same group.
6301 */
6302 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6303 struct ext4_free_data *entry,
6304 struct ext4_free_data *new_entry,
6305 struct rb_root *entry_rb_root)
6306 {
6307 if ((entry->efd_tid != new_entry->efd_tid) ||
6308 (entry->efd_group != new_entry->efd_group))
6309 return;
6310 if (entry->efd_start_cluster + entry->efd_count ==
6311 new_entry->efd_start_cluster) {
6312 new_entry->efd_start_cluster = entry->efd_start_cluster;
6313 new_entry->efd_count += entry->efd_count;
6314 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6315 entry->efd_start_cluster) {
6316 new_entry->efd_count += entry->efd_count;
6317 } else
6318 return;
6319 spin_lock(&sbi->s_md_lock);
6320 list_del(&entry->efd_list);
6321 spin_unlock(&sbi->s_md_lock);
6322 rb_erase(&entry->efd_node, entry_rb_root);
6323 kmem_cache_free(ext4_free_data_cachep, entry);
6324 }
6325
6326 static noinline_for_stack void
6327 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6328 struct ext4_free_data *new_entry)
6329 {
6330 ext4_group_t group = e4b->bd_group;
6331 ext4_grpblk_t cluster;
6332 ext4_grpblk_t clusters = new_entry->efd_count;
6333 struct ext4_free_data *entry;
6334 struct ext4_group_info *db = e4b->bd_info;
6335 struct super_block *sb = e4b->bd_sb;
6336 struct ext4_sb_info *sbi = EXT4_SB(sb);
6337 struct rb_node **n = &db->bb_free_root.rb_node, *node;
6338 struct rb_node *parent = NULL, *new_node;
6339
6340 BUG_ON(!ext4_handle_valid(handle));
6341 BUG_ON(e4b->bd_bitmap_page == NULL);
6342 BUG_ON(e4b->bd_buddy_page == NULL);
6343
6344 new_node = &new_entry->efd_node;
6345 cluster = new_entry->efd_start_cluster;
6346
6347 if (!*n) {
6348 /* first free block extent. We need to
6349 * protect the buddy cache from being freed,
6350 * otherwise we'll refresh it from the
6351 * on-disk bitmap and lose not-yet-available
6352 * blocks */
6353 get_page(e4b->bd_buddy_page);
6354 get_page(e4b->bd_bitmap_page);
6355 }
6356 while (*n) {
6357 parent = *n;
6358 entry = rb_entry(parent, struct ext4_free_data, efd_node);
6359 if (cluster < entry->efd_start_cluster)
6360 n = &(*n)->rb_left;
6361 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6362 n = &(*n)->rb_right;
6363 else {
6364 ext4_grp_locked_error(sb, group, 0,
6365 ext4_group_first_block_no(sb, group) +
6366 EXT4_C2B(sbi, cluster),
6367 "Block already on to-be-freed list");
6368 kmem_cache_free(ext4_free_data_cachep, new_entry);
6369 return;
6370 }
6371 }
6372
6373 rb_link_node(new_node, parent, n);
6374 rb_insert_color(new_node, &db->bb_free_root);
6375
6376 /* Now try to see if the extent can be merged to the left and right */
6377 node = rb_prev(new_node);
6378 if (node) {
6379 entry = rb_entry(node, struct ext4_free_data, efd_node);
6380 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6381 &(db->bb_free_root));
6382 }
6383
6384 node = rb_next(new_node);
6385 if (node) {
6386 entry = rb_entry(node, struct ext4_free_data, efd_node);
6387 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6388 &(db->bb_free_root));
6389 }
6390
6391 spin_lock(&sbi->s_md_lock);
6392 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
6393 sbi->s_mb_free_pending += clusters;
6394 spin_unlock(&sbi->s_md_lock);
6395 }
6396
6397 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6398 unsigned long count)
6399 {
6400 struct buffer_head *bitmap_bh;
6401 struct super_block *sb = inode->i_sb;
6402 struct ext4_group_desc *gdp;
6403 struct buffer_head *gdp_bh;
6404 ext4_group_t group;
6405 ext4_grpblk_t blkoff;
6406 int already_freed = 0, err, i;
6407
6408 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6409 bitmap_bh = ext4_read_block_bitmap(sb, group);
6410 if (IS_ERR(bitmap_bh)) {
6411 pr_warn("Failed to read block bitmap\n");
6412 return;
6413 }
6414 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
6415 if (!gdp)
6416 goto err_out;
6417
6418 for (i = 0; i < count; i++) {
6419 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
6420 already_freed++;
6421 }
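/*
 * Blocks that are already clear in the bitmap are counted separately so the
 * group's free-cluster count below is only bumped for blocks this call
 * actually transitions from used to free.
 */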
6422 mb_clear_bits(bitmap_bh->b_data, blkoff, count);
6423 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
6424 if (err)
6425 goto err_out;
6426 ext4_free_group_clusters_set(
6427 sb, gdp, ext4_free_group_clusters(sb, gdp) +
6428 count - already_freed);
6429 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
6430 ext4_group_desc_csum_set(sb, group, gdp);
6431 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
6432 sync_dirty_buffer(bitmap_bh);
6433 sync_dirty_buffer(gdp_bh);
6434
6435 err_out:
6436 brelse(bitmap_bh);
6437 }
6438
6439 /**
6440 * ext4_mb_clear_bb() -- helper function for freeing blocks.
6441 * Used by ext4_free_blocks()
6442 * @handle: handle for this transaction
6443 * @inode: inode
6444 * @block: starting physical block to be freed
6445 * @count: number of blocks to be freed
6446 * @flags: flags used by ext4_free_blocks
6447 */
6448 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6449 ext4_fsblk_t block, unsigned long count,
6450 int flags)
6451 {
6452 struct buffer_head *bitmap_bh = NULL;
6453 struct super_block *sb = inode->i_sb;
6454 struct ext4_group_desc *gdp;
6455 struct ext4_group_info *grp;
6456 unsigned int overflow;
6457 ext4_grpblk_t bit;
6458 struct buffer_head *gd_bh;
6459 ext4_group_t block_group;
6460 struct ext4_sb_info *sbi;
6461 struct ext4_buddy e4b;
6462 unsigned int count_clusters;
6463 int err = 0;
6464 int ret;
6465
6466 sbi = EXT4_SB(sb);
6467
6468 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6469 !ext4_inode_block_valid(inode, block, count)) {
6470 ext4_error(sb, "Freeing blocks in system zone - "
6471 "Block = %llu, count = %lu", block, count);
6472 /* err = 0. ext4_std_error should be a no op */
6473 goto error_return;
6474 }
6475 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6476
6477 do_more:
6478 overflow = 0;
6479 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6480
6481 grp = ext4_get_group_info(sb, block_group);
6482 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
6483 return;
6484
6485 /*
6486 * Check to see if we are freeing blocks across a group
6487 * boundary.
6488 */
6489 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6490 overflow = EXT4_C2B(sbi, bit) + count -
6491 EXT4_BLOCKS_PER_GROUP(sb);
6492 count -= overflow;
6493 /* The range changed so it's no longer validated */
6494 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6495 }
6496 count_clusters = EXT4_NUM_B2C(sbi, count);
6497 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6498 if (IS_ERR(bitmap_bh)) {
6499 err = PTR_ERR(bitmap_bh);
6500 bitmap_bh = NULL;
6501 goto error_return;
6502 }
6503 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
6504 if (!gdp) {
6505 err = -EIO;
6506 goto error_return;
6507 }
6508
6509 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6510 !ext4_inode_block_valid(inode, block, count)) {
6511 ext4_error(sb, "Freeing blocks in system zone - "
6512 "Block = %llu, count = %lu", block, count);
6513 /* err = 0. ext4_std_error should be a no op */
6514 goto error_return;
6515 }
6516
6517 BUFFER_TRACE(bitmap_bh, "getting write access");
6518 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6519 EXT4_JTR_NONE);
6520 if (err)
6521 goto error_return;
6522
6523 /*
6524 * We are about to modify some metadata. Call the journal APIs
6525 * to unshare ->b_data if a currently-committing transaction is
6526 * using it
6527 */
6528 BUFFER_TRACE(gd_bh, "get_write_access");
6529 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6530 if (err)
6531 goto error_return;
6532 #ifdef AGGRESSIVE_CHECK
6533 {
6534 int i;
6535 for (i = 0; i < count_clusters; i++)
6536 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
6537 }
6538 #endif
6539 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6540
6541 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6542 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6543 GFP_NOFS|__GFP_NOFAIL);
6544 if (err)
6545 goto error_return;
6546
6547 /*
6548 * We need to make sure we don't reuse the freed block until after the
6549 * transaction is committed. We make an exception if the inode is to be
6550 * written in writeback mode since writeback mode has weak data
6551 * consistency guarantees.
6552 */
6553 if (ext4_handle_valid(handle) &&
6554 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6555 !ext4_should_writeback_data(inode))) {
6556 struct ext4_free_data *new_entry;
6557 /*
6558 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6559 * to fail.
6560 */
6561 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6562 GFP_NOFS|__GFP_NOFAIL);
6563 new_entry->efd_start_cluster = bit;
6564 new_entry->efd_group = block_group;
6565 new_entry->efd_count = count_clusters;
6566 new_entry->efd_tid = handle->h_transaction->t_tid;
6567
6568 ext4_lock_group(sb, block_group);
6569 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6570 ext4_mb_free_metadata(handle, &e4b, new_entry);
6571 } else {
6572 /* need to update group_info->bb_free and bitmap
6573 * with the group lock held. generate_buddy looks at
6574 * them with the group lock held
6575 */
6576 if (test_opt(sb, DISCARD)) {
6577 err = ext4_issue_discard(sb, block_group, bit,
6578 count_clusters, NULL);
6579 if (err && err != -EOPNOTSUPP)
6580 ext4_msg(sb, KERN_WARNING, "discard request in"
6581 " group:%u block:%d count:%lu failed"
6582 " with %d", block_group, bit, count,
6583 err);
6584 } else
6585 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6586
6587 ext4_lock_group(sb, block_group);
6588 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6589 mb_free_blocks(inode, &e4b, bit, count_clusters);
6590 }
6591
6592 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6593 ext4_free_group_clusters_set(sb, gdp, ret);
6594 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
6595 ext4_group_desc_csum_set(sb, block_group, gdp);
6596 ext4_unlock_group(sb, block_group);
6597
6598 if (sbi->s_log_groups_per_flex) {
6599 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6600 atomic64_add(count_clusters,
6601 &sbi_array_rcu_deref(sbi, s_flex_groups,
6602 flex_group)->free_clusters);
6603 }
6604
6605 /*
6606 * on a bigalloc file system, defer the s_freeclusters_counter
6607 * update to the caller (ext4_remove_space and friends) so they
6608 * can determine if a cluster freed here should be rereserved
6609 */
6610 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6611 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6612 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6613 percpu_counter_add(&sbi->s_freeclusters_counter,
6614 count_clusters);
6615 }
6616
6617 ext4_mb_unload_buddy(&e4b);
6618
6619 /* We dirtied the bitmap block */
6620 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6621 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6622
6623 /* And the group descriptor block */
6624 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6625 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6626 if (!err)
6627 err = ret;
6628
6629 if (overflow && !err) {
6630 block += count;
6631 count = overflow;
6632 put_bh(bitmap_bh);
6633 /* The range changed so it's no longer validated */
6634 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6635 goto do_more;
6636 }
6637 error_return:
6638 brelse(bitmap_bh);
6639 ext4_std_error(sb, err);
6640 }
6641
6642 /**
6643 * ext4_free_blocks() -- Free given blocks and update quota
6644 * @handle: handle for this transaction
6645 * @inode: inode
6646 * @bh: optional buffer of the block to be freed
6647 * @block: starting physical block to be freed
6648 * @count: number of blocks to be freed
6649 * @flags: flags used by ext4_free_blocks
6650 */
6651 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6652 struct buffer_head *bh, ext4_fsblk_t block,
6653 unsigned long count, int flags)
6654 {
6655 struct super_block *sb = inode->i_sb;
6656 unsigned int overflow;
6657 struct ext4_sb_info *sbi;
6658
6659 sbi = EXT4_SB(sb);
6660
6661 if (bh) {
6662 if (block)
6663 BUG_ON(block != bh->b_blocknr);
6664 else
6665 block = bh->b_blocknr;
6666 }
6667
6668 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6669 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
6670 return;
6671 }
6672
6673 might_sleep();
6674
6675 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6676 !ext4_inode_block_valid(inode, block, count)) {
6677 ext4_error(sb, "Freeing blocks not in datazone - "
6678 "block = %llu, count = %lu", block, count);
6679 return;
6680 }
6681 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6682
6683 ext4_debug("freeing block %llu\n", block);
6684 trace_ext4_free_blocks(inode, block, count, flags);
6685
6686 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6687 BUG_ON(count > 1);
6688
6689 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6690 inode, bh, block);
6691 }
6692
6693 /*
6694 * If the extent to be freed does not begin on a cluster
6695 * boundary, we need to deal with partial clusters at the
6696 * beginning and end of the extent. Normally we will free
6697 * blocks at the beginning or the end unless we are explicitly
6698 * requested to avoid doing so.
6699 */
6700 overflow = EXT4_PBLK_COFF(sbi, block);
6701 if (overflow) {
6702 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6703 overflow = sbi->s_cluster_ratio - overflow;
6704 block += overflow;
6705 if (count > overflow)
6706 count -= overflow;
6707 else
6708 return;
6709 } else {
6710 block -= overflow;
6711 count += overflow;
6712 }
6713 /* The range changed so it's no longer validated */
6714 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6715 }
6716 overflow = EXT4_LBLK_COFF(sbi, count);
6717 if (overflow) {
6718 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6719 if (count > overflow)
6720 count -= overflow;
6721 else
6722 return;
6723 } else
6724 count += sbi->s_cluster_ratio - overflow;
6725 /* The range changed so it's no longer validated */
6726 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6727 }
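/*
 * Worked example (illustrative): with a cluster ratio of 4, freeing block 10
 * for 7 blocks with neither NOFREE flag set rounds the range out to cluster
 * boundaries - the start moves back to block 8, the count grows to 12, and
 * blocks 8..19 (clusters 2..4) are released.
 */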
6728
6729 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6730 int i;
6731 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6732
6733 for (i = 0; i < count; i++) {
6734 cond_resched();
6735 if (is_metadata)
6736 bh = sb_find_get_block(inode->i_sb, block + i);
6737 ext4_forget(handle, is_metadata, inode, bh, block + i);
6738 }
6739 }
6740
6741 ext4_mb_clear_bb(handle, inode, block, count, flags);
6742 }
6743
6744 /**
6745 * ext4_group_add_blocks() -- Add given blocks to an existing group
6746 * @handle: handle to this transaction
6747 * @sb: super block
6748 * @block: start physical block to add to the block group
6749 * @count: number of blocks to free
6750 *
6751 * This marks the blocks as free in the bitmap and buddy.
6752 */
6753 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6754 ext4_fsblk_t block, unsigned long count)
6755 {
6756 struct buffer_head *bitmap_bh = NULL;
6757 struct buffer_head *gd_bh;
6758 ext4_group_t block_group;
6759 ext4_grpblk_t bit;
6760 unsigned int i;
6761 struct ext4_group_desc *desc;
6762 struct ext4_sb_info *sbi = EXT4_SB(sb);
6763 struct ext4_buddy e4b;
6764 int err = 0, ret, free_clusters_count;
6765 ext4_grpblk_t clusters_freed;
6766 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6767 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6768 unsigned long cluster_count = last_cluster - first_cluster + 1;
6769
6770 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6771
6772 if (count == 0)
6773 return 0;
6774
6775 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6776 /*
6777 * Check to see if we are freeing blocks across a group
6778 * boundary.
6779 */
	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
		ext4_warning(sb, "too many blocks added to group %u",
			     block_group);
		err = -EINVAL;
		goto error_return;
	}

	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}

	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc) {
		err = -EIO;
		goto error_return;
	}

	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		err = -EINVAL;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
	if (err)
		goto error_return;

	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			clusters_freed++;
		}
	}

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_return;

	/*
	 * We need to update group_info->bb_free and the bitmap with the
	 * group lock held, because ext4_mb_generate_buddy() looks at
	 * them under the same lock.
	 */
	ext4_lock_group(sb, block_group);
	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
	mb_free_blocks(NULL, &e4b, bit, cluster_count);
	free_clusters_count = clusters_freed +
			      ext4_free_group_clusters(sb, desc);
	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
	ext4_block_bitmap_csum_set(sb, desc, bitmap_bh);
	ext4_group_desc_csum_set(sb, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   clusters_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic64_add(clusters_freed,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return err;
}
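
/*
 * Editorial note: a sketch of the typical caller, assuming the online
 * resize path (illustrative only, not a definition of the interface).
 * When the last group grows, the resize code makes the newly available
 * blocks visible to the allocator roughly like this:
 *
 *	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
 *
 * where o_blocks_count is the old filesystem size in blocks and add is
 * the number of blocks being added to that last group.
 */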

/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb: super block for the file system
 * @start: starting block of the free extent in the alloc. group
 * @count: number of blocks to TRIM
 * @e4b: ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static int ext4_trim_extent(struct super_block *sb,
		int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	ext4_group_t group = e4b->bd_group;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}
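
/*
 * Editorial note on ext4_trim_extent() above: the group lock is a
 * spinlock, and issuing a discard to the device may sleep, so the lock
 * is dropped around ext4_issue_discard().  The extent stays marked as
 * used in the buddy for that window, so nobody can allocate from it
 * until mb_free_blocks() releases it again.
 */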

static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
					   ext4_group_t grp)
{
	unsigned long nr_clusters_in_group;

	if (grp < (ext4_get_groups_count(sb) - 1))
		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
	else
		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
					ext4_group_first_block_no(sb, grp))
				       >> EXT4_CLUSTER_BITS(sb);

	return nr_clusters_in_group - 1;
}
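
/*
 * Editorial worked example (illustrative numbers): with 128 clusters per
 * group, every group except the last reports 127 as its last cluster
 * index; if the filesystem happens to end 100 clusters into its last
 * group, that group reports 99 instead.
 */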

static bool ext4_trim_interrupted(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count, last, origin_start;
	bool set_trimmed = false;
	void *bitmap;

	last = ext4_last_grp_cluster(sb, e4b->bd_group);
	bitmap = e4b->bd_bitmap;
	if (start == 0 && max >= last)
		set_trimmed = true;
	origin_start = start;
	start = max(e4b->bd_info->bb_first_free, start);
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;

		next = mb_find_next_bit(bitmap, last + 1, start);
		if (origin_start == 0 && next >= last)
			set_trimmed = true;

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				return count;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (ext4_trim_interrupted())
			return count;

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	if (set_trimmed)
		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);

	return count;
}

/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb: super block for file system
 * @group: group to be trimmed
 * @start: first group block to examine
 * @max: last group block to examine
 * @minblocks: minimum extent block count
 *
 * ext4_trim_all_free walks through the group's block bitmap searching for
 * free extents. When a free extent is found, it is marked as used in the
 * group buddy bitmap, a TRIM command is issued on the extent, and then the
 * extent is freed again in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

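	/*
	 * Editorial note: if this group was already trimmed and the flag has
	 * not been cleared since (it is cleared again when blocks are freed
	 * in the group), and the current minimum extent size is not smaller
	 * than the one used for that earlier trim, there is nothing new to
	 * discard, so the bitmap walk can be skipped.
	 */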
	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
	else
		ret = 0;

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}

/**
 * ext4_trim_fs() -- trim ioctl handle function
 * @sb: superblock for filesystem
 * @range: fstrim_range structure
 *
 * start:	First Byte to trim
 * len:		number of Bytes to trim from start
 * minlen:	minimum extent length in Bytes
 *
 * ext4_trim_fs goes through all allocation groups containing Bytes from
 * start to start+len. For each such group, ext4_trim_all_free() is invoked
 * to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point to try to trim less than discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				      discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
	if (end >= max_blks - 1)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	for (group = first_group; group <= last_group; group++) {
		if (ext4_trim_interrupted())
			break;
		grp = ext4_get_group_info(sb, group);
		if (!grp)
			continue;
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all the groups except the last one, last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
		 * change it for the last group, note that last_cluster is
		 * already computed earlier by ext4_get_group_no_and_offset()
		 */
		if (group == last_group)
			end = last_cluster;
		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}
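
/*
 * Editorial usage sketch (illustrative, not part of this file): the values
 * in struct fstrim_range come from the FITRIM ioctl, which userspace issues
 * roughly as follows:
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,
 *		.minlen	= 0,
 *	};
 *
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("%llu bytes trimmed\n", range.len);
 *
 * On return, range.len has been rewritten above to report how many bytes
 * were actually trimmed.
 */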

/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = max(e4b.bd_info->bb_first_free, start);
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}

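/*
 * Editorial note: a minimal sketch of a formatter callback, assuming the
 * ext4_mballoc_query_range_fn signature implied by the call above; the
 * name my_record_extent is hypothetical:
 *
 *	static int my_record_extent(struct super_block *sb, ext4_group_t group,
 *				    ext4_grpblk_t start, ext4_grpblk_t len,
 *				    void *priv)
 *	{
 *		... record the free cluster range [start, start + len) ...
 *		return 0;
 *	}
 *
 * A nonzero return value stops the iteration and is propagated back to the
 * caller of ext4_mballoc_query_range().
 */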