1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 4 * Written by Alex Tomas <alex@clusterfs.com> 5 */ 6 7 8 /* 9 * mballoc.c contains the multiblocks allocation routines 10 */ 11 12 #include "ext4_jbd2.h" 13 #include "mballoc.h" 14 #include <linux/log2.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/nospec.h> 18 #include <linux/backing-dev.h> 19 #include <linux/freezer.h> 20 #include <trace/events/ext4.h> 21 22 /* 23 * MUSTDO: 24 * - test ext4_ext_search_left() and ext4_ext_search_right() 25 * - search for metadata in few groups 26 * 27 * TODO v4: 28 * - normalization should take into account whether file is still open 29 * - discard preallocations if no free space left (policy?) 30 * - don't normalize tails 31 * - quota 32 * - reservation for superuser 33 * 34 * TODO v3: 35 * - bitmap read-ahead (proposed by Oleg Drokin aka green) 36 * - track min/max extents in each group for better group selection 37 * - mb_mark_used() may allocate chunk right after splitting buddy 38 * - tree of groups sorted by number of free blocks 39 * - error handling 40 */ 41 42 /* 43 * The allocation request involve request for multiple number of blocks 44 * near to the goal(block) value specified. 45 * 46 * During initialization phase of the allocator we decide to use the 47 * group preallocation or inode preallocation depending on the size of 48 * the file. The size of the file could be the resulting file size we 49 * would have after allocation, or the current file size, which ever 50 * is larger. If the size is less than sbi->s_mb_stream_request we 51 * select to use the group preallocation. The default value of 52 * s_mb_stream_request is 16 blocks. This can also be tuned via 53 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in 54 * terms of number of blocks. 55 * 56 * The main motivation for having small file use group preallocation is to 57 * ensure that we have small files closer together on the disk. 58 * 59 * First stage the allocator looks at the inode prealloc list, 60 * ext4_inode_info->i_prealloc_list, which contains list of prealloc 61 * spaces for this particular inode. The inode prealloc space is 62 * represented as: 63 * 64 * pa_lstart -> the logical start block for this prealloc space 65 * pa_pstart -> the physical start block for this prealloc space 66 * pa_len -> length for this prealloc space (in clusters) 67 * pa_free -> free space available in this prealloc space (in clusters) 68 * 69 * The inode preallocation space is used looking at the _logical_ start 70 * block. If only the logical file block falls within the range of prealloc 71 * space we will consume the particular prealloc space. This makes sure that 72 * we have contiguous physical blocks representing the file blocks 73 * 74 * The important thing to be noted in case of inode prealloc space is that 75 * we don't modify the values associated to inode prealloc space except 76 * pa_free. 77 * 78 * If we are not able to find blocks in the inode prealloc space and if we 79 * have the group allocation flag set then we look at the locality group 80 * prealloc space. These are per CPU prealloc list represented as 81 * 82 * ext4_sb_info.s_locality_groups[smp_processor_id()] 83 * 84 * The reason for having a per cpu locality group is to reduce the contention 85 * between CPUs. It is possible to get scheduled at this point. 
86 * 87 * The locality group prealloc space is used looking at whether we have 88 * enough free space (pa_free) within the prealloc space. 89 * 90 * If we can't allocate blocks via inode prealloc or/and locality group 91 * prealloc then we look at the buddy cache. The buddy cache is represented 92 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets 93 * mapped to the buddy and bitmap information regarding different 94 * groups. The buddy information is attached to buddy cache inode so that 95 * we can access them through the page cache. The information regarding 96 * each group is loaded via ext4_mb_load_buddy. The information involve 97 * block bitmap and buddy information. The information are stored in the 98 * inode as: 99 * 100 * { page } 101 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 102 * 103 * 104 * one block each for bitmap and buddy information. So for each group we 105 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE / 106 * blocksize) blocks. So it can have information regarding groups_per_page 107 * which is blocks_per_page/2 108 * 109 * The buddy cache inode is not stored on disk. The inode is thrown 110 * away when the filesystem is unmounted. 111 * 112 * We look for count number of blocks in the buddy cache. If we were able 113 * to locate that many free blocks we return with additional information 114 * regarding rest of the contiguous physical block available 115 * 116 * Before allocating blocks via buddy cache we normalize the request 117 * blocks. This ensure we ask for more blocks that we needed. The extra 118 * blocks that we get after allocation is added to the respective prealloc 119 * list. In case of inode preallocation we follow a list of heuristics 120 * based on file size. This can be found in ext4_mb_normalize_request. If 121 * we are doing a group prealloc we try to normalize the request to 122 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is 123 * dependent on the cluster size; for non-bigalloc file systems, it is 124 * 512 blocks. This can be tuned via 125 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in 126 * terms of number of blocks. If we have mounted the file system with -O 127 * stripe=<value> option the group prealloc request is normalized to the 128 * smallest multiple of the stripe value (sbi->s_stripe) which is 129 * greater than the default mb_group_prealloc. 130 * 131 * If "mb_optimize_scan" mount option is set, we maintain in memory group info 132 * structures in two data structures: 133 * 134 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders) 135 * 136 * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks) 137 * 138 * This is an array of lists where the index in the array represents the 139 * largest free order in the buddy bitmap of the participating group infos of 140 * that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total 141 * number of buddy bitmap orders possible) number of lists. Group-infos are 142 * placed in appropriate lists. 143 * 144 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size) 145 * 146 * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks) 147 * 148 * This is an array of lists where in the i-th list there are groups with 149 * average fragment size >= 2^i and < 2^(i+1). The average fragment size 150 * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments. 
151 * Note that we don't bother with a special list for completely empty groups 152 * so we only have MB_NUM_ORDERS(sb) lists. 153 * 154 * When "mb_optimize_scan" mount option is set, mballoc consults the above data 155 * structures to decide the order in which groups are to be traversed for 156 * fulfilling an allocation request. 157 * 158 * At CR_POWER2_ALIGNED , we look for groups which have the largest_free_order 159 * >= the order of the request. We directly look at the largest free order list 160 * in the data structure (1) above where largest_free_order = order of the 161 * request. If that list is empty, we look at remaining list in the increasing 162 * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED 163 * lookup in O(1) time. 164 * 165 * At CR_GOAL_LEN_FAST, we only consider groups where 166 * average fragment size > request size. So, we lookup a group which has average 167 * fragment size just above or equal to request size using our average fragment 168 * size group lists (data structure 2) in O(1) time. 169 * 170 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied 171 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in 172 * CR_GOAL_LEN_FAST suggests that there is no BG that has avg 173 * fragment size > goal length. So before falling to the slower 174 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim goal length and 175 * then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a big 176 * enough average fragment size. This increases the chances of finding a 177 * suitable block group in O(1) time and results in faster allocation at the 178 * cost of reduced size of allocation. 179 * 180 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in 181 * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and 182 * CR_GOAL_LEN_FAST phase. 183 * 184 * The regular allocator (using the buddy cache) supports a few tunables. 185 * 186 * /sys/fs/ext4/<partition>/mb_min_to_scan 187 * /sys/fs/ext4/<partition>/mb_max_to_scan 188 * /sys/fs/ext4/<partition>/mb_order2_req 189 * /sys/fs/ext4/<partition>/mb_linear_limit 190 * 191 * The regular allocator uses buddy scan only if the request len is power of 192 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The 193 * value of s_mb_order2_reqs can be tuned via 194 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to 195 * stripe size (sbi->s_stripe), we try to search for contiguous block in 196 * stripe size. This should result in better allocation on RAID setups. If 197 * not, we search in the specific group using bitmap for best extents. The 198 * tunable min_to_scan and max_to_scan control the behaviour here. 199 * min_to_scan indicate how long the mballoc __must__ look for a best 200 * extent and max_to_scan indicates how long the mballoc __can__ look for a 201 * best extent in the found extents. Searching for the blocks starts with 202 * the group specified as the goal value in allocation context via 203 * ac_g_ex. Each group is first checked based on the criteria whether it 204 * can be used for allocation. ext4_mb_good_group explains how the groups are 205 * checked. 206 * 207 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not 208 * get traversed linearly. That may result in subsequent allocations being not 209 * close to each other. And so, the underlying device may get filled up in a 210 * non-linear fashion. 
While that may not matter on non-rotational devices, for 211 * rotational devices that may result in higher seek times. "mb_linear_limit" 212 * tells mballoc how many groups mballoc should search linearly before 213 * performing consulting above data structures for more efficient lookups. For 214 * non rotational devices, this value defaults to 0 and for rotational devices 215 * this is set to MB_DEFAULT_LINEAR_LIMIT. 216 * 217 * Both the prealloc space are getting populated as above. So for the first 218 * request we will hit the buddy cache which will result in this prealloc 219 * space getting filled. The prealloc space is then later used for the 220 * subsequent request. 221 */ 222 223 /* 224 * mballoc operates on the following data: 225 * - on-disk bitmap 226 * - in-core buddy (actually includes buddy and bitmap) 227 * - preallocation descriptors (PAs) 228 * 229 * there are two types of preallocations: 230 * - inode 231 * assiged to specific inode and can be used for this inode only. 232 * it describes part of inode's space preallocated to specific 233 * physical blocks. any block from that preallocated can be used 234 * independent. the descriptor just tracks number of blocks left 235 * unused. so, before taking some block from descriptor, one must 236 * make sure corresponded logical block isn't allocated yet. this 237 * also means that freeing any block within descriptor's range 238 * must discard all preallocated blocks. 239 * - locality group 240 * assigned to specific locality group which does not translate to 241 * permanent set of inodes: inode can join and leave group. space 242 * from this type of preallocation can be used for any inode. thus 243 * it's consumed from the beginning to the end. 244 * 245 * relation between them can be expressed as: 246 * in-core buddy = on-disk bitmap + preallocation descriptors 247 * 248 * this mean blocks mballoc considers used are: 249 * - allocated blocks (persistent) 250 * - preallocated blocks (non-persistent) 251 * 252 * consistency in mballoc world means that at any time a block is either 253 * free or used in ALL structures. notice: "any time" should not be read 254 * literally -- time is discrete and delimited by locks. 255 * 256 * to keep it simple, we don't use block numbers, instead we count number of 257 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA. 258 * 259 * all operations can be expressed as: 260 * - init buddy: buddy = on-disk + PAs 261 * - new PA: buddy += N; PA = N 262 * - use inode PA: on-disk += N; PA -= N 263 * - discard inode PA buddy -= on-disk - PA; PA = 0 264 * - use locality group PA on-disk += N; PA -= N 265 * - discard locality group PA buddy -= PA; PA = 0 266 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap 267 * is used in real operation because we can't know actual used 268 * bits from PA, only from on-disk bitmap 269 * 270 * if we follow this strict logic, then all operations above should be atomic. 271 * given some of them can block, we'd have to use something like semaphores 272 * killing performance on high-end SMP hardware. let's try to relax it using 273 * the following knowledge: 274 * 1) if buddy is referenced, it's already initialized 275 * 2) while block is used in buddy and the buddy is referenced, 276 * nobody can re-allocate that block 277 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has 278 * bit set and PA claims same block, it's OK. 
IOW, one can set bit in 279 * on-disk bitmap if buddy has same bit set or/and PA covers corresponded 280 * block 281 * 282 * so, now we're building a concurrency table: 283 * - init buddy vs. 284 * - new PA 285 * blocks for PA are allocated in the buddy, buddy must be referenced 286 * until PA is linked to allocation group to avoid concurrent buddy init 287 * - use inode PA 288 * we need to make sure that either on-disk bitmap or PA has uptodate data 289 * given (3) we care that PA-=N operation doesn't interfere with init 290 * - discard inode PA 291 * the simplest way would be to have buddy initialized by the discard 292 * - use locality group PA 293 * again PA-=N must be serialized with init 294 * - discard locality group PA 295 * the simplest way would be to have buddy initialized by the discard 296 * - new PA vs. 297 * - use inode PA 298 * i_data_sem serializes them 299 * - discard inode PA 300 * discard process must wait until PA isn't used by another process 301 * - use locality group PA 302 * some mutex should serialize them 303 * - discard locality group PA 304 * discard process must wait until PA isn't used by another process 305 * - use inode PA 306 * - use inode PA 307 * i_data_sem or another mutex should serializes them 308 * - discard inode PA 309 * discard process must wait until PA isn't used by another process 310 * - use locality group PA 311 * nothing wrong here -- they're different PAs covering different blocks 312 * - discard locality group PA 313 * discard process must wait until PA isn't used by another process 314 * 315 * now we're ready to make few consequences: 316 * - PA is referenced and while it is no discard is possible 317 * - PA is referenced until block isn't marked in on-disk bitmap 318 * - PA changes only after on-disk bitmap 319 * - discard must not compete with init. either init is done before 320 * any discard or they're serialized somehow 321 * - buddy init as sum of on-disk bitmap and PAs is done atomically 322 * 323 * a special case when we've used PA to emptiness. 
no need to modify buddy 324 * in this case, but we should care about concurrent init 325 * 326 */ 327 328 /* 329 * Logic in few words: 330 * 331 * - allocation: 332 * load group 333 * find blocks 334 * mark bits in on-disk bitmap 335 * release group 336 * 337 * - use preallocation: 338 * find proper PA (per-inode or group) 339 * load group 340 * mark bits in on-disk bitmap 341 * release group 342 * release PA 343 * 344 * - free: 345 * load group 346 * mark bits in on-disk bitmap 347 * release group 348 * 349 * - discard preallocations in group: 350 * mark PAs deleted 351 * move them onto local list 352 * load on-disk bitmap 353 * load group 354 * remove PA from object (inode or locality group) 355 * mark free blocks in-core 356 * 357 * - discard inode's preallocations: 358 */ 359 360 /* 361 * Locking rules 362 * 363 * Locks: 364 * - bitlock on a group (group) 365 * - object (inode/locality) (object) 366 * - per-pa lock (pa) 367 * - cr_power2_aligned lists lock (cr_power2_aligned) 368 * - cr_goal_len_fast lists lock (cr_goal_len_fast) 369 * 370 * Paths: 371 * - new pa 372 * object 373 * group 374 * 375 * - find and use pa: 376 * pa 377 * 378 * - release consumed pa: 379 * pa 380 * group 381 * object 382 * 383 * - generate in-core bitmap: 384 * group 385 * pa 386 * 387 * - discard all for given object (inode, locality group): 388 * object 389 * pa 390 * group 391 * 392 * - discard all for given group: 393 * group 394 * pa 395 * group 396 * object 397 * 398 * - allocation path (ext4_mb_regular_allocator) 399 * group 400 * cr_power2_aligned/cr_goal_len_fast 401 */ 402 static struct kmem_cache *ext4_pspace_cachep; 403 static struct kmem_cache *ext4_ac_cachep; 404 static struct kmem_cache *ext4_free_data_cachep; 405 406 /* We create slab caches for groupinfo data structures based on the 407 * superblock block size. There will be one per mounted filesystem for 408 * each unique s_blocksize_bits */ 409 #define NR_GRPINFO_CACHES 8 410 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; 411 412 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { 413 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", 414 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", 415 "ext4_groupinfo_64k", "ext4_groupinfo_128k" 416 }; 417 418 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 419 ext4_group_t group); 420 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); 421 422 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 423 ext4_group_t group, enum criteria cr); 424 425 static int ext4_try_to_trim_range(struct super_block *sb, 426 struct ext4_buddy *e4b, ext4_grpblk_t start, 427 ext4_grpblk_t max, ext4_grpblk_t minblocks); 428 429 /* 430 * The algorithm using this percpu seq counter goes below: 431 * 1. We sample the percpu discard_pa_seq counter before trying for block 432 * allocation in ext4_mb_new_blocks(). 433 * 2. We increment this percpu discard_pa_seq counter when we either allocate 434 * or free these blocks i.e. while marking those blocks as used/free in 435 * mb_mark_used()/mb_free_blocks(). 436 * 3. We also increment this percpu seq counter when we successfully identify 437 * that the bb_prealloc_list is not empty and hence proceed for discarding 438 * of those PAs inside ext4_mb_discard_group_preallocations(). 
439 * 440 * Now to make sure that the regular fast path of block allocation is not 441 * affected, as a small optimization we only sample the percpu seq counter 442 * on that cpu. Only when the block allocation fails and when freed blocks 443 * found were 0, that is when we sample percpu seq counter for all cpus using 444 * below function ext4_get_discard_pa_seq_sum(). This happens after making 445 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty. 446 */ 447 static DEFINE_PER_CPU(u64, discard_pa_seq); 448 static inline u64 ext4_get_discard_pa_seq_sum(void) 449 { 450 int __cpu; 451 u64 __seq = 0; 452 453 for_each_possible_cpu(__cpu) 454 __seq += per_cpu(discard_pa_seq, __cpu); 455 return __seq; 456 } 457 458 static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 459 { 460 #if BITS_PER_LONG == 64 461 *bit += ((unsigned long) addr & 7UL) << 3; 462 addr = (void *) ((unsigned long) addr & ~7UL); 463 #elif BITS_PER_LONG == 32 464 *bit += ((unsigned long) addr & 3UL) << 3; 465 addr = (void *) ((unsigned long) addr & ~3UL); 466 #else 467 #error "how many bits you are?!" 468 #endif 469 return addr; 470 } 471 472 static inline int mb_test_bit(int bit, void *addr) 473 { 474 /* 475 * ext4_test_bit on architecture like powerpc 476 * needs unsigned long aligned address 477 */ 478 addr = mb_correct_addr_and_bit(&bit, addr); 479 return ext4_test_bit(bit, addr); 480 } 481 482 static inline void mb_set_bit(int bit, void *addr) 483 { 484 addr = mb_correct_addr_and_bit(&bit, addr); 485 ext4_set_bit(bit, addr); 486 } 487 488 static inline void mb_clear_bit(int bit, void *addr) 489 { 490 addr = mb_correct_addr_and_bit(&bit, addr); 491 ext4_clear_bit(bit, addr); 492 } 493 494 static inline int mb_test_and_clear_bit(int bit, void *addr) 495 { 496 addr = mb_correct_addr_and_bit(&bit, addr); 497 return ext4_test_and_clear_bit(bit, addr); 498 } 499 500 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 501 { 502 int fix = 0, ret, tmpmax; 503 addr = mb_correct_addr_and_bit(&fix, addr); 504 tmpmax = max + fix; 505 start += fix; 506 507 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 508 if (ret > max) 509 return max; 510 return ret; 511 } 512 513 static inline int mb_find_next_bit(void *addr, int max, int start) 514 { 515 int fix = 0, ret, tmpmax; 516 addr = mb_correct_addr_and_bit(&fix, addr); 517 tmpmax = max + fix; 518 start += fix; 519 520 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 521 if (ret > max) 522 return max; 523 return ret; 524 } 525 526 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 527 { 528 char *bb; 529 530 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 531 BUG_ON(max == NULL); 532 533 if (order > e4b->bd_blkbits + 1) { 534 *max = 0; 535 return NULL; 536 } 537 538 /* at order 0 we see each particular block */ 539 if (order == 0) { 540 *max = 1 << (e4b->bd_blkbits + 3); 541 return e4b->bd_bitmap; 542 } 543 544 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 545 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 546 547 return bb; 548 } 549 550 #ifdef DOUBLE_CHECK 551 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 552 int first, int count) 553 { 554 int i; 555 struct super_block *sb = e4b->bd_sb; 556 557 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 558 return; 559 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 560 for (i = 0; i < count; i++) { 561 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 562 ext4_fsblk_t blocknr; 563 564 blocknr = 
ext4_group_first_block_no(sb, e4b->bd_group); 565 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 566 ext4_grp_locked_error(sb, e4b->bd_group, 567 inode ? inode->i_ino : 0, 568 blocknr, 569 "freeing block already freed " 570 "(bit %u)", 571 first + i); 572 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 573 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 574 } 575 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 576 } 577 } 578 579 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 580 { 581 int i; 582 583 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 584 return; 585 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 586 for (i = 0; i < count; i++) { 587 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 588 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 589 } 590 } 591 592 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 593 { 594 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 595 return; 596 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 597 unsigned char *b1, *b2; 598 int i; 599 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 600 b2 = (unsigned char *) bitmap; 601 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 602 if (b1[i] != b2[i]) { 603 ext4_msg(e4b->bd_sb, KERN_ERR, 604 "corruption in group %u " 605 "at byte %u(%u): %x in copy != %x " 606 "on disk/prealloc", 607 e4b->bd_group, i, i * 8, b1[i], b2[i]); 608 BUG(); 609 } 610 } 611 } 612 } 613 614 static void mb_group_bb_bitmap_alloc(struct super_block *sb, 615 struct ext4_group_info *grp, ext4_group_t group) 616 { 617 struct buffer_head *bh; 618 619 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); 620 if (!grp->bb_bitmap) 621 return; 622 623 bh = ext4_read_block_bitmap(sb, group); 624 if (IS_ERR_OR_NULL(bh)) { 625 kfree(grp->bb_bitmap); 626 grp->bb_bitmap = NULL; 627 return; 628 } 629 630 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); 631 put_bh(bh); 632 } 633 634 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 635 { 636 kfree(grp->bb_bitmap); 637 } 638 639 #else 640 static inline void mb_free_blocks_double(struct inode *inode, 641 struct ext4_buddy *e4b, int first, int count) 642 { 643 return; 644 } 645 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 646 int first, int count) 647 { 648 return; 649 } 650 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 651 { 652 return; 653 } 654 655 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, 656 struct ext4_group_info *grp, ext4_group_t group) 657 { 658 return; 659 } 660 661 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 662 { 663 return; 664 } 665 #endif 666 667 #ifdef AGGRESSIVE_CHECK 668 669 #define MB_CHECK_ASSERT(assert) \ 670 do { \ 671 if (!(assert)) { \ 672 printk(KERN_EMERG \ 673 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 674 function, file, line, # assert); \ 675 BUG(); \ 676 } \ 677 } while (0) 678 679 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 680 const char *function, int line) 681 { 682 struct super_block *sb = e4b->bd_sb; 683 int order = e4b->bd_blkbits + 1; 684 int max; 685 int max2; 686 int i; 687 int j; 688 int k; 689 int count; 690 struct ext4_group_info *grp; 691 int fragments = 0; 692 int fstart; 693 struct list_head *cur; 694 void *buddy; 695 void *buddy2; 696 697 if (e4b->bd_info->bb_check_counter++ % 10) 698 return 0; 699 700 while (order > 1) { 701 buddy = mb_find_buddy(e4b, order, &max); 702 MB_CHECK_ASSERT(buddy); 703 buddy2 = mb_find_buddy(e4b, order - 
1, &max2); 704 MB_CHECK_ASSERT(buddy2); 705 MB_CHECK_ASSERT(buddy != buddy2); 706 MB_CHECK_ASSERT(max * 2 == max2); 707 708 count = 0; 709 for (i = 0; i < max; i++) { 710 711 if (mb_test_bit(i, buddy)) { 712 /* only single bit in buddy2 may be 0 */ 713 if (!mb_test_bit(i << 1, buddy2)) { 714 MB_CHECK_ASSERT( 715 mb_test_bit((i<<1)+1, buddy2)); 716 } 717 continue; 718 } 719 720 /* both bits in buddy2 must be 1 */ 721 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 722 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 723 724 for (j = 0; j < (1 << order); j++) { 725 k = (i * (1 << order)) + j; 726 MB_CHECK_ASSERT( 727 !mb_test_bit(k, e4b->bd_bitmap)); 728 } 729 count++; 730 } 731 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 732 order--; 733 } 734 735 fstart = -1; 736 buddy = mb_find_buddy(e4b, 0, &max); 737 for (i = 0; i < max; i++) { 738 if (!mb_test_bit(i, buddy)) { 739 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 740 if (fstart == -1) { 741 fragments++; 742 fstart = i; 743 } 744 continue; 745 } 746 fstart = -1; 747 /* check used bits only */ 748 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 749 buddy2 = mb_find_buddy(e4b, j, &max2); 750 k = i >> j; 751 MB_CHECK_ASSERT(k < max2); 752 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 753 } 754 } 755 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 756 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 757 758 grp = ext4_get_group_info(sb, e4b->bd_group); 759 if (!grp) 760 return NULL; 761 list_for_each(cur, &grp->bb_prealloc_list) { 762 ext4_group_t groupnr; 763 struct ext4_prealloc_space *pa; 764 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 765 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 766 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 767 for (i = 0; i < pa->pa_len; i++) 768 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 769 } 770 return 0; 771 } 772 #undef MB_CHECK_ASSERT 773 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 774 __FILE__, __func__, __LINE__) 775 #else 776 #define mb_check_buddy(e4b) 777 #endif 778 779 /* 780 * Divide blocks started from @first with length @len into 781 * smaller chunks with power of 2 blocks. 782 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 783 * then increase bb_counters[] for corresponded chunk size. 784 */ 785 static void ext4_mb_mark_free_simple(struct super_block *sb, 786 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 787 struct ext4_group_info *grp) 788 { 789 struct ext4_sb_info *sbi = EXT4_SB(sb); 790 ext4_grpblk_t min; 791 ext4_grpblk_t max; 792 ext4_grpblk_t chunk; 793 unsigned int border; 794 795 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 796 797 border = 2 << sb->s_blocksize_bits; 798 799 while (len > 0) { 800 /* find how many blocks can be covered since this position */ 801 max = ffs(first | border) - 1; 802 803 /* find how many blocks of power 2 we need to mark */ 804 min = fls(len) - 1; 805 806 if (max < min) 807 min = max; 808 chunk = 1 << min; 809 810 /* mark multiblock chunks only */ 811 grp->bb_counters[min]++; 812 if (min > 0) 813 mb_clear_bit(first >> min, 814 buddy + sbi->s_mb_offsets[min]); 815 816 len -= chunk; 817 first += chunk; 818 } 819 } 820 821 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len) 822 { 823 int order; 824 825 /* 826 * We don't bother with a special lists groups with only 1 block free 827 * extents and for completely empty groups. 
828 */ 829 order = fls(len) - 2; 830 if (order < 0) 831 return 0; 832 if (order == MB_NUM_ORDERS(sb)) 833 order--; 834 return order; 835 } 836 837 /* Move group to appropriate avg_fragment_size list */ 838 static void 839 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) 840 { 841 struct ext4_sb_info *sbi = EXT4_SB(sb); 842 int new_order; 843 844 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0) 845 return; 846 847 new_order = mb_avg_fragment_size_order(sb, 848 grp->bb_free / grp->bb_fragments); 849 if (new_order == grp->bb_avg_fragment_size_order) 850 return; 851 852 if (grp->bb_avg_fragment_size_order != -1) { 853 write_lock(&sbi->s_mb_avg_fragment_size_locks[ 854 grp->bb_avg_fragment_size_order]); 855 list_del(&grp->bb_avg_fragment_size_node); 856 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ 857 grp->bb_avg_fragment_size_order]); 858 } 859 grp->bb_avg_fragment_size_order = new_order; 860 write_lock(&sbi->s_mb_avg_fragment_size_locks[ 861 grp->bb_avg_fragment_size_order]); 862 list_add_tail(&grp->bb_avg_fragment_size_node, 863 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); 864 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ 865 grp->bb_avg_fragment_size_order]); 866 } 867 868 /* 869 * Choose next group by traversing largest_free_order lists. Updates *new_cr if 870 * cr level needs an update. 871 */ 872 static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac, 873 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 874 { 875 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 876 struct ext4_group_info *iter; 877 int i; 878 879 if (ac->ac_status == AC_STATUS_FOUND) 880 return; 881 882 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) 883 atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions); 884 885 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 886 if (list_empty(&sbi->s_mb_largest_free_orders[i])) 887 continue; 888 read_lock(&sbi->s_mb_largest_free_orders_locks[i]); 889 if (list_empty(&sbi->s_mb_largest_free_orders[i])) { 890 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 891 continue; 892 } 893 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i], 894 bb_largest_free_order_node) { 895 if (sbi->s_mb_stats) 896 atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]); 897 if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) { 898 *group = iter->bb_group; 899 ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; 900 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 901 return; 902 } 903 } 904 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 905 } 906 907 /* Increment cr and search again if no group is found */ 908 *new_cr = CR_GOAL_LEN_FAST; 909 } 910 911 /* 912 * Find a suitable group of given order from the average fragments list. 
913 */ 914 static struct ext4_group_info * 915 ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order) 916 { 917 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 918 struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order]; 919 rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order]; 920 struct ext4_group_info *grp = NULL, *iter; 921 enum criteria cr = ac->ac_criteria; 922 923 if (list_empty(frag_list)) 924 return NULL; 925 read_lock(frag_list_lock); 926 if (list_empty(frag_list)) { 927 read_unlock(frag_list_lock); 928 return NULL; 929 } 930 list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) { 931 if (sbi->s_mb_stats) 932 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); 933 if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) { 934 grp = iter; 935 break; 936 } 937 } 938 read_unlock(frag_list_lock); 939 return grp; 940 } 941 942 /* 943 * Choose next group by traversing average fragment size list of suitable 944 * order. Updates *new_cr if cr level needs an update. 945 */ 946 static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac, 947 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 948 { 949 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 950 struct ext4_group_info *grp = NULL; 951 int i; 952 953 if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { 954 if (sbi->s_mb_stats) 955 atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions); 956 } 957 958 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); 959 i < MB_NUM_ORDERS(ac->ac_sb); i++) { 960 grp = ext4_mb_find_good_group_avg_frag_lists(ac, i); 961 if (grp) { 962 *group = grp->bb_group; 963 ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; 964 return; 965 } 966 } 967 968 /* 969 * CR_BEST_AVAIL_LEN works based on the concept that we have 970 * a larger normalized goal len request which can be trimmed to 971 * a smaller goal len such that it can still satisfy original 972 * request len. However, allocation request for non-regular 973 * files never gets normalized. 974 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA). 975 */ 976 if (ac->ac_flags & EXT4_MB_HINT_DATA) 977 *new_cr = CR_BEST_AVAIL_LEN; 978 else 979 *new_cr = CR_GOAL_LEN_SLOW; 980 } 981 982 /* 983 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment 984 * order we have and proactively trim the goal request length to that order to 985 * find a suitable group faster. 986 * 987 * This optimizes allocation speed at the cost of slightly reduced 988 * preallocations. However, we make sure that we don't trim the request too 989 * much and fall to CR_GOAL_LEN_SLOW in that case. 990 */ 991 static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac, 992 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 993 { 994 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 995 struct ext4_group_info *grp = NULL; 996 int i, order, min_order; 997 unsigned long num_stripe_clusters = 0; 998 999 if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { 1000 if (sbi->s_mb_stats) 1001 atomic_inc(&sbi->s_bal_best_avail_bad_suggestions); 1002 } 1003 1004 /* 1005 * mb_avg_fragment_size_order() returns order in a way that makes 1006 * retrieving back the length using (1 << order) inaccurate. Hence, use 1007 * fls() instead since we need to know the actual length while modifying 1008 * goal length. 
1009 */ 1010 order = fls(ac->ac_g_ex.fe_len) - 1; 1011 min_order = order - sbi->s_mb_best_avail_max_trim_order; 1012 if (min_order < 0) 1013 min_order = 0; 1014 1015 if (sbi->s_stripe > 0) { 1016 /* 1017 * We are assuming that stripe size is always a multiple of 1018 * cluster ratio otherwise __ext4_fill_super exists early. 1019 */ 1020 num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe); 1021 if (1 << min_order < num_stripe_clusters) 1022 /* 1023 * We consider 1 order less because later we round 1024 * up the goal len to num_stripe_clusters 1025 */ 1026 min_order = fls(num_stripe_clusters) - 1; 1027 } 1028 1029 if (1 << min_order < ac->ac_o_ex.fe_len) 1030 min_order = fls(ac->ac_o_ex.fe_len); 1031 1032 for (i = order; i >= min_order; i--) { 1033 int frag_order; 1034 /* 1035 * Scale down goal len to make sure we find something 1036 * in the free fragments list. Basically, reduce 1037 * preallocations. 1038 */ 1039 ac->ac_g_ex.fe_len = 1 << i; 1040 1041 if (num_stripe_clusters > 0) { 1042 /* 1043 * Try to round up the adjusted goal length to 1044 * stripe size (in cluster units) multiple for 1045 * efficiency. 1046 */ 1047 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, 1048 num_stripe_clusters); 1049 } 1050 1051 frag_order = mb_avg_fragment_size_order(ac->ac_sb, 1052 ac->ac_g_ex.fe_len); 1053 1054 grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order); 1055 if (grp) { 1056 *group = grp->bb_group; 1057 ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; 1058 return; 1059 } 1060 } 1061 1062 /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */ 1063 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 1064 *new_cr = CR_GOAL_LEN_SLOW; 1065 } 1066 1067 static inline int should_optimize_scan(struct ext4_allocation_context *ac) 1068 { 1069 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) 1070 return 0; 1071 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) 1072 return 0; 1073 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) 1074 return 0; 1075 return 1; 1076 } 1077 1078 /* 1079 * Return next linear group for allocation. If linear traversal should not be 1080 * performed, this function just returns the same group 1081 */ 1082 static ext4_group_t 1083 next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group, 1084 ext4_group_t ngroups) 1085 { 1086 if (!should_optimize_scan(ac)) 1087 goto inc_and_return; 1088 1089 if (ac->ac_groups_linear_remaining) { 1090 ac->ac_groups_linear_remaining--; 1091 goto inc_and_return; 1092 } 1093 1094 return group; 1095 inc_and_return: 1096 /* 1097 * Artificially restricted ngroups for non-extent 1098 * files makes group > ngroups possible on first loop. 1099 */ 1100 return group + 1 >= ngroups ? 0 : group + 1; 1101 } 1102 1103 /* 1104 * ext4_mb_choose_next_group: choose next group for allocation. 1105 * 1106 * @ac Allocation Context 1107 * @new_cr This is an output parameter. If the there is no good group 1108 * available at current CR level, this field is updated to indicate 1109 * the new cr level that should be used. 1110 * @group This is an input / output parameter. As an input it indicates the 1111 * next group that the allocator intends to use for allocation. As 1112 * output, this field indicates the next group that should be used as 1113 * determined by the optimization functions. 
1114 * @ngroups Total number of groups 1115 */ 1116 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, 1117 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 1118 { 1119 *new_cr = ac->ac_criteria; 1120 1121 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) { 1122 *group = next_linear_group(ac, *group, ngroups); 1123 return; 1124 } 1125 1126 if (*new_cr == CR_POWER2_ALIGNED) { 1127 ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups); 1128 } else if (*new_cr == CR_GOAL_LEN_FAST) { 1129 ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups); 1130 } else if (*new_cr == CR_BEST_AVAIL_LEN) { 1131 ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups); 1132 } else { 1133 /* 1134 * TODO: For CR=2, we can arrange groups in an rb tree sorted by 1135 * bb_free. But until that happens, we should never come here. 1136 */ 1137 WARN_ON(1); 1138 } 1139 } 1140 1141 /* 1142 * Cache the order of the largest free extent we have available in this block 1143 * group. 1144 */ 1145 static void 1146 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 1147 { 1148 struct ext4_sb_info *sbi = EXT4_SB(sb); 1149 int i; 1150 1151 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) 1152 if (grp->bb_counters[i] > 0) 1153 break; 1154 /* No need to move between order lists? */ 1155 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || 1156 i == grp->bb_largest_free_order) { 1157 grp->bb_largest_free_order = i; 1158 return; 1159 } 1160 1161 if (grp->bb_largest_free_order >= 0) { 1162 write_lock(&sbi->s_mb_largest_free_orders_locks[ 1163 grp->bb_largest_free_order]); 1164 list_del_init(&grp->bb_largest_free_order_node); 1165 write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1166 grp->bb_largest_free_order]); 1167 } 1168 grp->bb_largest_free_order = i; 1169 if (grp->bb_largest_free_order >= 0 && grp->bb_free) { 1170 write_lock(&sbi->s_mb_largest_free_orders_locks[ 1171 grp->bb_largest_free_order]); 1172 list_add_tail(&grp->bb_largest_free_order_node, 1173 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1174 write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1175 grp->bb_largest_free_order]); 1176 } 1177 } 1178 1179 static noinline_for_stack 1180 void ext4_mb_generate_buddy(struct super_block *sb, 1181 void *buddy, void *bitmap, ext4_group_t group, 1182 struct ext4_group_info *grp) 1183 { 1184 struct ext4_sb_info *sbi = EXT4_SB(sb); 1185 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1186 ext4_grpblk_t i = 0; 1187 ext4_grpblk_t first; 1188 ext4_grpblk_t len; 1189 unsigned free = 0; 1190 unsigned fragments = 0; 1191 unsigned long long period = get_cycles(); 1192 1193 /* initialize buddy from bitmap which is aggregation 1194 * of on-disk bitmap and preallocations */ 1195 i = mb_find_next_zero_bit(bitmap, max, 0); 1196 grp->bb_first_free = i; 1197 while (i < max) { 1198 fragments++; 1199 first = i; 1200 i = mb_find_next_bit(bitmap, max, i); 1201 len = i - first; 1202 free += len; 1203 if (len > 1) 1204 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1205 else 1206 grp->bb_counters[0]++; 1207 if (i < max) 1208 i = mb_find_next_zero_bit(bitmap, max, i); 1209 } 1210 grp->bb_fragments = fragments; 1211 1212 if (free != grp->bb_free) { 1213 ext4_grp_locked_error(sb, group, 0, 0, 1214 "block bitmap and bg descriptor " 1215 "inconsistent: %u vs %u free clusters", 1216 free, grp->bb_free); 1217 /* 1218 * If we intend to continue, we consider group descriptor 1219 * corrupt and update bb_free using bitmap value 1220 */ 1221 
grp->bb_free = free; 1222 ext4_mark_group_bitmap_corrupted(sb, group, 1223 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1224 } 1225 mb_set_largest_free_order(sb, grp); 1226 mb_update_avg_fragment_size(sb, grp); 1227 1228 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1229 1230 period = get_cycles() - period; 1231 atomic_inc(&sbi->s_mb_buddies_generated); 1232 atomic64_add(period, &sbi->s_mb_generation_time); 1233 } 1234 1235 static void mb_regenerate_buddy(struct ext4_buddy *e4b) 1236 { 1237 int count; 1238 int order = 1; 1239 void *buddy; 1240 1241 while ((buddy = mb_find_buddy(e4b, order++, &count))) 1242 mb_set_bits(buddy, 0, count); 1243 1244 e4b->bd_info->bb_fragments = 0; 1245 memset(e4b->bd_info->bb_counters, 0, 1246 sizeof(*e4b->bd_info->bb_counters) * 1247 (e4b->bd_sb->s_blocksize_bits + 2)); 1248 1249 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, 1250 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); 1251 } 1252 1253 /* The buddy information is attached the buddy cache inode 1254 * for convenience. The information regarding each group 1255 * is loaded via ext4_mb_load_buddy. The information involve 1256 * block bitmap and buddy information. The information are 1257 * stored in the inode as 1258 * 1259 * { page } 1260 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1261 * 1262 * 1263 * one block each for bitmap and buddy information. 1264 * So for each group we take up 2 blocks. A page can 1265 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 1266 * So it can have information regarding groups_per_page which 1267 * is blocks_per_page/2 1268 * 1269 * Locking note: This routine takes the block group lock of all groups 1270 * for this page; do not hold this lock when calling this routine! 1271 */ 1272 1273 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1274 { 1275 ext4_group_t ngroups; 1276 unsigned int blocksize; 1277 int blocks_per_page; 1278 int groups_per_page; 1279 int err = 0; 1280 int i; 1281 ext4_group_t first_group, group; 1282 int first_block; 1283 struct super_block *sb; 1284 struct buffer_head *bhs; 1285 struct buffer_head **bh = NULL; 1286 struct inode *inode; 1287 char *data; 1288 char *bitmap; 1289 struct ext4_group_info *grinfo; 1290 1291 inode = page->mapping->host; 1292 sb = inode->i_sb; 1293 ngroups = ext4_get_groups_count(sb); 1294 blocksize = i_blocksize(inode); 1295 blocks_per_page = PAGE_SIZE / blocksize; 1296 1297 mb_debug(sb, "init page %lu\n", page->index); 1298 1299 groups_per_page = blocks_per_page >> 1; 1300 if (groups_per_page == 0) 1301 groups_per_page = 1; 1302 1303 /* allocate buffer_heads to read bitmaps */ 1304 if (groups_per_page > 1) { 1305 i = sizeof(struct buffer_head *) * groups_per_page; 1306 bh = kzalloc(i, gfp); 1307 if (bh == NULL) 1308 return -ENOMEM; 1309 } else 1310 bh = &bhs; 1311 1312 first_group = page->index * blocks_per_page / 2; 1313 1314 /* read all groups the page covers into the cache */ 1315 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1316 if (group >= ngroups) 1317 break; 1318 1319 grinfo = ext4_get_group_info(sb, group); 1320 if (!grinfo) 1321 continue; 1322 /* 1323 * If page is uptodate then we came here after online resize 1324 * which added some new uninitialized group info structs, so 1325 * we must skip all initialized uptodate buddies on the page, 1326 * which may be currently in use by an allocating task. 
1327 */ 1328 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 1329 bh[i] = NULL; 1330 continue; 1331 } 1332 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 1333 if (IS_ERR(bh[i])) { 1334 err = PTR_ERR(bh[i]); 1335 bh[i] = NULL; 1336 goto out; 1337 } 1338 mb_debug(sb, "read bitmap for group %u\n", group); 1339 } 1340 1341 /* wait for I/O completion */ 1342 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1343 int err2; 1344 1345 if (!bh[i]) 1346 continue; 1347 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 1348 if (!err) 1349 err = err2; 1350 } 1351 1352 first_block = page->index * blocks_per_page; 1353 for (i = 0; i < blocks_per_page; i++) { 1354 group = (first_block + i) >> 1; 1355 if (group >= ngroups) 1356 break; 1357 1358 if (!bh[group - first_group]) 1359 /* skip initialized uptodate buddy */ 1360 continue; 1361 1362 if (!buffer_verified(bh[group - first_group])) 1363 /* Skip faulty bitmaps */ 1364 continue; 1365 err = 0; 1366 1367 /* 1368 * data carry information regarding this 1369 * particular group in the format specified 1370 * above 1371 * 1372 */ 1373 data = page_address(page) + (i * blocksize); 1374 bitmap = bh[group - first_group]->b_data; 1375 1376 /* 1377 * We place the buddy block and bitmap block 1378 * close together 1379 */ 1380 grinfo = ext4_get_group_info(sb, group); 1381 if (!grinfo) { 1382 err = -EFSCORRUPTED; 1383 goto out; 1384 } 1385 if ((first_block + i) & 1) { 1386 /* this is block of buddy */ 1387 BUG_ON(incore == NULL); 1388 mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1389 group, page->index, i * blocksize); 1390 trace_ext4_mb_buddy_bitmap_load(sb, group); 1391 grinfo->bb_fragments = 0; 1392 memset(grinfo->bb_counters, 0, 1393 sizeof(*grinfo->bb_counters) * 1394 (MB_NUM_ORDERS(sb))); 1395 /* 1396 * incore got set to the group block bitmap below 1397 */ 1398 ext4_lock_group(sb, group); 1399 /* init the buddy */ 1400 memset(data, 0xff, blocksize); 1401 ext4_mb_generate_buddy(sb, data, incore, group, grinfo); 1402 ext4_unlock_group(sb, group); 1403 incore = NULL; 1404 } else { 1405 /* this is block of bitmap */ 1406 BUG_ON(incore != NULL); 1407 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1408 group, page->index, i * blocksize); 1409 trace_ext4_mb_bitmap_load(sb, group); 1410 1411 /* see comments in ext4_mb_put_pa() */ 1412 ext4_lock_group(sb, group); 1413 memcpy(data, bitmap, blocksize); 1414 1415 /* mark all preallocated blks used in in-core bitmap */ 1416 ext4_mb_generate_from_pa(sb, data, group); 1417 WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root)); 1418 ext4_unlock_group(sb, group); 1419 1420 /* set incore so that the buddy information can be 1421 * generated using this 1422 */ 1423 incore = data; 1424 } 1425 } 1426 SetPageUptodate(page); 1427 1428 out: 1429 if (bh) { 1430 for (i = 0; i < groups_per_page; i++) 1431 brelse(bh[i]); 1432 if (bh != &bhs) 1433 kfree(bh); 1434 } 1435 return err; 1436 } 1437 1438 /* 1439 * Lock the buddy and bitmap pages. This make sure other parallel init_group 1440 * on the same buddy page doesn't happen whild holding the buddy page lock. 1441 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 1442 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 
1443 */ 1444 static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1445 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1446 { 1447 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 1448 int block, pnum, poff; 1449 int blocks_per_page; 1450 struct page *page; 1451 1452 e4b->bd_buddy_page = NULL; 1453 e4b->bd_bitmap_page = NULL; 1454 1455 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1456 /* 1457 * the buddy cache inode stores the block bitmap 1458 * and buddy information in consecutive blocks. 1459 * So for each group we need two blocks. 1460 */ 1461 block = group * 2; 1462 pnum = block / blocks_per_page; 1463 poff = block % blocks_per_page; 1464 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1465 if (!page) 1466 return -ENOMEM; 1467 BUG_ON(page->mapping != inode->i_mapping); 1468 e4b->bd_bitmap_page = page; 1469 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1470 1471 if (blocks_per_page >= 2) { 1472 /* buddy and bitmap are on the same page */ 1473 return 0; 1474 } 1475 1476 block++; 1477 pnum = block / blocks_per_page; 1478 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1479 if (!page) 1480 return -ENOMEM; 1481 BUG_ON(page->mapping != inode->i_mapping); 1482 e4b->bd_buddy_page = page; 1483 return 0; 1484 } 1485 1486 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1487 { 1488 if (e4b->bd_bitmap_page) { 1489 unlock_page(e4b->bd_bitmap_page); 1490 put_page(e4b->bd_bitmap_page); 1491 } 1492 if (e4b->bd_buddy_page) { 1493 unlock_page(e4b->bd_buddy_page); 1494 put_page(e4b->bd_buddy_page); 1495 } 1496 } 1497 1498 /* 1499 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1500 * block group lock of all groups for this page; do not hold the BG lock when 1501 * calling this routine! 1502 */ 1503 static noinline_for_stack 1504 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1505 { 1506 1507 struct ext4_group_info *this_grp; 1508 struct ext4_buddy e4b; 1509 struct page *page; 1510 int ret = 0; 1511 1512 might_sleep(); 1513 mb_debug(sb, "init group %u\n", group); 1514 this_grp = ext4_get_group_info(sb, group); 1515 if (!this_grp) 1516 return -EFSCORRUPTED; 1517 1518 /* 1519 * This ensures that we don't reinit the buddy cache 1520 * page which map to the group from which we are already 1521 * allocating. If we are looking at the buddy cache we would 1522 * have taken a reference using ext4_mb_load_buddy and that 1523 * would have pinned buddy page to page cache. 1524 * The call to ext4_mb_get_buddy_page_lock will mark the 1525 * page accessed. 
1526 */ 1527 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 1528 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1529 /* 1530 * somebody initialized the group 1531 * return without doing anything 1532 */ 1533 goto err; 1534 } 1535 1536 page = e4b.bd_bitmap_page; 1537 ret = ext4_mb_init_cache(page, NULL, gfp); 1538 if (ret) 1539 goto err; 1540 if (!PageUptodate(page)) { 1541 ret = -EIO; 1542 goto err; 1543 } 1544 1545 if (e4b.bd_buddy_page == NULL) { 1546 /* 1547 * If both the bitmap and buddy are in 1548 * the same page we don't need to force 1549 * init the buddy 1550 */ 1551 ret = 0; 1552 goto err; 1553 } 1554 /* init buddy cache */ 1555 page = e4b.bd_buddy_page; 1556 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 1557 if (ret) 1558 goto err; 1559 if (!PageUptodate(page)) { 1560 ret = -EIO; 1561 goto err; 1562 } 1563 err: 1564 ext4_mb_put_buddy_page_lock(&e4b); 1565 return ret; 1566 } 1567 1568 /* 1569 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1570 * block group lock of all groups for this page; do not hold the BG lock when 1571 * calling this routine! 1572 */ 1573 static noinline_for_stack int 1574 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1575 struct ext4_buddy *e4b, gfp_t gfp) 1576 { 1577 int blocks_per_page; 1578 int block; 1579 int pnum; 1580 int poff; 1581 struct page *page; 1582 int ret; 1583 struct ext4_group_info *grp; 1584 struct ext4_sb_info *sbi = EXT4_SB(sb); 1585 struct inode *inode = sbi->s_buddy_cache; 1586 1587 might_sleep(); 1588 mb_debug(sb, "load group %u\n", group); 1589 1590 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1591 grp = ext4_get_group_info(sb, group); 1592 if (!grp) 1593 return -EFSCORRUPTED; 1594 1595 e4b->bd_blkbits = sb->s_blocksize_bits; 1596 e4b->bd_info = grp; 1597 e4b->bd_sb = sb; 1598 e4b->bd_group = group; 1599 e4b->bd_buddy_page = NULL; 1600 e4b->bd_bitmap_page = NULL; 1601 1602 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1603 /* 1604 * we need full data about the group 1605 * to make a good selection 1606 */ 1607 ret = ext4_mb_init_group(sb, group, gfp); 1608 if (ret) 1609 return ret; 1610 } 1611 1612 /* 1613 * the buddy cache inode stores the block bitmap 1614 * and buddy information in consecutive blocks. 1615 * So for each group we need two blocks. 1616 */ 1617 block = group * 2; 1618 pnum = block / blocks_per_page; 1619 poff = block % blocks_per_page; 1620 1621 /* we could use find_or_create_page(), but it locks page 1622 * what we'd like to avoid in fast path ... */ 1623 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1624 if (page == NULL || !PageUptodate(page)) { 1625 if (page) 1626 /* 1627 * drop the page reference and try 1628 * to get the page with lock. If we 1629 * are not uptodate that implies 1630 * somebody just created the page but 1631 * is yet to initialize the same. So 1632 * wait for it to initialize. 
1633 */ 1634 put_page(page); 1635 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1636 if (page) { 1637 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 1638 "ext4: bitmap's paging->mapping != inode->i_mapping\n")) { 1639 /* should never happen */ 1640 unlock_page(page); 1641 ret = -EINVAL; 1642 goto err; 1643 } 1644 if (!PageUptodate(page)) { 1645 ret = ext4_mb_init_cache(page, NULL, gfp); 1646 if (ret) { 1647 unlock_page(page); 1648 goto err; 1649 } 1650 mb_cmp_bitmaps(e4b, page_address(page) + 1651 (poff * sb->s_blocksize)); 1652 } 1653 unlock_page(page); 1654 } 1655 } 1656 if (page == NULL) { 1657 ret = -ENOMEM; 1658 goto err; 1659 } 1660 if (!PageUptodate(page)) { 1661 ret = -EIO; 1662 goto err; 1663 } 1664 1665 /* Pages marked accessed already */ 1666 e4b->bd_bitmap_page = page; 1667 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1668 1669 block++; 1670 pnum = block / blocks_per_page; 1671 poff = block % blocks_per_page; 1672 1673 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1674 if (page == NULL || !PageUptodate(page)) { 1675 if (page) 1676 put_page(page); 1677 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1678 if (page) { 1679 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 1680 "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { 1681 /* should never happen */ 1682 unlock_page(page); 1683 ret = -EINVAL; 1684 goto err; 1685 } 1686 if (!PageUptodate(page)) { 1687 ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1688 gfp); 1689 if (ret) { 1690 unlock_page(page); 1691 goto err; 1692 } 1693 } 1694 unlock_page(page); 1695 } 1696 } 1697 if (page == NULL) { 1698 ret = -ENOMEM; 1699 goto err; 1700 } 1701 if (!PageUptodate(page)) { 1702 ret = -EIO; 1703 goto err; 1704 } 1705 1706 /* Pages marked accessed already */ 1707 e4b->bd_buddy_page = page; 1708 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1709 1710 return 0; 1711 1712 err: 1713 if (page) 1714 put_page(page); 1715 if (e4b->bd_bitmap_page) 1716 put_page(e4b->bd_bitmap_page); 1717 1718 e4b->bd_buddy = NULL; 1719 e4b->bd_bitmap = NULL; 1720 return ret; 1721 } 1722 1723 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1724 struct ext4_buddy *e4b) 1725 { 1726 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1727 } 1728 1729 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1730 { 1731 if (e4b->bd_bitmap_page) 1732 put_page(e4b->bd_bitmap_page); 1733 if (e4b->bd_buddy_page) 1734 put_page(e4b->bd_buddy_page); 1735 } 1736 1737 1738 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1739 { 1740 int order = 1, max; 1741 void *bb; 1742 1743 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1744 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1745 1746 while (order <= e4b->bd_blkbits + 1) { 1747 bb = mb_find_buddy(e4b, order, &max); 1748 if (!mb_test_bit(block >> order, bb)) { 1749 /* this block is part of buddy of order 'order' */ 1750 return order; 1751 } 1752 order++; 1753 } 1754 return 0; 1755 } 1756 1757 static void mb_clear_bits(void *bm, int cur, int len) 1758 { 1759 __u32 *addr; 1760 1761 len = cur + len; 1762 while (cur < len) { 1763 if ((cur & 31) == 0 && (len - cur) >= 32) { 1764 /* fast path: clear whole word at once */ 1765 addr = bm + (cur >> 3); 1766 *addr = 0; 1767 cur += 32; 1768 continue; 1769 } 1770 mb_clear_bit(cur, bm); 1771 cur++; 1772 } 1773 } 1774 1775 /* clear bits in given range 1776 * will return first found zero bit if any, -1 otherwise 1777 */ 1778 static int 
mb_test_and_clear_bits(void *bm, int cur, int len) 1779 { 1780 __u32 *addr; 1781 int zero_bit = -1; 1782 1783 len = cur + len; 1784 while (cur < len) { 1785 if ((cur & 31) == 0 && (len - cur) >= 32) { 1786 /* fast path: clear whole word at once */ 1787 addr = bm + (cur >> 3); 1788 if (*addr != (__u32)(-1) && zero_bit == -1) 1789 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1790 *addr = 0; 1791 cur += 32; 1792 continue; 1793 } 1794 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1795 zero_bit = cur; 1796 cur++; 1797 } 1798 1799 return zero_bit; 1800 } 1801 1802 void mb_set_bits(void *bm, int cur, int len) 1803 { 1804 __u32 *addr; 1805 1806 len = cur + len; 1807 while (cur < len) { 1808 if ((cur & 31) == 0 && (len - cur) >= 32) { 1809 /* fast path: set whole word at once */ 1810 addr = bm + (cur >> 3); 1811 *addr = 0xffffffff; 1812 cur += 32; 1813 continue; 1814 } 1815 mb_set_bit(cur, bm); 1816 cur++; 1817 } 1818 } 1819 1820 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1821 { 1822 if (mb_test_bit(*bit + side, bitmap)) { 1823 mb_clear_bit(*bit, bitmap); 1824 (*bit) -= side; 1825 return 1; 1826 } 1827 else { 1828 (*bit) += side; 1829 mb_set_bit(*bit, bitmap); 1830 return -1; 1831 } 1832 } 1833 1834 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1835 { 1836 int max; 1837 int order = 1; 1838 void *buddy = mb_find_buddy(e4b, order, &max); 1839 1840 while (buddy) { 1841 void *buddy2; 1842 1843 /* Bits in range [first; last] are known to be set since 1844 * corresponding blocks were allocated. Bits in range 1845 * (first; last) will stay set because they form buddies on 1846 * upper layer. We just deal with borders if they don't 1847 * align with upper layer and then go up. 1848 * Releasing entire group is all about clearing 1849 * single bit of highest order buddy. 1850 */ 1851 1852 /* Example: 1853 * --------------------------------- 1854 * | 1 | 1 | 1 | 1 | 1855 * --------------------------------- 1856 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1857 * --------------------------------- 1858 * 0 1 2 3 4 5 6 7 1859 * \_____________________/ 1860 * 1861 * Neither [1] nor [6] is aligned to above layer. 1862 * Left neighbour [0] is free, so mark it busy, 1863 * decrease bb_counters and extend range to 1864 * [0; 6] 1865 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1866 * mark [6] free, increase bb_counters and shrink range to 1867 * [0; 5]. 1868 * Then shift range to [0; 2], go up and do the same. 1869 */ 1870 1871 1872 if (first & 1) 1873 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1874 if (!(last & 1)) 1875 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1876 if (first > last) 1877 break; 1878 order++; 1879 1880 buddy2 = mb_find_buddy(e4b, order, &max); 1881 if (!buddy2) { 1882 mb_clear_bits(buddy, first, last - first + 1); 1883 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1884 break; 1885 } 1886 first >>= 1; 1887 last >>= 1; 1888 buddy = buddy2; 1889 } 1890 } 1891 1892 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1893 int first, int count) 1894 { 1895 int left_is_free = 0; 1896 int right_is_free = 0; 1897 int block; 1898 int last = first + count - 1; 1899 struct super_block *sb = e4b->bd_sb; 1900 1901 if (WARN_ON(count == 0)) 1902 return; 1903 BUG_ON(last >= (sb->s_blocksize << 3)); 1904 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1905 /* Don't bother if the block group is corrupt. 
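	 * (its bitmap and counters can no longer be trusted, so the free is
	 * simply skipped below)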
*/ 1906 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1907 return; 1908 1909 mb_check_buddy(e4b); 1910 mb_free_blocks_double(inode, e4b, first, count); 1911 1912 this_cpu_inc(discard_pa_seq); 1913 e4b->bd_info->bb_free += count; 1914 if (first < e4b->bd_info->bb_first_free) 1915 e4b->bd_info->bb_first_free = first; 1916 1917 /* access memory sequentially: check left neighbour, 1918 * clear range and then check right neighbour 1919 */ 1920 if (first != 0) 1921 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1922 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1923 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1924 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1925 1926 if (unlikely(block != -1)) { 1927 struct ext4_sb_info *sbi = EXT4_SB(sb); 1928 ext4_fsblk_t blocknr; 1929 1930 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1931 blocknr += EXT4_C2B(sbi, block); 1932 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 1933 ext4_grp_locked_error(sb, e4b->bd_group, 1934 inode ? inode->i_ino : 0, 1935 blocknr, 1936 "freeing already freed block (bit %u); block bitmap corrupt.", 1937 block); 1938 ext4_mark_group_bitmap_corrupted( 1939 sb, e4b->bd_group, 1940 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1941 } else { 1942 mb_regenerate_buddy(e4b); 1943 } 1944 goto done; 1945 } 1946 1947 /* let's maintain fragments counter */ 1948 if (left_is_free && right_is_free) 1949 e4b->bd_info->bb_fragments--; 1950 else if (!left_is_free && !right_is_free) 1951 e4b->bd_info->bb_fragments++; 1952 1953 /* buddy[0] == bd_bitmap is a special case, so handle 1954 * it right away and let mb_buddy_mark_free stay free of 1955 * zero order checks. 1956 * Check if neighbours are to be coaleasced, 1957 * adjust bitmap bb_counters and borders appropriately. 1958 */ 1959 if (first & 1) { 1960 first += !left_is_free; 1961 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1962 } 1963 if (!(last & 1)) { 1964 last -= !right_is_free; 1965 e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 1966 } 1967 1968 if (first <= last) 1969 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1970 1971 done: 1972 mb_set_largest_free_order(sb, e4b->bd_info); 1973 mb_update_avg_fragment_size(sb, e4b->bd_info); 1974 mb_check_buddy(e4b); 1975 } 1976 1977 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1978 int needed, struct ext4_free_extent *ex) 1979 { 1980 int next = block; 1981 int max, order; 1982 void *buddy; 1983 1984 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1985 BUG_ON(ex == NULL); 1986 1987 buddy = mb_find_buddy(e4b, 0, &max); 1988 BUG_ON(buddy == NULL); 1989 BUG_ON(block >= max); 1990 if (mb_test_bit(block, buddy)) { 1991 ex->fe_len = 0; 1992 ex->fe_start = 0; 1993 ex->fe_group = 0; 1994 return 0; 1995 } 1996 1997 /* find actual order */ 1998 order = mb_find_order_for_block(e4b, block); 1999 block = block >> order; 2000 2001 ex->fe_len = 1 << order; 2002 ex->fe_start = block << order; 2003 ex->fe_group = e4b->bd_group; 2004 2005 /* calc difference from given start */ 2006 next = next - ex->fe_start; 2007 ex->fe_len -= next; 2008 ex->fe_start += next; 2009 2010 while (needed > ex->fe_len && 2011 mb_find_buddy(e4b, order, &max)) { 2012 2013 if (block + 1 >= max) 2014 break; 2015 2016 next = (block + 1) * (1 << order); 2017 if (mb_test_bit(next, e4b->bd_bitmap)) 2018 break; 2019 2020 order = mb_find_order_for_block(e4b, next); 2021 2022 block = next >> order; 2023 ex->fe_len += 1 << order; 2024 } 2025 2026 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 2027 /* Should never happen! (but apparently sometimes does?!?) */ 2028 WARN_ON(1); 2029 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 2030 "corruption or bug in mb_find_extent " 2031 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 2032 block, order, needed, ex->fe_group, ex->fe_start, 2033 ex->fe_len, ex->fe_logical); 2034 ex->fe_len = 0; 2035 ex->fe_start = 0; 2036 ex->fe_group = 0; 2037 } 2038 return ex->fe_len; 2039 } 2040 2041 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 2042 { 2043 int ord; 2044 int mlen = 0; 2045 int max = 0; 2046 int cur; 2047 int start = ex->fe_start; 2048 int len = ex->fe_len; 2049 unsigned ret = 0; 2050 int len0 = len; 2051 void *buddy; 2052 bool split = false; 2053 2054 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 2055 BUG_ON(e4b->bd_group != ex->fe_group); 2056 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2057 mb_check_buddy(e4b); 2058 mb_mark_used_double(e4b, start, len); 2059 2060 this_cpu_inc(discard_pa_seq); 2061 e4b->bd_info->bb_free -= len; 2062 if (e4b->bd_info->bb_first_free == start) 2063 e4b->bd_info->bb_first_free += len; 2064 2065 /* let's maintain fragments counter */ 2066 if (start != 0) 2067 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 2068 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 2069 max = !mb_test_bit(start + len, e4b->bd_bitmap); 2070 if (mlen && max) 2071 e4b->bd_info->bb_fragments++; 2072 else if (!mlen && !max) 2073 e4b->bd_info->bb_fragments--; 2074 2075 /* let's maintain buddy itself */ 2076 while (len) { 2077 if (!split) 2078 ord = mb_find_order_for_block(e4b, start); 2079 2080 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 2081 /* the whole chunk may be allocated at once! 
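			 * For example, start == 32 with ord == 3 covers
			 * clusters 32..39: a single order-3 buddy bit
			 * (32 >> 3 == 4) is set and bb_counters[3] is
			 * decremented once.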
*/ 2082 mlen = 1 << ord; 2083 if (!split) 2084 buddy = mb_find_buddy(e4b, ord, &max); 2085 else 2086 split = false; 2087 BUG_ON((start >> ord) >= max); 2088 mb_set_bit(start >> ord, buddy); 2089 e4b->bd_info->bb_counters[ord]--; 2090 start += mlen; 2091 len -= mlen; 2092 BUG_ON(len < 0); 2093 continue; 2094 } 2095 2096 /* store for history */ 2097 if (ret == 0) 2098 ret = len | (ord << 16); 2099 2100 /* we have to split large buddy */ 2101 BUG_ON(ord <= 0); 2102 buddy = mb_find_buddy(e4b, ord, &max); 2103 mb_set_bit(start >> ord, buddy); 2104 e4b->bd_info->bb_counters[ord]--; 2105 2106 ord--; 2107 cur = (start >> ord) & ~1U; 2108 buddy = mb_find_buddy(e4b, ord, &max); 2109 mb_clear_bit(cur, buddy); 2110 mb_clear_bit(cur + 1, buddy); 2111 e4b->bd_info->bb_counters[ord]++; 2112 e4b->bd_info->bb_counters[ord]++; 2113 split = true; 2114 } 2115 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 2116 2117 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 2118 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 2119 mb_check_buddy(e4b); 2120 2121 return ret; 2122 } 2123 2124 /* 2125 * Must be called under group lock! 2126 */ 2127 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 2128 struct ext4_buddy *e4b) 2129 { 2130 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2131 int ret; 2132 2133 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 2134 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2135 2136 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2137 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2138 ret = mb_mark_used(e4b, &ac->ac_b_ex); 2139 2140 /* preallocation can change ac_b_ex, thus we store actually 2141 * allocated blocks for history */ 2142 ac->ac_f_ex = ac->ac_b_ex; 2143 2144 ac->ac_status = AC_STATUS_FOUND; 2145 ac->ac_tail = ret & 0xffff; 2146 ac->ac_buddy = ret >> 16; 2147 2148 /* 2149 * take the page reference. We want the page to be pinned 2150 * so that we don't get a ext4_mb_init_cache_call for this 2151 * group until we update the bitmap. That would mean we 2152 * double allocate blocks. The reference is dropped 2153 * in ext4_mb_release_context 2154 */ 2155 ac->ac_bitmap_page = e4b->bd_bitmap_page; 2156 get_page(ac->ac_bitmap_page); 2157 ac->ac_buddy_page = e4b->bd_buddy_page; 2158 get_page(ac->ac_buddy_page); 2159 /* store last allocated for subsequent stream allocation */ 2160 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2161 spin_lock(&sbi->s_md_lock); 2162 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2163 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2164 spin_unlock(&sbi->s_md_lock); 2165 } 2166 /* 2167 * As we've just preallocated more space than 2168 * user requested originally, we store allocated 2169 * space in a special descriptor. 
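	 * ext4_mb_new_preallocation() below turns that unused tail into
	 * either an inode PA or a locality-group PA, depending on the
	 * allocation flags.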
	 */
	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
		ext4_mb_new_preallocation(ac);

}

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
		ext4_mb_use_best_found(ac, e4b);
}

/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and flag is set to the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if the new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find good enough extent.
 *
 * The algorithm used is roughly as follows:
 *
 * * If free extent found is exactly as big as goal, then
 *   stop the scan and use it immediately
 *
 * * If free extent found is smaller than goal, then keep retrying
 *   up to a max of sbi->s_mb_max_to_scan times (default 200). After
 *   that stop scanning and use whatever we have.
 *
 * * If free extent found is bigger than goal, then keep retrying
 *   up to a max of sbi->s_mb_min_to_scan times (default 10) before
 *   stopping the scan and using the extent.
 *
 * FIXME: real allocation policy is to be designed yet!
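 *
 * Worked example, assuming a goal of 16 clusters: a 16-cluster extent is
 * taken immediately; while the goal is unmet, a 12-cluster find replaces a
 * stored 8-cluster one; once the goal is exceeded, a 20-cluster find
 * replaces a stored 24-cluster one, since the smaller oversized extent
 * wastes less.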
2228 */ 2229 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2230 struct ext4_free_extent *ex, 2231 struct ext4_buddy *e4b) 2232 { 2233 struct ext4_free_extent *bex = &ac->ac_b_ex; 2234 struct ext4_free_extent *gex = &ac->ac_g_ex; 2235 2236 BUG_ON(ex->fe_len <= 0); 2237 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2238 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2239 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2240 2241 ac->ac_found++; 2242 ac->ac_cX_found[ac->ac_criteria]++; 2243 2244 /* 2245 * The special case - take what you catch first 2246 */ 2247 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2248 *bex = *ex; 2249 ext4_mb_use_best_found(ac, e4b); 2250 return; 2251 } 2252 2253 /* 2254 * Let's check whether the chuck is good enough 2255 */ 2256 if (ex->fe_len == gex->fe_len) { 2257 *bex = *ex; 2258 ext4_mb_use_best_found(ac, e4b); 2259 return; 2260 } 2261 2262 /* 2263 * If this is first found extent, just store it in the context 2264 */ 2265 if (bex->fe_len == 0) { 2266 *bex = *ex; 2267 return; 2268 } 2269 2270 /* 2271 * If new found extent is better, store it in the context 2272 */ 2273 if (bex->fe_len < gex->fe_len) { 2274 /* if the request isn't satisfied, any found extent 2275 * larger than previous best one is better */ 2276 if (ex->fe_len > bex->fe_len) 2277 *bex = *ex; 2278 } else if (ex->fe_len > gex->fe_len) { 2279 /* if the request is satisfied, then we try to find 2280 * an extent that still satisfy the request, but is 2281 * smaller than previous one */ 2282 if (ex->fe_len < bex->fe_len) 2283 *bex = *ex; 2284 } 2285 2286 ext4_mb_check_limits(ac, e4b, 0); 2287 } 2288 2289 static noinline_for_stack 2290 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2291 struct ext4_buddy *e4b) 2292 { 2293 struct ext4_free_extent ex = ac->ac_b_ex; 2294 ext4_group_t group = ex.fe_group; 2295 int max; 2296 int err; 2297 2298 BUG_ON(ex.fe_len <= 0); 2299 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2300 if (err) 2301 return; 2302 2303 ext4_lock_group(ac->ac_sb, group); 2304 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2305 2306 if (max > 0) { 2307 ac->ac_b_ex = ex; 2308 ext4_mb_use_best_found(ac, e4b); 2309 } 2310 2311 ext4_unlock_group(ac->ac_sb, group); 2312 ext4_mb_unload_buddy(e4b); 2313 } 2314 2315 static noinline_for_stack 2316 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2317 struct ext4_buddy *e4b) 2318 { 2319 ext4_group_t group = ac->ac_g_ex.fe_group; 2320 int max; 2321 int err; 2322 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2323 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2324 struct ext4_free_extent ex; 2325 2326 if (!grp) 2327 return -EFSCORRUPTED; 2328 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2329 return 0; 2330 if (grp->bb_free == 0) 2331 return 0; 2332 2333 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2334 if (err) 2335 return err; 2336 2337 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2338 ext4_mb_unload_buddy(e4b); 2339 return 0; 2340 } 2341 2342 ext4_lock_group(ac->ac_sb, group); 2343 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2344 ac->ac_g_ex.fe_len, &ex); 2345 ex.fe_logical = 0xDEADFA11; /* debug value */ 2346 2347 if (max >= ac->ac_g_ex.fe_len && 2348 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) { 2349 ext4_fsblk_t start; 2350 2351 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); 2352 /* use do_div to get remainder (would be 64-bit modulo) */ 2353 if (do_div(start, sbi->s_stripe) == 
0) { 2354 ac->ac_found++; 2355 ac->ac_b_ex = ex; 2356 ext4_mb_use_best_found(ac, e4b); 2357 } 2358 } else if (max >= ac->ac_g_ex.fe_len) { 2359 BUG_ON(ex.fe_len <= 0); 2360 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2361 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2362 ac->ac_found++; 2363 ac->ac_b_ex = ex; 2364 ext4_mb_use_best_found(ac, e4b); 2365 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2366 /* Sometimes, caller may want to merge even small 2367 * number of blocks to an existing extent */ 2368 BUG_ON(ex.fe_len <= 0); 2369 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2370 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2371 ac->ac_found++; 2372 ac->ac_b_ex = ex; 2373 ext4_mb_use_best_found(ac, e4b); 2374 } 2375 ext4_unlock_group(ac->ac_sb, group); 2376 ext4_mb_unload_buddy(e4b); 2377 2378 return 0; 2379 } 2380 2381 /* 2382 * The routine scans buddy structures (not bitmap!) from given order 2383 * to max order and tries to find big enough chunk to satisfy the req 2384 */ 2385 static noinline_for_stack 2386 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2387 struct ext4_buddy *e4b) 2388 { 2389 struct super_block *sb = ac->ac_sb; 2390 struct ext4_group_info *grp = e4b->bd_info; 2391 void *buddy; 2392 int i; 2393 int k; 2394 int max; 2395 2396 BUG_ON(ac->ac_2order <= 0); 2397 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2398 if (grp->bb_counters[i] == 0) 2399 continue; 2400 2401 buddy = mb_find_buddy(e4b, i, &max); 2402 if (WARN_RATELIMIT(buddy == NULL, 2403 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 2404 continue; 2405 2406 k = mb_find_next_zero_bit(buddy, max, 0); 2407 if (k >= max) { 2408 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2409 "%d free clusters of order %d. But found 0", 2410 grp->bb_counters[i], i); 2411 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2412 e4b->bd_group, 2413 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2414 break; 2415 } 2416 ac->ac_found++; 2417 ac->ac_cX_found[ac->ac_criteria]++; 2418 2419 ac->ac_b_ex.fe_len = 1 << i; 2420 ac->ac_b_ex.fe_start = k << i; 2421 ac->ac_b_ex.fe_group = e4b->bd_group; 2422 2423 ext4_mb_use_best_found(ac, e4b); 2424 2425 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2426 2427 if (EXT4_SB(sb)->s_mb_stats) 2428 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2429 2430 break; 2431 } 2432 } 2433 2434 /* 2435 * The routine scans the group and measures all found extents. 2436 * In order to optimize scanning, caller must pass number of 2437 * free blocks in the group, so the routine can know upper limit. 2438 */ 2439 static noinline_for_stack 2440 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2441 struct ext4_buddy *e4b) 2442 { 2443 struct super_block *sb = ac->ac_sb; 2444 void *bitmap = e4b->bd_bitmap; 2445 struct ext4_free_extent ex; 2446 int i, j, freelen; 2447 int free; 2448 2449 free = e4b->bd_info->bb_free; 2450 if (WARN_ON(free <= 0)) 2451 return; 2452 2453 i = e4b->bd_info->bb_first_free; 2454 2455 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2456 i = mb_find_next_zero_bit(bitmap, 2457 EXT4_CLUSTERS_PER_GROUP(sb), i); 2458 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 2459 /* 2460 * IF we have corrupt bitmap, we won't find any 2461 * free blocks even though group info says we 2462 * have free blocks 2463 */ 2464 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2465 "%d free clusters as per " 2466 "group info. 
But bitmap says 0", 2467 free); 2468 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2469 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2470 break; 2471 } 2472 2473 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { 2474 /* 2475 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are 2476 * sure that this group will have a large enough 2477 * continuous free extent, so skip over the smaller free 2478 * extents 2479 */ 2480 j = mb_find_next_bit(bitmap, 2481 EXT4_CLUSTERS_PER_GROUP(sb), i); 2482 freelen = j - i; 2483 2484 if (freelen < ac->ac_g_ex.fe_len) { 2485 i = j; 2486 free -= freelen; 2487 continue; 2488 } 2489 } 2490 2491 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2492 if (WARN_ON(ex.fe_len <= 0)) 2493 break; 2494 if (free < ex.fe_len) { 2495 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2496 "%d free clusters as per " 2497 "group info. But got %d blocks", 2498 free, ex.fe_len); 2499 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2500 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2501 /* 2502 * The number of free blocks differs. This mostly 2503 * indicate that the bitmap is corrupt. So exit 2504 * without claiming the space. 2505 */ 2506 break; 2507 } 2508 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2509 ext4_mb_measure_extent(ac, &ex, e4b); 2510 2511 i += ex.fe_len; 2512 free -= ex.fe_len; 2513 } 2514 2515 ext4_mb_check_limits(ac, e4b, 1); 2516 } 2517 2518 /* 2519 * This is a special case for storages like raid5 2520 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2521 */ 2522 static noinline_for_stack 2523 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2524 struct ext4_buddy *e4b) 2525 { 2526 struct super_block *sb = ac->ac_sb; 2527 struct ext4_sb_info *sbi = EXT4_SB(sb); 2528 void *bitmap = e4b->bd_bitmap; 2529 struct ext4_free_extent ex; 2530 ext4_fsblk_t first_group_block; 2531 ext4_fsblk_t a; 2532 ext4_grpblk_t i, stripe; 2533 int max; 2534 2535 BUG_ON(sbi->s_stripe == 0); 2536 2537 /* find first stripe-aligned block in group */ 2538 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2539 2540 a = first_group_block + sbi->s_stripe - 1; 2541 do_div(a, sbi->s_stripe); 2542 i = (a * sbi->s_stripe) - first_group_block; 2543 2544 stripe = EXT4_B2C(sbi, sbi->s_stripe); 2545 i = EXT4_B2C(sbi, i); 2546 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2547 if (!mb_test_bit(i, bitmap)) { 2548 max = mb_find_extent(e4b, i, stripe, &ex); 2549 if (max >= stripe) { 2550 ac->ac_found++; 2551 ac->ac_cX_found[ac->ac_criteria]++; 2552 ex.fe_logical = 0xDEADF00D; /* debug value */ 2553 ac->ac_b_ex = ex; 2554 ext4_mb_use_best_found(ac, e4b); 2555 break; 2556 } 2557 } 2558 i += stripe; 2559 } 2560 } 2561 2562 /* 2563 * This is also called BEFORE we load the buddy bitmap. 2564 * Returns either 1 or 0 indicating that the group is either suitable 2565 * for the allocation or not. 
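 * Because the buddy may not be loaded yet, only the cached per-group
 * counters (bb_free, bb_fragments, bb_largest_free_order) are consulted,
 * so the answer is cheap but optimistic.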
2566 */ 2567 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2568 ext4_group_t group, enum criteria cr) 2569 { 2570 ext4_grpblk_t free, fragments; 2571 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2572 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2573 2574 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS); 2575 2576 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2577 return false; 2578 2579 free = grp->bb_free; 2580 if (free == 0) 2581 return false; 2582 2583 fragments = grp->bb_fragments; 2584 if (fragments == 0) 2585 return false; 2586 2587 switch (cr) { 2588 case CR_POWER2_ALIGNED: 2589 BUG_ON(ac->ac_2order == 0); 2590 2591 /* Avoid using the first bg of a flexgroup for data files */ 2592 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2593 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2594 ((group % flex_size) == 0)) 2595 return false; 2596 2597 if (free < ac->ac_g_ex.fe_len) 2598 return false; 2599 2600 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 2601 return true; 2602 2603 if (grp->bb_largest_free_order < ac->ac_2order) 2604 return false; 2605 2606 return true; 2607 case CR_GOAL_LEN_FAST: 2608 case CR_BEST_AVAIL_LEN: 2609 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2610 return true; 2611 break; 2612 case CR_GOAL_LEN_SLOW: 2613 if (free >= ac->ac_g_ex.fe_len) 2614 return true; 2615 break; 2616 case CR_ANY_FREE: 2617 return true; 2618 default: 2619 BUG(); 2620 } 2621 2622 return false; 2623 } 2624 2625 /* 2626 * This could return negative error code if something goes wrong 2627 * during ext4_mb_init_group(). This should not be called with 2628 * ext4_lock_group() held. 2629 * 2630 * Note: because we are conditionally operating with the group lock in 2631 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2632 * function using __acquire and __release. This means we need to be 2633 * super careful before messing with the error path handling via "goto 2634 * out"! 2635 */ 2636 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2637 ext4_group_t group, enum criteria cr) 2638 { 2639 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2640 struct super_block *sb = ac->ac_sb; 2641 struct ext4_sb_info *sbi = EXT4_SB(sb); 2642 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2643 ext4_grpblk_t free; 2644 int ret = 0; 2645 2646 if (!grp) 2647 return -EFSCORRUPTED; 2648 if (sbi->s_mb_stats) 2649 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2650 if (should_lock) { 2651 ext4_lock_group(sb, group); 2652 __release(ext4_group_lock_ptr(sb, group)); 2653 } 2654 free = grp->bb_free; 2655 if (free == 0) 2656 goto out; 2657 /* 2658 * In all criterias except CR_ANY_FREE we try to avoid groups that 2659 * can't possibly satisfy the full goal request due to insufficient 2660 * free blocks. 2661 */ 2662 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) 2663 goto out; 2664 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2665 goto out; 2666 if (should_lock) { 2667 __acquire(ext4_group_lock_ptr(sb, group)); 2668 ext4_unlock_group(sb, group); 2669 } 2670 2671 /* We only do this if the grp has never been initialized */ 2672 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2673 struct ext4_group_desc *gdp = 2674 ext4_get_group_desc(sb, group, NULL); 2675 int ret; 2676 2677 /* 2678 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic 2679 * search to find large good chunks almost for free. If buddy 2680 * data is not ready, then this optimization makes no sense. 
But 2681 * we never skip the first block group in a flex_bg, since this 2682 * gets used for metadata block allocation, and we want to make 2683 * sure we locate metadata blocks in the first block group in 2684 * the flex_bg if possible. 2685 */ 2686 if (!ext4_mb_cr_expensive(cr) && 2687 (!sbi->s_log_groups_per_flex || 2688 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2689 !(ext4_has_group_desc_csum(sb) && 2690 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2691 return 0; 2692 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 2693 if (ret) 2694 return ret; 2695 } 2696 2697 if (should_lock) { 2698 ext4_lock_group(sb, group); 2699 __release(ext4_group_lock_ptr(sb, group)); 2700 } 2701 ret = ext4_mb_good_group(ac, group, cr); 2702 out: 2703 if (should_lock) { 2704 __acquire(ext4_group_lock_ptr(sb, group)); 2705 ext4_unlock_group(sb, group); 2706 } 2707 return ret; 2708 } 2709 2710 /* 2711 * Start prefetching @nr block bitmaps starting at @group. 2712 * Return the next group which needs to be prefetched. 2713 */ 2714 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2715 unsigned int nr, int *cnt) 2716 { 2717 ext4_group_t ngroups = ext4_get_groups_count(sb); 2718 struct buffer_head *bh; 2719 struct blk_plug plug; 2720 2721 blk_start_plug(&plug); 2722 while (nr-- > 0) { 2723 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2724 NULL); 2725 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2726 2727 /* 2728 * Prefetch block groups with free blocks; but don't 2729 * bother if it is marked uninitialized on disk, since 2730 * it won't require I/O to read. Also only try to 2731 * prefetch once, so we avoid getblk() call, which can 2732 * be expensive. 2733 */ 2734 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2735 EXT4_MB_GRP_NEED_INIT(grp) && 2736 ext4_free_group_clusters(sb, gdp) > 0 ) { 2737 bh = ext4_read_block_bitmap_nowait(sb, group, true); 2738 if (bh && !IS_ERR(bh)) { 2739 if (!buffer_uptodate(bh) && cnt) 2740 (*cnt)++; 2741 brelse(bh); 2742 } 2743 } 2744 if (++group >= ngroups) 2745 group = 0; 2746 } 2747 blk_finish_plug(&plug); 2748 return group; 2749 } 2750 2751 /* 2752 * Prefetching reads the block bitmap into the buffer cache; but we 2753 * need to make sure that the buddy bitmap in the page cache has been 2754 * initialized. Note that ext4_mb_init_group() will block if the I/O 2755 * is not yet completed, or indeed if it was not initiated by 2756 * ext4_mb_prefetch did not start the I/O. 2757 * 2758 * TODO: We should actually kick off the buddy bitmap setup in a work 2759 * queue when the buffer I/O is completed, so that we don't block 2760 * waiting for the block allocation bitmap read to finish when 2761 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
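 * Note that ext4_mb_prefetch_fini() below walks backwards over the same
 * groups that ext4_mb_prefetch() just read ahead, starting from the last
 * group prefetched.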
2762 */ 2763 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2764 unsigned int nr) 2765 { 2766 struct ext4_group_desc *gdp; 2767 struct ext4_group_info *grp; 2768 2769 while (nr-- > 0) { 2770 if (!group) 2771 group = ext4_get_groups_count(sb); 2772 group--; 2773 gdp = ext4_get_group_desc(sb, group, NULL); 2774 grp = ext4_get_group_info(sb, group); 2775 2776 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && 2777 ext4_free_group_clusters(sb, gdp) > 0) { 2778 if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2779 break; 2780 } 2781 } 2782 } 2783 2784 static noinline_for_stack int 2785 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2786 { 2787 ext4_group_t prefetch_grp = 0, ngroups, group, i; 2788 enum criteria new_cr, cr = CR_GOAL_LEN_FAST; 2789 int err = 0, first_err = 0; 2790 unsigned int nr = 0, prefetch_ios = 0; 2791 struct ext4_sb_info *sbi; 2792 struct super_block *sb; 2793 struct ext4_buddy e4b; 2794 int lost; 2795 2796 sb = ac->ac_sb; 2797 sbi = EXT4_SB(sb); 2798 ngroups = ext4_get_groups_count(sb); 2799 /* non-extent files are limited to low blocks/groups */ 2800 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2801 ngroups = sbi->s_blockfile_groups; 2802 2803 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2804 2805 /* first, try the goal */ 2806 err = ext4_mb_find_by_goal(ac, &e4b); 2807 if (err || ac->ac_status == AC_STATUS_FOUND) 2808 goto out; 2809 2810 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2811 goto out; 2812 2813 /* 2814 * ac->ac_2order is set only if the fe_len is a power of 2 2815 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED 2816 * so that we try exact allocation using buddy. 2817 */ 2818 i = fls(ac->ac_g_ex.fe_len); 2819 ac->ac_2order = 0; 2820 /* 2821 * We search using buddy data only if the order of the request 2822 * is greater than equal to the sbi_s_mb_order2_reqs 2823 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2824 * We also support searching for power-of-two requests only for 2825 * requests upto maximum buddy size we have constructed. 2826 */ 2827 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 2828 if (is_power_of_2(ac->ac_g_ex.fe_len)) 2829 ac->ac_2order = array_index_nospec(i - 1, 2830 MB_NUM_ORDERS(sb)); 2831 } 2832 2833 /* if stream allocation is enabled, use global goal */ 2834 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2835 /* TBD: may be hot point */ 2836 spin_lock(&sbi->s_md_lock); 2837 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2838 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2839 spin_unlock(&sbi->s_md_lock); 2840 } 2841 2842 /* 2843 * Let's just scan groups to find more-less suitable blocks We 2844 * start with CR_GOAL_LEN_FAST, unless it is power of 2 2845 * aligned, in which case let's do that faster approach first. 
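	 * For example, a goal of 16 clusters gives fls() == 5, so with the
	 * default mb_order2_req of 2 ac_2order becomes 4 and we start at
	 * CR_POWER2_ALIGNED; a 24-cluster goal is not a power of two, so
	 * ac_2order stays 0 and we start at CR_GOAL_LEN_FAST.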
2846 */ 2847 if (ac->ac_2order) 2848 cr = CR_POWER2_ALIGNED; 2849 repeat: 2850 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2851 ac->ac_criteria = cr; 2852 /* 2853 * searching for the right group start 2854 * from the goal value specified 2855 */ 2856 group = ac->ac_g_ex.fe_group; 2857 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2858 prefetch_grp = group; 2859 2860 for (i = 0, new_cr = cr; i < ngroups; i++, 2861 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 2862 int ret = 0; 2863 2864 cond_resched(); 2865 if (new_cr != cr) { 2866 cr = new_cr; 2867 goto repeat; 2868 } 2869 2870 /* 2871 * Batch reads of the block allocation bitmaps 2872 * to get multiple READs in flight; limit 2873 * prefetching at inexpensive CR, otherwise mballoc 2874 * can spend a lot of time loading imperfect groups 2875 */ 2876 if ((prefetch_grp == group) && 2877 (ext4_mb_cr_expensive(cr) || 2878 prefetch_ios < sbi->s_mb_prefetch_limit)) { 2879 nr = sbi->s_mb_prefetch; 2880 if (ext4_has_feature_flex_bg(sb)) { 2881 nr = 1 << sbi->s_log_groups_per_flex; 2882 nr -= group & (nr - 1); 2883 nr = min(nr, sbi->s_mb_prefetch); 2884 } 2885 prefetch_grp = ext4_mb_prefetch(sb, group, 2886 nr, &prefetch_ios); 2887 } 2888 2889 /* This now checks without needing the buddy page */ 2890 ret = ext4_mb_good_group_nolock(ac, group, cr); 2891 if (ret <= 0) { 2892 if (!first_err) 2893 first_err = ret; 2894 continue; 2895 } 2896 2897 err = ext4_mb_load_buddy(sb, group, &e4b); 2898 if (err) 2899 goto out; 2900 2901 ext4_lock_group(sb, group); 2902 2903 /* 2904 * We need to check again after locking the 2905 * block group 2906 */ 2907 ret = ext4_mb_good_group(ac, group, cr); 2908 if (ret == 0) { 2909 ext4_unlock_group(sb, group); 2910 ext4_mb_unload_buddy(&e4b); 2911 continue; 2912 } 2913 2914 ac->ac_groups_scanned++; 2915 if (cr == CR_POWER2_ALIGNED) 2916 ext4_mb_simple_scan_group(ac, &e4b); 2917 else if ((cr == CR_GOAL_LEN_FAST || 2918 cr == CR_BEST_AVAIL_LEN) && 2919 sbi->s_stripe && 2920 !(ac->ac_g_ex.fe_len % 2921 EXT4_B2C(sbi, sbi->s_stripe))) 2922 ext4_mb_scan_aligned(ac, &e4b); 2923 else 2924 ext4_mb_complex_scan_group(ac, &e4b); 2925 2926 ext4_unlock_group(sb, group); 2927 ext4_mb_unload_buddy(&e4b); 2928 2929 if (ac->ac_status != AC_STATUS_CONTINUE) 2930 break; 2931 } 2932 /* Processed all groups and haven't found blocks */ 2933 if (sbi->s_mb_stats && i == ngroups) 2934 atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2935 2936 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) 2937 /* Reset goal length to original goal length before 2938 * falling into CR_GOAL_LEN_SLOW */ 2939 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 2940 } 2941 2942 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2943 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2944 /* 2945 * We've been searching too long. Let's try to allocate 2946 * the best chunk we've found so far 2947 */ 2948 ext4_mb_try_best_found(ac, &e4b); 2949 if (ac->ac_status != AC_STATUS_FOUND) { 2950 /* 2951 * Someone more lucky has already allocated it. 
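			 * (e.g. the re-check under the group lock in
			 * ext4_mb_try_best_found() found its start already
			 * in use.)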
2952 * The only thing we can do is just take first 2953 * found block(s) 2954 */ 2955 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 2956 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2957 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2958 ac->ac_b_ex.fe_len, lost); 2959 2960 ac->ac_b_ex.fe_group = 0; 2961 ac->ac_b_ex.fe_start = 0; 2962 ac->ac_b_ex.fe_len = 0; 2963 ac->ac_status = AC_STATUS_CONTINUE; 2964 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2965 cr = CR_ANY_FREE; 2966 goto repeat; 2967 } 2968 } 2969 2970 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2971 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2972 out: 2973 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2974 err = first_err; 2975 2976 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2977 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2978 ac->ac_flags, cr, err); 2979 2980 if (nr) 2981 ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2982 2983 return err; 2984 } 2985 2986 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2987 { 2988 struct super_block *sb = pde_data(file_inode(seq->file)); 2989 ext4_group_t group; 2990 2991 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2992 return NULL; 2993 group = *pos + 1; 2994 return (void *) ((unsigned long) group); 2995 } 2996 2997 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2998 { 2999 struct super_block *sb = pde_data(file_inode(seq->file)); 3000 ext4_group_t group; 3001 3002 ++*pos; 3003 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3004 return NULL; 3005 group = *pos + 1; 3006 return (void *) ((unsigned long) group); 3007 } 3008 3009 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 3010 { 3011 struct super_block *sb = pde_data(file_inode(seq->file)); 3012 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 3013 int i; 3014 int err, buddy_loaded = 0; 3015 struct ext4_buddy e4b; 3016 struct ext4_group_info *grinfo; 3017 unsigned char blocksize_bits = min_t(unsigned char, 3018 sb->s_blocksize_bits, 3019 EXT4_MAX_BLOCK_LOG_SIZE); 3020 struct sg { 3021 struct ext4_group_info info; 3022 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 3023 } sg; 3024 3025 group--; 3026 if (group == 0) 3027 seq_puts(seq, "#group: free frags first [" 3028 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 3029 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 3030 3031 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 3032 sizeof(struct ext4_group_info); 3033 3034 grinfo = ext4_get_group_info(sb, group); 3035 if (!grinfo) 3036 return 0; 3037 /* Load the group info in memory only if not already loaded. */ 3038 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 3039 err = ext4_mb_load_buddy(sb, group, &e4b); 3040 if (err) { 3041 seq_printf(seq, "#%-5u: I/O error\n", group); 3042 return 0; 3043 } 3044 buddy_loaded = 1; 3045 } 3046 3047 memcpy(&sg, grinfo, i); 3048 3049 if (buddy_loaded) 3050 ext4_mb_unload_buddy(&e4b); 3051 3052 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 3053 sg.info.bb_fragments, sg.info.bb_first_free); 3054 for (i = 0; i <= 13; i++) 3055 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
3056 sg.info.bb_counters[i] : 0); 3057 seq_puts(seq, " ]\n"); 3058 3059 return 0; 3060 } 3061 3062 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 3063 { 3064 } 3065 3066 const struct seq_operations ext4_mb_seq_groups_ops = { 3067 .start = ext4_mb_seq_groups_start, 3068 .next = ext4_mb_seq_groups_next, 3069 .stop = ext4_mb_seq_groups_stop, 3070 .show = ext4_mb_seq_groups_show, 3071 }; 3072 3073 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 3074 { 3075 struct super_block *sb = seq->private; 3076 struct ext4_sb_info *sbi = EXT4_SB(sb); 3077 3078 seq_puts(seq, "mballoc:\n"); 3079 if (!sbi->s_mb_stats) { 3080 seq_puts(seq, "\tmb stats collection turned off.\n"); 3081 seq_puts( 3082 seq, 3083 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 3084 return 0; 3085 } 3086 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 3087 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 3088 3089 seq_printf(seq, "\tgroups_scanned: %u\n", 3090 atomic_read(&sbi->s_bal_groups_scanned)); 3091 3092 /* CR_POWER2_ALIGNED stats */ 3093 seq_puts(seq, "\tcr_p2_aligned_stats:\n"); 3094 seq_printf(seq, "\t\thits: %llu\n", 3095 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); 3096 seq_printf( 3097 seq, "\t\tgroups_considered: %llu\n", 3098 atomic64_read( 3099 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); 3100 seq_printf(seq, "\t\textents_scanned: %u\n", 3101 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); 3102 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3103 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); 3104 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3105 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); 3106 3107 /* CR_GOAL_LEN_FAST stats */ 3108 seq_puts(seq, "\tcr_goal_fast_stats:\n"); 3109 seq_printf(seq, "\t\thits: %llu\n", 3110 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); 3111 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3112 atomic64_read( 3113 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); 3114 seq_printf(seq, "\t\textents_scanned: %u\n", 3115 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); 3116 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3117 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); 3118 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3119 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); 3120 3121 /* CR_BEST_AVAIL_LEN stats */ 3122 seq_puts(seq, "\tcr_best_avail_stats:\n"); 3123 seq_printf(seq, "\t\thits: %llu\n", 3124 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); 3125 seq_printf( 3126 seq, "\t\tgroups_considered: %llu\n", 3127 atomic64_read( 3128 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); 3129 seq_printf(seq, "\t\textents_scanned: %u\n", 3130 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); 3131 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3132 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); 3133 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3134 atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); 3135 3136 /* CR_GOAL_LEN_SLOW stats */ 3137 seq_puts(seq, "\tcr_goal_slow_stats:\n"); 3138 seq_printf(seq, "\t\thits: %llu\n", 3139 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); 3140 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3141 atomic64_read( 3142 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); 3143 seq_printf(seq, "\t\textents_scanned: %u\n", 3144 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); 3145 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3146 
atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); 3147 3148 /* CR_ANY_FREE stats */ 3149 seq_puts(seq, "\tcr_any_free_stats:\n"); 3150 seq_printf(seq, "\t\thits: %llu\n", 3151 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); 3152 seq_printf( 3153 seq, "\t\tgroups_considered: %llu\n", 3154 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); 3155 seq_printf(seq, "\t\textents_scanned: %u\n", 3156 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); 3157 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3158 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); 3159 3160 /* Aggregates */ 3161 seq_printf(seq, "\textents_scanned: %u\n", 3162 atomic_read(&sbi->s_bal_ex_scanned)); 3163 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 3164 seq_printf(seq, "\t\tlen_goal_hits: %u\n", 3165 atomic_read(&sbi->s_bal_len_goals)); 3166 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 3167 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 3168 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 3169 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 3170 atomic_read(&sbi->s_mb_buddies_generated), 3171 ext4_get_groups_count(sb)); 3172 seq_printf(seq, "\tbuddies_time_used: %llu\n", 3173 atomic64_read(&sbi->s_mb_generation_time)); 3174 seq_printf(seq, "\tpreallocated: %u\n", 3175 atomic_read(&sbi->s_mb_preallocated)); 3176 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); 3177 return 0; 3178 } 3179 3180 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 3181 __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 3182 { 3183 struct super_block *sb = pde_data(file_inode(seq->file)); 3184 unsigned long position; 3185 3186 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3187 return NULL; 3188 position = *pos + 1; 3189 return (void *) ((unsigned long) position); 3190 } 3191 3192 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3193 { 3194 struct super_block *sb = pde_data(file_inode(seq->file)); 3195 unsigned long position; 3196 3197 ++*pos; 3198 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3199 return NULL; 3200 position = *pos + 1; 3201 return (void *) ((unsigned long) position); 3202 } 3203 3204 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3205 { 3206 struct super_block *sb = pde_data(file_inode(seq->file)); 3207 struct ext4_sb_info *sbi = EXT4_SB(sb); 3208 unsigned long position = ((unsigned long) v); 3209 struct ext4_group_info *grp; 3210 unsigned int count; 3211 3212 position--; 3213 if (position >= MB_NUM_ORDERS(sb)) { 3214 position -= MB_NUM_ORDERS(sb); 3215 if (position == 0) 3216 seq_puts(seq, "avg_fragment_size_lists:\n"); 3217 3218 count = 0; 3219 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 3220 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 3221 bb_avg_fragment_size_node) 3222 count++; 3223 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 3224 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3225 (unsigned int)position, count); 3226 return 0; 3227 } 3228 3229 if (position == 0) { 3230 seq_printf(seq, "optimize_scan: %d\n", 3231 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); 3232 seq_puts(seq, "max_free_order_lists:\n"); 3233 } 3234 count = 0; 3235 read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 3236 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3237 bb_largest_free_order_node) 3238 count++; 3239 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3240 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3241 (unsigned int)position, count); 3242 3243 return 0; 3244 } 3245 3246 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3247 { 3248 } 3249 3250 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3251 .start = ext4_mb_seq_structs_summary_start, 3252 .next = ext4_mb_seq_structs_summary_next, 3253 .stop = ext4_mb_seq_structs_summary_stop, 3254 .show = ext4_mb_seq_structs_summary_show, 3255 }; 3256 3257 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3258 { 3259 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3260 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3261 3262 BUG_ON(!cachep); 3263 return cachep; 3264 } 3265 3266 /* 3267 * Allocate the top-level s_group_info array for the specified number 3268 * of groups 3269 */ 3270 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3271 { 3272 struct ext4_sb_info *sbi = EXT4_SB(sb); 3273 unsigned size; 3274 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3275 3276 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3277 EXT4_DESC_PER_BLOCK_BITS(sb); 3278 if (size <= sbi->s_group_info_size) 3279 return 0; 3280 3281 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3282 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3283 if (!new_groupinfo) { 3284 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3285 return -ENOMEM; 3286 } 3287 rcu_read_lock(); 3288 old_groupinfo = rcu_dereference(sbi->s_group_info); 3289 if (old_groupinfo) 3290 memcpy(new_groupinfo, old_groupinfo, 3291 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3292 rcu_read_unlock(); 3293 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3294 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3295 if (old_groupinfo) 3296 ext4_kvfree_array_rcu(old_groupinfo); 3297 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3298 sbi->s_group_info_size); 3299 return 0; 3300 } 3301 3302 /* Create and initialize ext4_group_info data for the given group. */ 3303 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3304 struct ext4_group_desc *desc) 3305 { 3306 int i; 3307 int metalen = 0; 3308 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3309 struct ext4_sb_info *sbi = EXT4_SB(sb); 3310 struct ext4_group_info **meta_group_info; 3311 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3312 3313 /* 3314 * First check if this group is the first of a reserved block. 
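	 * (i.e. group % EXT4_DESC_PER_BLOCK(sb) == 0, as checked just below).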
3315 * If it's true, we have to allocate a new table of pointers 3316 * to ext4_group_info structures 3317 */ 3318 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3319 metalen = sizeof(*meta_group_info) << 3320 EXT4_DESC_PER_BLOCK_BITS(sb); 3321 meta_group_info = kmalloc(metalen, GFP_NOFS); 3322 if (meta_group_info == NULL) { 3323 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3324 "for a buddy group"); 3325 return -ENOMEM; 3326 } 3327 rcu_read_lock(); 3328 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3329 rcu_read_unlock(); 3330 } 3331 3332 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3333 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3334 3335 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3336 if (meta_group_info[i] == NULL) { 3337 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3338 goto exit_group_info; 3339 } 3340 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3341 &(meta_group_info[i]->bb_state)); 3342 3343 /* 3344 * initialize bb_free to be able to skip 3345 * empty groups without initialization 3346 */ 3347 if (ext4_has_group_desc_csum(sb) && 3348 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3349 meta_group_info[i]->bb_free = 3350 ext4_free_clusters_after_init(sb, group, desc); 3351 } else { 3352 meta_group_info[i]->bb_free = 3353 ext4_free_group_clusters(sb, desc); 3354 } 3355 3356 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3357 init_rwsem(&meta_group_info[i]->alloc_sem); 3358 meta_group_info[i]->bb_free_root = RB_ROOT; 3359 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 3360 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 3361 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3362 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3363 meta_group_info[i]->bb_group = group; 3364 3365 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3366 return 0; 3367 3368 exit_group_info: 3369 /* If a meta_group_info table has been allocated, release it now */ 3370 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3371 struct ext4_group_info ***group_info; 3372 3373 rcu_read_lock(); 3374 group_info = rcu_dereference(sbi->s_group_info); 3375 kfree(group_info[idx]); 3376 group_info[idx] = NULL; 3377 rcu_read_unlock(); 3378 } 3379 return -ENOMEM; 3380 } /* ext4_mb_add_groupinfo */ 3381 3382 static int ext4_mb_init_backend(struct super_block *sb) 3383 { 3384 ext4_group_t ngroups = ext4_get_groups_count(sb); 3385 ext4_group_t i; 3386 struct ext4_sb_info *sbi = EXT4_SB(sb); 3387 int err; 3388 struct ext4_group_desc *desc; 3389 struct ext4_group_info ***group_info; 3390 struct kmem_cache *cachep; 3391 3392 err = ext4_mb_alloc_groupinfo(sb, ngroups); 3393 if (err) 3394 return err; 3395 3396 sbi->s_buddy_cache = new_inode(sb); 3397 if (sbi->s_buddy_cache == NULL) { 3398 ext4_msg(sb, KERN_ERR, "can't get new inode"); 3399 goto err_freesgi; 3400 } 3401 /* To avoid potentially colliding with an valid on-disk inode number, 3402 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 3403 * not in the inode hash, so it should never be found by iget(), but 3404 * this will avoid confusion if it ever shows up during debugging. 
*/ 3405 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3406 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 3407 for (i = 0; i < ngroups; i++) { 3408 cond_resched(); 3409 desc = ext4_get_group_desc(sb, i, NULL); 3410 if (desc == NULL) { 3411 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3412 goto err_freebuddy; 3413 } 3414 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 3415 goto err_freebuddy; 3416 } 3417 3418 if (ext4_has_feature_flex_bg(sb)) { 3419 /* a single flex group is supposed to be read by a single IO. 3420 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3421 * unsigned integer, so the maximum shift is 32. 3422 */ 3423 if (sbi->s_es->s_log_groups_per_flex >= 32) { 3424 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3425 goto err_freebuddy; 3426 } 3427 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 3428 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3429 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3430 } else { 3431 sbi->s_mb_prefetch = 32; 3432 } 3433 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3434 sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3435 /* now many real IOs to prefetch within a single allocation at cr=0 3436 * given cr=0 is an CPU-related optimization we shouldn't try to 3437 * load too many groups, at some point we should start to use what 3438 * we've got in memory. 3439 * with an average random access time 5ms, it'd take a second to get 3440 * 200 groups (* N with flex_bg), so let's make this limit 4 3441 */ 3442 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3443 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3444 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3445 3446 return 0; 3447 3448 err_freebuddy: 3449 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3450 while (i-- > 0) { 3451 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 3452 3453 if (grp) 3454 kmem_cache_free(cachep, grp); 3455 } 3456 i = sbi->s_group_info_size; 3457 rcu_read_lock(); 3458 group_info = rcu_dereference(sbi->s_group_info); 3459 while (i-- > 0) 3460 kfree(group_info[i]); 3461 rcu_read_unlock(); 3462 iput(sbi->s_buddy_cache); 3463 err_freesgi: 3464 rcu_read_lock(); 3465 kvfree(rcu_dereference(sbi->s_group_info)); 3466 rcu_read_unlock(); 3467 return -ENOMEM; 3468 } 3469 3470 static void ext4_groupinfo_destroy_slabs(void) 3471 { 3472 int i; 3473 3474 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 3475 kmem_cache_destroy(ext4_groupinfo_caches[i]); 3476 ext4_groupinfo_caches[i] = NULL; 3477 } 3478 } 3479 3480 static int ext4_groupinfo_create_slab(size_t size) 3481 { 3482 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 3483 int slab_size; 3484 int blocksize_bits = order_base_2(size); 3485 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3486 struct kmem_cache *cachep; 3487 3488 if (cache_index >= NR_GRPINFO_CACHES) 3489 return -EINVAL; 3490 3491 if (unlikely(cache_index < 0)) 3492 cache_index = 0; 3493 3494 mutex_lock(&ext4_grpinfo_slab_create_mutex); 3495 if (ext4_groupinfo_caches[cache_index]) { 3496 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3497 return 0; /* Already created */ 3498 } 3499 3500 slab_size = offsetof(struct ext4_group_info, 3501 bb_counters[blocksize_bits + 2]); 3502 3503 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 3504 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 3505 NULL); 3506 3507 ext4_groupinfo_caches[cache_index] = cachep; 3508 3509 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3510 if (!cachep) { 3511 
printk(KERN_EMERG 3512 "EXT4-fs: no memory for groupinfo slab cache\n"); 3513 return -ENOMEM; 3514 } 3515 3516 return 0; 3517 } 3518 3519 static void ext4_discard_work(struct work_struct *work) 3520 { 3521 struct ext4_sb_info *sbi = container_of(work, 3522 struct ext4_sb_info, s_discard_work); 3523 struct super_block *sb = sbi->s_sb; 3524 struct ext4_free_data *fd, *nfd; 3525 struct ext4_buddy e4b; 3526 LIST_HEAD(discard_list); 3527 ext4_group_t grp, load_grp; 3528 int err = 0; 3529 3530 spin_lock(&sbi->s_md_lock); 3531 list_splice_init(&sbi->s_discard_list, &discard_list); 3532 spin_unlock(&sbi->s_md_lock); 3533 3534 load_grp = UINT_MAX; 3535 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 3536 /* 3537 * If filesystem is umounting or no memory or suffering 3538 * from no space, give up the discard 3539 */ 3540 if ((sb->s_flags & SB_ACTIVE) && !err && 3541 !atomic_read(&sbi->s_retry_alloc_pending)) { 3542 grp = fd->efd_group; 3543 if (grp != load_grp) { 3544 if (load_grp != UINT_MAX) 3545 ext4_mb_unload_buddy(&e4b); 3546 3547 err = ext4_mb_load_buddy(sb, grp, &e4b); 3548 if (err) { 3549 kmem_cache_free(ext4_free_data_cachep, fd); 3550 load_grp = UINT_MAX; 3551 continue; 3552 } else { 3553 load_grp = grp; 3554 } 3555 } 3556 3557 ext4_lock_group(sb, grp); 3558 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 3559 fd->efd_start_cluster + fd->efd_count - 1, 1); 3560 ext4_unlock_group(sb, grp); 3561 } 3562 kmem_cache_free(ext4_free_data_cachep, fd); 3563 } 3564 3565 if (load_grp != UINT_MAX) 3566 ext4_mb_unload_buddy(&e4b); 3567 } 3568 3569 int ext4_mb_init(struct super_block *sb) 3570 { 3571 struct ext4_sb_info *sbi = EXT4_SB(sb); 3572 unsigned i, j; 3573 unsigned offset, offset_incr; 3574 unsigned max; 3575 int ret; 3576 3577 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3578 3579 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3580 if (sbi->s_mb_offsets == NULL) { 3581 ret = -ENOMEM; 3582 goto out; 3583 } 3584 3585 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3586 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3587 if (sbi->s_mb_maxs == NULL) { 3588 ret = -ENOMEM; 3589 goto out; 3590 } 3591 3592 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 3593 if (ret < 0) 3594 goto out; 3595 3596 /* order 0 is regular bitmap */ 3597 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3598 sbi->s_mb_offsets[0] = 0; 3599 3600 i = 1; 3601 offset = 0; 3602 offset_incr = 1 << (sb->s_blocksize_bits - 1); 3603 max = sb->s_blocksize << 2; 3604 do { 3605 sbi->s_mb_offsets[i] = offset; 3606 sbi->s_mb_maxs[i] = max; 3607 offset += offset_incr; 3608 offset_incr = offset_incr >> 1; 3609 max = max >> 1; 3610 i++; 3611 } while (i < MB_NUM_ORDERS(sb)); 3612 3613 sbi->s_mb_avg_fragment_size = 3614 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3615 GFP_KERNEL); 3616 if (!sbi->s_mb_avg_fragment_size) { 3617 ret = -ENOMEM; 3618 goto out; 3619 } 3620 sbi->s_mb_avg_fragment_size_locks = 3621 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3622 GFP_KERNEL); 3623 if (!sbi->s_mb_avg_fragment_size_locks) { 3624 ret = -ENOMEM; 3625 goto out; 3626 } 3627 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3628 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 3629 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 3630 } 3631 sbi->s_mb_largest_free_orders = 3632 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3633 GFP_KERNEL); 3634 if (!sbi->s_mb_largest_free_orders) { 3635 ret = -ENOMEM; 3636 goto out; 3637 } 3638 sbi->s_mb_largest_free_orders_locks = 3639 kmalloc_array(MB_NUM_ORDERS(sb), 
sizeof(rwlock_t), 3640 GFP_KERNEL); 3641 if (!sbi->s_mb_largest_free_orders_locks) { 3642 ret = -ENOMEM; 3643 goto out; 3644 } 3645 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3646 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); 3647 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); 3648 } 3649 3650 spin_lock_init(&sbi->s_md_lock); 3651 sbi->s_mb_free_pending = 0; 3652 INIT_LIST_HEAD(&sbi->s_freed_data_list); 3653 INIT_LIST_HEAD(&sbi->s_discard_list); 3654 INIT_WORK(&sbi->s_discard_work, ext4_discard_work); 3655 atomic_set(&sbi->s_retry_alloc_pending, 0); 3656 3657 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 3658 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 3659 sbi->s_mb_stats = MB_DEFAULT_STATS; 3660 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 3661 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 3662 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER; 3663 3664 /* 3665 * The default group preallocation is 512, which for 4k block 3666 * sizes translates to 2 megabytes. However for bigalloc file 3667 * systems, this is probably too big (i.e, if the cluster size 3668 * is 1 megabyte, then group preallocation size becomes half a 3669 * gigabyte!). As a default, we will keep a two megabyte 3670 * group pralloc size for cluster sizes up to 64k, and after 3671 * that, we will force a minimum group preallocation size of 3672 * 32 clusters. This translates to 8 megs when the cluster 3673 * size is 256k, and 32 megs when the cluster size is 1 meg, 3674 * which seems reasonable as a default. 3675 */ 3676 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 3677 sbi->s_cluster_bits, 32); 3678 /* 3679 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 3680 * to the lowest multiple of s_stripe which is bigger than 3681 * the s_mb_group_prealloc as determined above. We want 3682 * the preallocation size to be an exact multiple of the 3683 * RAID stripe size so that preallocations don't fragment 3684 * the stripes. 
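 * For example (illustrative numbers only): with s_mb_group_prealloc
 * computed as 512 clusters and a stripe that maps to 48 clusters,
 * roundup(512, 48) = 528 clusters.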
3685 */ 3686 if (sbi->s_stripe > 1) { 3687 sbi->s_mb_group_prealloc = roundup( 3688 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe)); 3689 } 3690 3691 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3692 if (sbi->s_locality_groups == NULL) { 3693 ret = -ENOMEM; 3694 goto out; 3695 } 3696 for_each_possible_cpu(i) { 3697 struct ext4_locality_group *lg; 3698 lg = per_cpu_ptr(sbi->s_locality_groups, i); 3699 mutex_init(&lg->lg_mutex); 3700 for (j = 0; j < PREALLOC_TB_SIZE; j++) 3701 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3702 spin_lock_init(&lg->lg_prealloc_lock); 3703 } 3704 3705 if (bdev_nonrot(sb->s_bdev)) 3706 sbi->s_mb_max_linear_groups = 0; 3707 else 3708 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 3709 /* init file for buddy data */ 3710 ret = ext4_mb_init_backend(sb); 3711 if (ret != 0) 3712 goto out_free_locality_groups; 3713 3714 return 0; 3715 3716 out_free_locality_groups: 3717 free_percpu(sbi->s_locality_groups); 3718 sbi->s_locality_groups = NULL; 3719 out: 3720 kfree(sbi->s_mb_avg_fragment_size); 3721 kfree(sbi->s_mb_avg_fragment_size_locks); 3722 kfree(sbi->s_mb_largest_free_orders); 3723 kfree(sbi->s_mb_largest_free_orders_locks); 3724 kfree(sbi->s_mb_offsets); 3725 sbi->s_mb_offsets = NULL; 3726 kfree(sbi->s_mb_maxs); 3727 sbi->s_mb_maxs = NULL; 3728 return ret; 3729 } 3730 3731 /* need to called with the ext4 group lock held */ 3732 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3733 { 3734 struct ext4_prealloc_space *pa; 3735 struct list_head *cur, *tmp; 3736 int count = 0; 3737 3738 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3739 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3740 list_del(&pa->pa_group_list); 3741 count++; 3742 kmem_cache_free(ext4_pspace_cachep, pa); 3743 } 3744 return count; 3745 } 3746 3747 int ext4_mb_release(struct super_block *sb) 3748 { 3749 ext4_group_t ngroups = ext4_get_groups_count(sb); 3750 ext4_group_t i; 3751 int num_meta_group_infos; 3752 struct ext4_group_info *grinfo, ***group_info; 3753 struct ext4_sb_info *sbi = EXT4_SB(sb); 3754 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3755 int count; 3756 3757 if (test_opt(sb, DISCARD)) { 3758 /* 3759 * wait the discard work to drain all of ext4_free_data 3760 */ 3761 flush_work(&sbi->s_discard_work); 3762 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 3763 } 3764 3765 if (sbi->s_group_info) { 3766 for (i = 0; i < ngroups; i++) { 3767 cond_resched(); 3768 grinfo = ext4_get_group_info(sb, i); 3769 if (!grinfo) 3770 continue; 3771 mb_group_bb_bitmap_free(grinfo); 3772 ext4_lock_group(sb, i); 3773 count = ext4_mb_cleanup_pa(grinfo); 3774 if (count) 3775 mb_debug(sb, "mballoc: %d PAs left\n", 3776 count); 3777 ext4_unlock_group(sb, i); 3778 kmem_cache_free(cachep, grinfo); 3779 } 3780 num_meta_group_infos = (ngroups + 3781 EXT4_DESC_PER_BLOCK(sb) - 1) >> 3782 EXT4_DESC_PER_BLOCK_BITS(sb); 3783 rcu_read_lock(); 3784 group_info = rcu_dereference(sbi->s_group_info); 3785 for (i = 0; i < num_meta_group_infos; i++) 3786 kfree(group_info[i]); 3787 kvfree(group_info); 3788 rcu_read_unlock(); 3789 } 3790 kfree(sbi->s_mb_avg_fragment_size); 3791 kfree(sbi->s_mb_avg_fragment_size_locks); 3792 kfree(sbi->s_mb_largest_free_orders); 3793 kfree(sbi->s_mb_largest_free_orders_locks); 3794 kfree(sbi->s_mb_offsets); 3795 kfree(sbi->s_mb_maxs); 3796 iput(sbi->s_buddy_cache); 3797 if (sbi->s_mb_stats) { 3798 ext4_msg(sb, KERN_INFO, 3799 "mballoc: %u blocks %u reqs (%u success)", 3800 
atomic_read(&sbi->s_bal_allocated), 3801 atomic_read(&sbi->s_bal_reqs), 3802 atomic_read(&sbi->s_bal_success)); 3803 ext4_msg(sb, KERN_INFO, 3804 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3805 "%u 2^N hits, %u breaks, %u lost", 3806 atomic_read(&sbi->s_bal_ex_scanned), 3807 atomic_read(&sbi->s_bal_groups_scanned), 3808 atomic_read(&sbi->s_bal_goals), 3809 atomic_read(&sbi->s_bal_2orders), 3810 atomic_read(&sbi->s_bal_breaks), 3811 atomic_read(&sbi->s_mb_lost_chunks)); 3812 ext4_msg(sb, KERN_INFO, 3813 "mballoc: %u generated and it took %llu", 3814 atomic_read(&sbi->s_mb_buddies_generated), 3815 atomic64_read(&sbi->s_mb_generation_time)); 3816 ext4_msg(sb, KERN_INFO, 3817 "mballoc: %u preallocated, %u discarded", 3818 atomic_read(&sbi->s_mb_preallocated), 3819 atomic_read(&sbi->s_mb_discarded)); 3820 } 3821 3822 free_percpu(sbi->s_locality_groups); 3823 3824 return 0; 3825 } 3826 3827 static inline int ext4_issue_discard(struct super_block *sb, 3828 ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3829 struct bio **biop) 3830 { 3831 ext4_fsblk_t discard_block; 3832 3833 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3834 ext4_group_first_block_no(sb, block_group)); 3835 count = EXT4_C2B(EXT4_SB(sb), count); 3836 trace_ext4_discard_blocks(sb, 3837 (unsigned long long) discard_block, count); 3838 if (biop) { 3839 return __blkdev_issue_discard(sb->s_bdev, 3840 (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3841 (sector_t)count << (sb->s_blocksize_bits - 9), 3842 GFP_NOFS, biop); 3843 } else 3844 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3845 } 3846 3847 static void ext4_free_data_in_buddy(struct super_block *sb, 3848 struct ext4_free_data *entry) 3849 { 3850 struct ext4_buddy e4b; 3851 struct ext4_group_info *db; 3852 int err, count = 0; 3853 3854 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3855 entry->efd_count, entry->efd_group, entry); 3856 3857 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3858 /* we expect to find existing buddy because it's pinned */ 3859 BUG_ON(err != 0); 3860 3861 spin_lock(&EXT4_SB(sb)->s_md_lock); 3862 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3863 spin_unlock(&EXT4_SB(sb)->s_md_lock); 3864 3865 db = e4b.bd_info; 3866 /* there are blocks to put in buddy to make them really free */ 3867 count += entry->efd_count; 3868 ext4_lock_group(sb, entry->efd_group); 3869 /* Take it out of per group rb tree */ 3870 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3871 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3872 3873 /* 3874 * Clear the trimmed flag for the group so that the next 3875 * ext4_trim_fs can trim it. 3876 * If the volume is mounted with -o discard, online discard 3877 * is supported and the free blocks will be trimmed online. 3878 */ 3879 if (!test_opt(sb, DISCARD)) 3880 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3881 3882 if (!db->bb_free_root.rb_node) { 3883 /* No more items in the per group rb tree 3884 * balance refcounts from ext4_mb_free_metadata() 3885 */ 3886 put_page(e4b.bd_buddy_page); 3887 put_page(e4b.bd_bitmap_page); 3888 } 3889 ext4_unlock_group(sb, entry->efd_group); 3890 ext4_mb_unload_buddy(&e4b); 3891 3892 mb_debug(sb, "freed %d blocks in 1 structures\n", count); 3893 } 3894 3895 /* 3896 * This function is called by the jbd2 layer once the commit has finished, 3897 * so we know we can free the blocks that were released with that commit. 
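 * Entries on sbi->s_freed_data_list are ordered by commit tid, so below we
 * cut off the leading entries whose efd_tid matches commit_tid, return
 * their clusters to the buddy, and then either hand the entries to the
 * discard worker (when mounted with -o discard) or free them.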
3898 */ 3899 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3900 { 3901 struct ext4_sb_info *sbi = EXT4_SB(sb); 3902 struct ext4_free_data *entry, *tmp; 3903 LIST_HEAD(freed_data_list); 3904 struct list_head *cut_pos = NULL; 3905 bool wake; 3906 3907 spin_lock(&sbi->s_md_lock); 3908 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3909 if (entry->efd_tid != commit_tid) 3910 break; 3911 cut_pos = &entry->efd_list; 3912 } 3913 if (cut_pos) 3914 list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3915 cut_pos); 3916 spin_unlock(&sbi->s_md_lock); 3917 3918 list_for_each_entry(entry, &freed_data_list, efd_list) 3919 ext4_free_data_in_buddy(sb, entry); 3920 3921 if (test_opt(sb, DISCARD)) { 3922 spin_lock(&sbi->s_md_lock); 3923 wake = list_empty(&sbi->s_discard_list); 3924 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 3925 spin_unlock(&sbi->s_md_lock); 3926 if (wake) 3927 queue_work(system_unbound_wq, &sbi->s_discard_work); 3928 } else { 3929 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 3930 kmem_cache_free(ext4_free_data_cachep, entry); 3931 } 3932 } 3933 3934 int __init ext4_init_mballoc(void) 3935 { 3936 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 3937 SLAB_RECLAIM_ACCOUNT); 3938 if (ext4_pspace_cachep == NULL) 3939 goto out; 3940 3941 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 3942 SLAB_RECLAIM_ACCOUNT); 3943 if (ext4_ac_cachep == NULL) 3944 goto out_pa_free; 3945 3946 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 3947 SLAB_RECLAIM_ACCOUNT); 3948 if (ext4_free_data_cachep == NULL) 3949 goto out_ac_free; 3950 3951 return 0; 3952 3953 out_ac_free: 3954 kmem_cache_destroy(ext4_ac_cachep); 3955 out_pa_free: 3956 kmem_cache_destroy(ext4_pspace_cachep); 3957 out: 3958 return -ENOMEM; 3959 } 3960 3961 void ext4_exit_mballoc(void) 3962 { 3963 /* 3964 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 3965 * before destroying the slab cache. 
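 * (Group PAs are freed from an RCU callback, ext4_mb_pa_callback(), which
 * returns them to ext4_pspace_cachep; without the barrier a late callback
 * could still touch an object from the cache being destroyed.)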
3966 */ 3967 rcu_barrier(); 3968 kmem_cache_destroy(ext4_pspace_cachep); 3969 kmem_cache_destroy(ext4_ac_cachep); 3970 kmem_cache_destroy(ext4_free_data_cachep); 3971 ext4_groupinfo_destroy_slabs(); 3972 } 3973 3974 3975 /* 3976 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3977 * Returns 0 if success or error code 3978 */ 3979 static noinline_for_stack int 3980 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 3981 handle_t *handle, unsigned int reserv_clstrs) 3982 { 3983 struct buffer_head *bitmap_bh = NULL; 3984 struct ext4_group_desc *gdp; 3985 struct buffer_head *gdp_bh; 3986 struct ext4_sb_info *sbi; 3987 struct super_block *sb; 3988 ext4_fsblk_t block; 3989 int err, len; 3990 3991 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3992 BUG_ON(ac->ac_b_ex.fe_len <= 0); 3993 3994 sb = ac->ac_sb; 3995 sbi = EXT4_SB(sb); 3996 3997 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 3998 if (IS_ERR(bitmap_bh)) { 3999 return PTR_ERR(bitmap_bh); 4000 } 4001 4002 BUFFER_TRACE(bitmap_bh, "getting write access"); 4003 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 4004 EXT4_JTR_NONE); 4005 if (err) 4006 goto out_err; 4007 4008 err = -EIO; 4009 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 4010 if (!gdp) 4011 goto out_err; 4012 4013 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 4014 ext4_free_group_clusters(sb, gdp)); 4015 4016 BUFFER_TRACE(gdp_bh, "get_write_access"); 4017 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 4018 if (err) 4019 goto out_err; 4020 4021 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4022 4023 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4024 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 4025 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 4026 "fs metadata", block, block+len); 4027 /* File system mounted not to panic on error 4028 * Fix the bitmap and return EFSCORRUPTED 4029 * We leak some of the blocks here. 4030 */ 4031 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 4032 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 4033 ac->ac_b_ex.fe_len); 4034 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 4035 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4036 if (!err) 4037 err = -EFSCORRUPTED; 4038 goto out_err; 4039 } 4040 4041 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 4042 #ifdef AGGRESSIVE_CHECK 4043 { 4044 int i; 4045 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 4046 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 4047 bitmap_bh->b_data)); 4048 } 4049 } 4050 #endif 4051 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 4052 ac->ac_b_ex.fe_len); 4053 if (ext4_has_group_desc_csum(sb) && 4054 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4055 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4056 ext4_free_group_clusters_set(sb, gdp, 4057 ext4_free_clusters_after_init(sb, 4058 ac->ac_b_ex.fe_group, gdp)); 4059 } 4060 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 4061 ext4_free_group_clusters_set(sb, gdp, len); 4062 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4063 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 4064 4065 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 4066 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 4067 /* 4068 * Now reduce the dirty block count also. 
Should not go negative 4069 */ 4070 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 4071 /* release all the reserved blocks if non delalloc */ 4072 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4073 reserv_clstrs); 4074 4075 if (sbi->s_log_groups_per_flex) { 4076 ext4_group_t flex_group = ext4_flex_group(sbi, 4077 ac->ac_b_ex.fe_group); 4078 atomic64_sub(ac->ac_b_ex.fe_len, 4079 &sbi_array_rcu_deref(sbi, s_flex_groups, 4080 flex_group)->free_clusters); 4081 } 4082 4083 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4084 if (err) 4085 goto out_err; 4086 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 4087 4088 out_err: 4089 brelse(bitmap_bh); 4090 return err; 4091 } 4092 4093 /* 4094 * Idempotent helper for Ext4 fast commit replay path to set the state of 4095 * blocks in bitmaps and update counters. 4096 */ 4097 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 4098 int len, int state) 4099 { 4100 struct buffer_head *bitmap_bh = NULL; 4101 struct ext4_group_desc *gdp; 4102 struct buffer_head *gdp_bh; 4103 struct ext4_sb_info *sbi = EXT4_SB(sb); 4104 ext4_group_t group; 4105 ext4_grpblk_t blkoff; 4106 int i, err = 0; 4107 int already; 4108 unsigned int clen, clen_changed, thisgrp_len; 4109 4110 while (len > 0) { 4111 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 4112 4113 /* 4114 * Check to see if we are freeing blocks across a group 4115 * boundary. 4116 * In case of flex_bg, this can happen that (block, len) may 4117 * span across more than one group. In that case we need to 4118 * get the corresponding group metadata to work with. 4119 * For this we have goto again loop. 4120 */ 4121 thisgrp_len = min_t(unsigned int, (unsigned int)len, 4122 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 4123 clen = EXT4_NUM_B2C(sbi, thisgrp_len); 4124 4125 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 4126 ext4_error(sb, "Marking blocks in system zone - " 4127 "Block = %llu, len = %u", 4128 block, thisgrp_len); 4129 bitmap_bh = NULL; 4130 break; 4131 } 4132 4133 bitmap_bh = ext4_read_block_bitmap(sb, group); 4134 if (IS_ERR(bitmap_bh)) { 4135 err = PTR_ERR(bitmap_bh); 4136 bitmap_bh = NULL; 4137 break; 4138 } 4139 4140 err = -EIO; 4141 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 4142 if (!gdp) 4143 break; 4144 4145 ext4_lock_group(sb, group); 4146 already = 0; 4147 for (i = 0; i < clen; i++) 4148 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 4149 !state) 4150 already++; 4151 4152 clen_changed = clen - already; 4153 if (state) 4154 mb_set_bits(bitmap_bh->b_data, blkoff, clen); 4155 else 4156 mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 4157 if (ext4_has_group_desc_csum(sb) && 4158 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4159 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4160 ext4_free_group_clusters_set(sb, gdp, 4161 ext4_free_clusters_after_init(sb, group, gdp)); 4162 } 4163 if (state) 4164 clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 4165 else 4166 clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 4167 4168 ext4_free_group_clusters_set(sb, gdp, clen); 4169 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4170 ext4_group_desc_csum_set(sb, group, gdp); 4171 4172 ext4_unlock_group(sb, group); 4173 4174 if (sbi->s_log_groups_per_flex) { 4175 ext4_group_t flex_group = ext4_flex_group(sbi, group); 4176 struct flex_groups *fg = sbi_array_rcu_deref(sbi, 4177 s_flex_groups, flex_group); 4178 4179 if (state) 4180 atomic64_sub(clen_changed, &fg->free_clusters); 4181 else 4182 
atomic64_add(clen_changed, &fg->free_clusters); 4183 4184 } 4185 4186 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 4187 if (err) 4188 break; 4189 sync_dirty_buffer(bitmap_bh); 4190 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 4191 sync_dirty_buffer(gdp_bh); 4192 if (err) 4193 break; 4194 4195 block += thisgrp_len; 4196 len -= thisgrp_len; 4197 brelse(bitmap_bh); 4198 BUG_ON(len < 0); 4199 } 4200 4201 if (err) 4202 brelse(bitmap_bh); 4203 } 4204 4205 /* 4206 * here we normalize request for locality group 4207 * Group request are normalized to s_mb_group_prealloc, which goes to 4208 * s_strip if we set the same via mount option. 4209 * s_mb_group_prealloc can be configured via 4210 * /sys/fs/ext4/<partition>/mb_group_prealloc 4211 * 4212 * XXX: should we try to preallocate more than the group has now? 4213 */ 4214 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 4215 { 4216 struct super_block *sb = ac->ac_sb; 4217 struct ext4_locality_group *lg = ac->ac_lg; 4218 4219 BUG_ON(lg == NULL); 4220 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 4221 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 4222 } 4223 4224 /* 4225 * This function returns the next element to look at during inode 4226 * PA rbtree walk. We assume that we have held the inode PA rbtree lock 4227 * (ei->i_prealloc_lock) 4228 * 4229 * new_start The start of the range we want to compare 4230 * cur_start The existing start that we are comparing against 4231 * node The node of the rb_tree 4232 */ 4233 static inline struct rb_node* 4234 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node) 4235 { 4236 if (new_start < cur_start) 4237 return node->rb_left; 4238 else 4239 return node->rb_right; 4240 } 4241 4242 static inline void 4243 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, 4244 ext4_lblk_t start, loff_t end) 4245 { 4246 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4247 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4248 struct ext4_prealloc_space *tmp_pa; 4249 ext4_lblk_t tmp_pa_start; 4250 loff_t tmp_pa_end; 4251 struct rb_node *iter; 4252 4253 read_lock(&ei->i_prealloc_lock); 4254 for (iter = ei->i_prealloc_node.rb_node; iter; 4255 iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) { 4256 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4257 pa_node.inode_node); 4258 tmp_pa_start = tmp_pa->pa_lstart; 4259 tmp_pa_end = pa_logical_end(sbi, tmp_pa); 4260 4261 spin_lock(&tmp_pa->pa_lock); 4262 if (tmp_pa->pa_deleted == 0) 4263 BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start)); 4264 spin_unlock(&tmp_pa->pa_lock); 4265 } 4266 read_unlock(&ei->i_prealloc_lock); 4267 } 4268 4269 /* 4270 * Given an allocation context "ac" and a range "start", "end", check 4271 * and adjust boundaries if the range overlaps with any of the existing 4272 * preallocatoins stored in the corresponding inode of the allocation context. 
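 * Inode PAs never overlap each other, so only the closest non-deleted PA
 * on each side of the original logical start can intersect the normalized
 * range; the walk below finds those two neighbours and trims [start, end)
 * against them.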
4273 * 4274 * Parameters: 4275 * ac allocation context 4276 * start start of the new range 4277 * end end of the new range 4278 */ 4279 static inline void 4280 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, 4281 ext4_lblk_t *start, loff_t *end) 4282 { 4283 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4284 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4285 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL; 4286 struct rb_node *iter; 4287 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1; 4288 loff_t new_end, tmp_pa_end, left_pa_end = -1; 4289 4290 new_start = *start; 4291 new_end = *end; 4292 4293 /* 4294 * Adjust the normalized range so that it doesn't overlap with any 4295 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock 4296 * so it doesn't change underneath us. 4297 */ 4298 read_lock(&ei->i_prealloc_lock); 4299 4300 /* Step 1: find any one immediate neighboring PA of the normalized range */ 4301 for (iter = ei->i_prealloc_node.rb_node; iter; 4302 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4303 tmp_pa_start, iter)) { 4304 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4305 pa_node.inode_node); 4306 tmp_pa_start = tmp_pa->pa_lstart; 4307 tmp_pa_end = pa_logical_end(sbi, tmp_pa); 4308 4309 /* PA must not overlap original request */ 4310 spin_lock(&tmp_pa->pa_lock); 4311 if (tmp_pa->pa_deleted == 0) 4312 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 4313 ac->ac_o_ex.fe_logical < tmp_pa_start)); 4314 spin_unlock(&tmp_pa->pa_lock); 4315 } 4316 4317 /* 4318 * Step 2: check if the found PA is left or right neighbor and 4319 * get the other neighbor 4320 */ 4321 if (tmp_pa) { 4322 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 4323 struct rb_node *tmp; 4324 4325 left_pa = tmp_pa; 4326 tmp = rb_next(&left_pa->pa_node.inode_node); 4327 if (tmp) { 4328 right_pa = rb_entry(tmp, 4329 struct ext4_prealloc_space, 4330 pa_node.inode_node); 4331 } 4332 } else { 4333 struct rb_node *tmp; 4334 4335 right_pa = tmp_pa; 4336 tmp = rb_prev(&right_pa->pa_node.inode_node); 4337 if (tmp) { 4338 left_pa = rb_entry(tmp, 4339 struct ext4_prealloc_space, 4340 pa_node.inode_node); 4341 } 4342 } 4343 } 4344 4345 /* Step 3: get the non deleted neighbors */ 4346 if (left_pa) { 4347 for (iter = &left_pa->pa_node.inode_node;; 4348 iter = rb_prev(iter)) { 4349 if (!iter) { 4350 left_pa = NULL; 4351 break; 4352 } 4353 4354 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4355 pa_node.inode_node); 4356 left_pa = tmp_pa; 4357 spin_lock(&tmp_pa->pa_lock); 4358 if (tmp_pa->pa_deleted == 0) { 4359 spin_unlock(&tmp_pa->pa_lock); 4360 break; 4361 } 4362 spin_unlock(&tmp_pa->pa_lock); 4363 } 4364 } 4365 4366 if (right_pa) { 4367 for (iter = &right_pa->pa_node.inode_node;; 4368 iter = rb_next(iter)) { 4369 if (!iter) { 4370 right_pa = NULL; 4371 break; 4372 } 4373 4374 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4375 pa_node.inode_node); 4376 right_pa = tmp_pa; 4377 spin_lock(&tmp_pa->pa_lock); 4378 if (tmp_pa->pa_deleted == 0) { 4379 spin_unlock(&tmp_pa->pa_lock); 4380 break; 4381 } 4382 spin_unlock(&tmp_pa->pa_lock); 4383 } 4384 } 4385 4386 if (left_pa) { 4387 left_pa_end = pa_logical_end(sbi, left_pa); 4388 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 4389 } 4390 4391 if (right_pa) { 4392 right_pa_start = right_pa->pa_lstart; 4393 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 4394 } 4395 4396 /* Step 4: trim our normalized range to not overlap with the neighbors */ 4397 if (left_pa) { 4398 if (left_pa_end > 
new_start) 4399 new_start = left_pa_end; 4400 } 4401 4402 if (right_pa) { 4403 if (right_pa_start < new_end) 4404 new_end = right_pa_start; 4405 } 4406 read_unlock(&ei->i_prealloc_lock); 4407 4408 /* XXX: extra loop to check we really don't overlap preallocations */ 4409 ext4_mb_pa_assert_overlap(ac, new_start, new_end); 4410 4411 *start = new_start; 4412 *end = new_end; 4413 } 4414 4415 /* 4416 * Normalization means making request better in terms of 4417 * size and alignment 4418 */ 4419 static noinline_for_stack void 4420 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4421 struct ext4_allocation_request *ar) 4422 { 4423 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4424 struct ext4_super_block *es = sbi->s_es; 4425 int bsbits, max; 4426 loff_t size, start_off, end; 4427 loff_t orig_size __maybe_unused; 4428 ext4_lblk_t start; 4429 4430 /* do normalize only data requests, metadata requests 4431 do not need preallocation */ 4432 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4433 return; 4434 4435 /* sometime caller may want exact blocks */ 4436 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4437 return; 4438 4439 /* caller may indicate that preallocation isn't 4440 * required (it's a tail, for example) */ 4441 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4442 return; 4443 4444 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4445 ext4_mb_normalize_group_request(ac); 4446 return ; 4447 } 4448 4449 bsbits = ac->ac_sb->s_blocksize_bits; 4450 4451 /* first, let's learn actual file size 4452 * given current request is allocated */ 4453 size = extent_logical_end(sbi, &ac->ac_o_ex); 4454 size = size << bsbits; 4455 if (size < i_size_read(ac->ac_inode)) 4456 size = i_size_read(ac->ac_inode); 4457 orig_size = size; 4458 4459 /* max size of free chunks */ 4460 max = 2 << bsbits; 4461 4462 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 4463 (req <= (size) || max <= (chunk_size)) 4464 4465 /* first, try to predict filesize */ 4466 /* XXX: should this table be tunable? */ 4467 start_off = 0; 4468 if (size <= 16 * 1024) { 4469 size = 16 * 1024; 4470 } else if (size <= 32 * 1024) { 4471 size = 32 * 1024; 4472 } else if (size <= 64 * 1024) { 4473 size = 64 * 1024; 4474 } else if (size <= 128 * 1024) { 4475 size = 128 * 1024; 4476 } else if (size <= 256 * 1024) { 4477 size = 256 * 1024; 4478 } else if (size <= 512 * 1024) { 4479 size = 512 * 1024; 4480 } else if (size <= 1024 * 1024) { 4481 size = 1024 * 1024; 4482 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4483 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4484 (21 - bsbits)) << 21; 4485 size = 2 * 1024 * 1024; 4486 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4487 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4488 (22 - bsbits)) << 22; 4489 size = 4 * 1024 * 1024; 4490 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), 4491 (8<<20)>>bsbits, max, 8 * 1024)) { 4492 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4493 (23 - bsbits)) << 23; 4494 size = 8 * 1024 * 1024; 4495 } else { 4496 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4497 size = (loff_t) EXT4_C2B(sbi, 4498 ac->ac_o_ex.fe_len) << bsbits; 4499 } 4500 size = size >> bsbits; 4501 start = start_off >> bsbits; 4502 4503 /* 4504 * For tiny groups (smaller than 8MB) the chosen allocation 4505 * alignment may be larger than group size. Make sure the 4506 * alignment does not move allocation to a different group which 4507 * makes mballoc fail assertions later. 
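 * (Illustrative numbers: with 1024 blocks per group and fe_logical = 1500,
 * an 8 MiB-aligned window would set start to 0, a different group-sized
 * window; the clamp below raises start to rounddown(1500, 1024) = 1024 so
 * that start stays in the same window as the original block.)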
4508 */ 4509 start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4510 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4511 4512 /* avoid unnecessary preallocation that may trigger assertions */ 4513 if (start + size > EXT_MAX_BLOCKS) 4514 size = EXT_MAX_BLOCKS - start; 4515 4516 /* don't cover already allocated blocks in selected range */ 4517 if (ar->pleft && start <= ar->lleft) { 4518 size -= ar->lleft + 1 - start; 4519 start = ar->lleft + 1; 4520 } 4521 if (ar->pright && start + size - 1 >= ar->lright) 4522 size -= start + size - ar->lright; 4523 4524 /* 4525 * Trim allocation request for filesystems with artificially small 4526 * groups. 4527 */ 4528 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4529 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4530 4531 end = start + size; 4532 4533 ext4_mb_pa_adjust_overlap(ac, &start, &end); 4534 4535 size = end - start; 4536 4537 /* 4538 * In this function "start" and "size" are normalized for better 4539 * alignment and length such that we could preallocate more blocks. 4540 * This normalization is done such that original request of 4541 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and 4542 * "size" boundaries. 4543 * (Note fe_len can be relaxed since FS block allocation API does not 4544 * provide gurantee on number of contiguous blocks allocation since that 4545 * depends upon free space left, etc). 4546 * In case of inode pa, later we use the allocated blocks 4547 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated 4548 * range of goal/best blocks [start, size] to put it at the 4549 * ac_o_ex.fe_logical extent of this inode. 4550 * (See ext4_mb_use_inode_pa() for more details) 4551 */ 4552 if (start + size <= ac->ac_o_ex.fe_logical || 4553 start > ac->ac_o_ex.fe_logical) { 4554 ext4_msg(ac->ac_sb, KERN_ERR, 4555 "start %lu, size %lu, fe_logical %lu", 4556 (unsigned long) start, (unsigned long) size, 4557 (unsigned long) ac->ac_o_ex.fe_logical); 4558 BUG(); 4559 } 4560 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4561 4562 /* now prepare goal request */ 4563 4564 /* XXX: is it better to align blocks WRT to logical 4565 * placement or satisfy big request as is */ 4566 ac->ac_g_ex.fe_logical = start; 4567 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4568 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; 4569 4570 /* define goal start in order to merge */ 4571 if (ar->pright && (ar->lright == (start + size)) && 4572 ar->pright >= size && 4573 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { 4574 /* merge to the right */ 4575 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4576 &ac->ac_g_ex.fe_group, 4577 &ac->ac_g_ex.fe_start); 4578 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4579 } 4580 if (ar->pleft && (ar->lleft + 1 == start) && 4581 ar->pleft + 1 < ext4_blocks_count(es)) { 4582 /* merge to the left */ 4583 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4584 &ac->ac_g_ex.fe_group, 4585 &ac->ac_g_ex.fe_start); 4586 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4587 } 4588 4589 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 4590 orig_size, start); 4591 } 4592 4593 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4594 { 4595 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4596 4597 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4598 atomic_inc(&sbi->s_bal_reqs); 4599 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4600 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4601 atomic_inc(&sbi->s_bal_success); 4602 4603 
atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4604 for (int i=0; i<EXT4_MB_NUM_CRS; i++) { 4605 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); 4606 } 4607 4608 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4609 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4610 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4611 atomic_inc(&sbi->s_bal_goals); 4612 /* did we allocate as much as normalizer originally wanted? */ 4613 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) 4614 atomic_inc(&sbi->s_bal_len_goals); 4615 4616 if (ac->ac_found > sbi->s_mb_max_to_scan) 4617 atomic_inc(&sbi->s_bal_breaks); 4618 } 4619 4620 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4621 trace_ext4_mballoc_alloc(ac); 4622 else 4623 trace_ext4_mballoc_prealloc(ac); 4624 } 4625 4626 /* 4627 * Called on failure; free up any blocks from the inode PA for this 4628 * context. We don't need this for MB_GROUP_PA because we only change 4629 * pa_free in ext4_mb_release_context(), but on failure, we've already 4630 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4631 */ 4632 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4633 { 4634 struct ext4_prealloc_space *pa = ac->ac_pa; 4635 struct ext4_buddy e4b; 4636 int err; 4637 4638 if (pa == NULL) { 4639 if (ac->ac_f_ex.fe_len == 0) 4640 return; 4641 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4642 if (WARN_RATELIMIT(err, 4643 "ext4: mb_load_buddy failed (%d)", err)) 4644 /* 4645 * This should never happen since we pin the 4646 * pages in the ext4_allocation_context so 4647 * ext4_mb_load_buddy() should never fail. 4648 */ 4649 return; 4650 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4651 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4652 ac->ac_f_ex.fe_len); 4653 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4654 ext4_mb_unload_buddy(&e4b); 4655 return; 4656 } 4657 if (pa->pa_type == MB_INODE_PA) { 4658 spin_lock(&pa->pa_lock); 4659 pa->pa_free += ac->ac_b_ex.fe_len; 4660 spin_unlock(&pa->pa_lock); 4661 } 4662 } 4663 4664 /* 4665 * use blocks preallocated to inode 4666 */ 4667 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4668 struct ext4_prealloc_space *pa) 4669 { 4670 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4671 ext4_fsblk_t start; 4672 ext4_fsblk_t end; 4673 int len; 4674 4675 /* found preallocated blocks, use them */ 4676 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4677 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4678 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4679 len = EXT4_NUM_B2C(sbi, end - start); 4680 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4681 &ac->ac_b_ex.fe_start); 4682 ac->ac_b_ex.fe_len = len; 4683 ac->ac_status = AC_STATUS_FOUND; 4684 ac->ac_pa = pa; 4685 4686 BUG_ON(start < pa->pa_pstart); 4687 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4688 BUG_ON(pa->pa_free < len); 4689 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4690 pa->pa_free -= len; 4691 4692 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4693 } 4694 4695 /* 4696 * use blocks preallocated to locality group 4697 */ 4698 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4699 struct ext4_prealloc_space *pa) 4700 { 4701 unsigned int len = ac->ac_o_ex.fe_len; 4702 4703 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4704 &ac->ac_b_ex.fe_group, 4705 &ac->ac_b_ex.fe_start); 4706 ac->ac_b_ex.fe_len = len; 4707 ac->ac_status = AC_STATUS_FOUND; 4708 
ac->ac_pa = pa; 4709 4710 /* we don't correct pa_pstart or pa_len here to avoid 4711 * possible race when the group is being loaded concurrently 4712 * instead we correct pa later, after blocks are marked 4713 * in on-disk bitmap -- see ext4_mb_release_context() 4714 * Other CPUs are prevented from allocating from this pa by lg_mutex 4715 */ 4716 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 4717 pa->pa_lstart, len, pa); 4718 } 4719 4720 /* 4721 * Return the prealloc space that have minimal distance 4722 * from the goal block. @cpa is the prealloc 4723 * space that is having currently known minimal distance 4724 * from the goal block. 4725 */ 4726 static struct ext4_prealloc_space * 4727 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 4728 struct ext4_prealloc_space *pa, 4729 struct ext4_prealloc_space *cpa) 4730 { 4731 ext4_fsblk_t cur_distance, new_distance; 4732 4733 if (cpa == NULL) { 4734 atomic_inc(&pa->pa_count); 4735 return pa; 4736 } 4737 cur_distance = abs(goal_block - cpa->pa_pstart); 4738 new_distance = abs(goal_block - pa->pa_pstart); 4739 4740 if (cur_distance <= new_distance) 4741 return cpa; 4742 4743 /* drop the previous reference */ 4744 atomic_dec(&cpa->pa_count); 4745 atomic_inc(&pa->pa_count); 4746 return pa; 4747 } 4748 4749 /* 4750 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY 4751 */ 4752 static bool 4753 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac, 4754 struct ext4_prealloc_space *pa) 4755 { 4756 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4757 ext4_fsblk_t start; 4758 4759 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))) 4760 return true; 4761 4762 /* 4763 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted 4764 * in ext4_mb_normalize_request and will keep same with ac_o_ex 4765 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep 4766 * consistent with ext4_mb_find_by_goal. 4767 */ 4768 start = pa->pa_pstart + 4769 (ac->ac_g_ex.fe_logical - pa->pa_lstart); 4770 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) 4771 return false; 4772 4773 if (ac->ac_g_ex.fe_len > pa->pa_len - 4774 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) 4775 return false; 4776 4777 return true; 4778 } 4779 4780 /* 4781 * search goal blocks in preallocated space 4782 */ 4783 static noinline_for_stack bool 4784 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4785 { 4786 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4787 int order, i; 4788 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4789 struct ext4_locality_group *lg; 4790 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL; 4791 struct rb_node *iter; 4792 ext4_fsblk_t goal_block; 4793 4794 /* only data can be preallocated */ 4795 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4796 return false; 4797 4798 /* 4799 * first, try per-file preallocation by searching the inode pa rbtree. 4800 * 4801 * Here, we can't do a direct traversal of the tree because 4802 * ext4_mb_discard_group_preallocation() can paralelly mark the pa 4803 * deleted and that can cause direct traversal to skip some entries. 4804 */ 4805 read_lock(&ei->i_prealloc_lock); 4806 4807 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) { 4808 goto try_group_pa; 4809 } 4810 4811 /* 4812 * Step 1: Find a pa with logical start immediately adjacent to the 4813 * original logical start. This could be on the left or right. 4814 * 4815 * (tmp_pa->pa_lstart never changes so we can skip locking for it). 
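 * The loop below walks the rbtree keyed by pa_lstart, descending left or
 * right of the original logical start via ext4_mb_pa_rb_next_iter(), and
 * stops at the PA whose start is nearest to it (on either side).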
4816 */ 4817 for (iter = ei->i_prealloc_node.rb_node; iter; 4818 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4819 tmp_pa->pa_lstart, iter)) { 4820 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4821 pa_node.inode_node); 4822 } 4823 4824 /* 4825 * Step 2: The adjacent pa might be to the right of logical start, find 4826 * the left adjacent pa. After this step we'd have a valid tmp_pa whose 4827 * logical start is towards the left of original request's logical start 4828 */ 4829 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) { 4830 struct rb_node *tmp; 4831 tmp = rb_prev(&tmp_pa->pa_node.inode_node); 4832 4833 if (tmp) { 4834 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space, 4835 pa_node.inode_node); 4836 } else { 4837 /* 4838 * If there is no adjacent pa to the left then finding 4839 * an overlapping pa is not possible hence stop searching 4840 * inode pa tree 4841 */ 4842 goto try_group_pa; 4843 } 4844 } 4845 4846 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); 4847 4848 /* 4849 * Step 3: If the left adjacent pa is deleted, keep moving left to find 4850 * the first non deleted adjacent pa. After this step we should have a 4851 * valid tmp_pa which is guaranteed to be non deleted. 4852 */ 4853 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) { 4854 if (!iter) { 4855 /* 4856 * no non deleted left adjacent pa, so stop searching 4857 * inode pa tree 4858 */ 4859 goto try_group_pa; 4860 } 4861 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4862 pa_node.inode_node); 4863 spin_lock(&tmp_pa->pa_lock); 4864 if (tmp_pa->pa_deleted == 0) { 4865 /* 4866 * We will keep holding the pa_lock from 4867 * this point on because we don't want group discard 4868 * to delete this pa underneath us. Since group 4869 * discard is anyways an ENOSPC operation it 4870 * should be okay for it to wait a few more cycles. 4871 */ 4872 break; 4873 } else { 4874 spin_unlock(&tmp_pa->pa_lock); 4875 } 4876 } 4877 4878 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); 4879 BUG_ON(tmp_pa->pa_deleted == 1); 4880 4881 /* 4882 * Step 4: We now have the non deleted left adjacent pa. Only this 4883 * pa can possibly satisfy the request hence check if it overlaps 4884 * original logical start and stop searching if it doesn't. 4885 */ 4886 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) { 4887 spin_unlock(&tmp_pa->pa_lock); 4888 goto try_group_pa; 4889 } 4890 4891 /* non-extent files can't have physical blocks past 2^32 */ 4892 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4893 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4894 EXT4_MAX_BLOCK_FILE_PHYS)) { 4895 /* 4896 * Since PAs don't overlap, we won't find any other PA to 4897 * satisfy this. 4898 */ 4899 spin_unlock(&tmp_pa->pa_lock); 4900 goto try_group_pa; 4901 } 4902 4903 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { 4904 atomic_inc(&tmp_pa->pa_count); 4905 ext4_mb_use_inode_pa(ac, tmp_pa); 4906 spin_unlock(&tmp_pa->pa_lock); 4907 read_unlock(&ei->i_prealloc_lock); 4908 return true; 4909 } else { 4910 /* 4911 * We found a valid overlapping pa but couldn't use it because 4912 * it had no free blocks. This should ideally never happen 4913 * because: 4914 * 4915 * 1. When a new inode pa is added to rbtree it must have 4916 * pa_free > 0 since otherwise we won't actually need 4917 * preallocation. 4918 * 4919 * 2. 
An inode pa that is in the rbtree can only have it's 4920 * pa_free become zero when another thread calls: 4921 * ext4_mb_new_blocks 4922 * ext4_mb_use_preallocated 4923 * ext4_mb_use_inode_pa 4924 * 4925 * 3. Further, after the above calls make pa_free == 0, we will 4926 * immediately remove it from the rbtree in: 4927 * ext4_mb_new_blocks 4928 * ext4_mb_release_context 4929 * ext4_mb_put_pa 4930 * 4931 * 4. Since the pa_free becoming 0 and pa_free getting removed 4932 * from tree both happen in ext4_mb_new_blocks, which is always 4933 * called with i_data_sem held for data allocations, we can be 4934 * sure that another process will never see a pa in rbtree with 4935 * pa_free == 0. 4936 */ 4937 WARN_ON_ONCE(tmp_pa->pa_free == 0); 4938 } 4939 spin_unlock(&tmp_pa->pa_lock); 4940 try_group_pa: 4941 read_unlock(&ei->i_prealloc_lock); 4942 4943 /* can we use group allocation? */ 4944 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 4945 return false; 4946 4947 /* inode may have no locality group for some reason */ 4948 lg = ac->ac_lg; 4949 if (lg == NULL) 4950 return false; 4951 order = fls(ac->ac_o_ex.fe_len) - 1; 4952 if (order > PREALLOC_TB_SIZE - 1) 4953 /* The max size of hash table is PREALLOC_TB_SIZE */ 4954 order = PREALLOC_TB_SIZE - 1; 4955 4956 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 4957 /* 4958 * search for the prealloc space that is having 4959 * minimal distance from the goal block. 4960 */ 4961 for (i = order; i < PREALLOC_TB_SIZE; i++) { 4962 rcu_read_lock(); 4963 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], 4964 pa_node.lg_list) { 4965 spin_lock(&tmp_pa->pa_lock); 4966 if (tmp_pa->pa_deleted == 0 && 4967 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { 4968 4969 cpa = ext4_mb_check_group_pa(goal_block, 4970 tmp_pa, cpa); 4971 } 4972 spin_unlock(&tmp_pa->pa_lock); 4973 } 4974 rcu_read_unlock(); 4975 } 4976 if (cpa) { 4977 ext4_mb_use_group_pa(ac, cpa); 4978 return true; 4979 } 4980 return false; 4981 } 4982 4983 /* 4984 * the function goes through all preallocation in this group and marks them 4985 * used in in-core bitmap. buddy must be generated from this bitmap 4986 * Need to be called with ext4 group lock held 4987 */ 4988 static noinline_for_stack 4989 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4990 ext4_group_t group) 4991 { 4992 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4993 struct ext4_prealloc_space *pa; 4994 struct list_head *cur; 4995 ext4_group_t groupnr; 4996 ext4_grpblk_t start; 4997 int preallocated = 0; 4998 int len; 4999 5000 if (!grp) 5001 return; 5002 5003 /* all form of preallocation discards first load group, 5004 * so the only competing code is preallocation use. 
5005 * we don't need any locking here 5006 * notice we do NOT ignore preallocations with pa_deleted 5007 * otherwise we could leave used blocks available for 5008 * allocation in buddy when concurrent ext4_mb_put_pa() 5009 * is dropping preallocation 5010 */ 5011 list_for_each(cur, &grp->bb_prealloc_list) { 5012 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 5013 spin_lock(&pa->pa_lock); 5014 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5015 &groupnr, &start); 5016 len = pa->pa_len; 5017 spin_unlock(&pa->pa_lock); 5018 if (unlikely(len == 0)) 5019 continue; 5020 BUG_ON(groupnr != group); 5021 mb_set_bits(bitmap, start, len); 5022 preallocated += len; 5023 } 5024 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 5025 } 5026 5027 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 5028 struct ext4_prealloc_space *pa) 5029 { 5030 struct ext4_inode_info *ei; 5031 5032 if (pa->pa_deleted) { 5033 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 5034 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 5035 pa->pa_len); 5036 return; 5037 } 5038 5039 pa->pa_deleted = 1; 5040 5041 if (pa->pa_type == MB_INODE_PA) { 5042 ei = EXT4_I(pa->pa_inode); 5043 atomic_dec(&ei->i_prealloc_active); 5044 } 5045 } 5046 5047 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 5048 { 5049 BUG_ON(!pa); 5050 BUG_ON(atomic_read(&pa->pa_count)); 5051 BUG_ON(pa->pa_deleted == 0); 5052 kmem_cache_free(ext4_pspace_cachep, pa); 5053 } 5054 5055 static void ext4_mb_pa_callback(struct rcu_head *head) 5056 { 5057 struct ext4_prealloc_space *pa; 5058 5059 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 5060 ext4_mb_pa_free(pa); 5061 } 5062 5063 /* 5064 * drops a reference to preallocated space descriptor 5065 * if this was the last reference and the space is consumed 5066 */ 5067 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 5068 struct super_block *sb, struct ext4_prealloc_space *pa) 5069 { 5070 ext4_group_t grp; 5071 ext4_fsblk_t grp_blk; 5072 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 5073 5074 /* in this short window concurrent discard can set pa_deleted */ 5075 spin_lock(&pa->pa_lock); 5076 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 5077 spin_unlock(&pa->pa_lock); 5078 return; 5079 } 5080 5081 if (pa->pa_deleted == 1) { 5082 spin_unlock(&pa->pa_lock); 5083 return; 5084 } 5085 5086 ext4_mb_mark_pa_deleted(sb, pa); 5087 spin_unlock(&pa->pa_lock); 5088 5089 grp_blk = pa->pa_pstart; 5090 /* 5091 * If doing group-based preallocation, pa_pstart may be in the 5092 * next group when pa is used up 5093 */ 5094 if (pa->pa_type == MB_GROUP_PA) 5095 grp_blk--; 5096 5097 grp = ext4_get_group_number(sb, grp_blk); 5098 5099 /* 5100 * possible race: 5101 * 5102 * P1 (buddy init) P2 (regular allocation) 5103 * find block B in PA 5104 * copy on-disk bitmap to buddy 5105 * mark B in on-disk bitmap 5106 * drop PA from group 5107 * mark all PAs in buddy 5108 * 5109 * thus, P1 initializes buddy with B available. 
to prevent this 5110 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 5111 * against that pair 5112 */ 5113 ext4_lock_group(sb, grp); 5114 list_del(&pa->pa_group_list); 5115 ext4_unlock_group(sb, grp); 5116 5117 if (pa->pa_type == MB_INODE_PA) { 5118 write_lock(pa->pa_node_lock.inode_lock); 5119 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5120 write_unlock(pa->pa_node_lock.inode_lock); 5121 ext4_mb_pa_free(pa); 5122 } else { 5123 spin_lock(pa->pa_node_lock.lg_lock); 5124 list_del_rcu(&pa->pa_node.lg_list); 5125 spin_unlock(pa->pa_node_lock.lg_lock); 5126 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5127 } 5128 } 5129 5130 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new) 5131 { 5132 struct rb_node **iter = &root->rb_node, *parent = NULL; 5133 struct ext4_prealloc_space *iter_pa, *new_pa; 5134 ext4_lblk_t iter_start, new_start; 5135 5136 while (*iter) { 5137 iter_pa = rb_entry(*iter, struct ext4_prealloc_space, 5138 pa_node.inode_node); 5139 new_pa = rb_entry(new, struct ext4_prealloc_space, 5140 pa_node.inode_node); 5141 iter_start = iter_pa->pa_lstart; 5142 new_start = new_pa->pa_lstart; 5143 5144 parent = *iter; 5145 if (new_start < iter_start) 5146 iter = &((*iter)->rb_left); 5147 else 5148 iter = &((*iter)->rb_right); 5149 } 5150 5151 rb_link_node(new, parent, iter); 5152 rb_insert_color(new, root); 5153 } 5154 5155 /* 5156 * creates new preallocated space for given inode 5157 */ 5158 static noinline_for_stack void 5159 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 5160 { 5161 struct super_block *sb = ac->ac_sb; 5162 struct ext4_sb_info *sbi = EXT4_SB(sb); 5163 struct ext4_prealloc_space *pa; 5164 struct ext4_group_info *grp; 5165 struct ext4_inode_info *ei; 5166 5167 /* preallocate only when found space is larger then requested */ 5168 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5169 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5170 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5171 BUG_ON(ac->ac_pa == NULL); 5172 5173 pa = ac->ac_pa; 5174 5175 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { 5176 struct ext4_free_extent ex = { 5177 .fe_logical = ac->ac_g_ex.fe_logical, 5178 .fe_len = ac->ac_orig_goal_len, 5179 }; 5180 loff_t orig_goal_end = extent_logical_end(sbi, &ex); 5181 5182 /* we can't allocate as much as normalizer wants. 5183 * so, found space must get proper lstart 5184 * to cover original request */ 5185 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 5186 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 5187 5188 /* 5189 * Use the below logic for adjusting best extent as it keeps 5190 * fragmentation in check while ensuring logical range of best 5191 * extent doesn't overflow out of goal extent: 5192 * 5193 * 1. Check if best ex can be kept at end of goal (before 5194 * cr_best_avail trimmed it) and still cover original start 5195 * 2. Else, check if best ex can be kept at start of goal and 5196 * still cover original start 5197 * 3. Else, keep the best ex at start of original request. 
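 * For example (in blocks, assuming one block per cluster): with a goal of
 * [0, 64), the original request at 40 and a best extent of 16 blocks,
 * placing the extent at the end of the goal gives [48, 64) which misses 40,
 * placing it at the start gives [0, 16) which also misses 40, so it is
 * finally placed at [40, 56).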
5198 */ 5199 ex.fe_len = ac->ac_b_ex.fe_len; 5200 5201 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len); 5202 if (ac->ac_o_ex.fe_logical >= ex.fe_logical) 5203 goto adjust_bex; 5204 5205 ex.fe_logical = ac->ac_g_ex.fe_logical; 5206 if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex)) 5207 goto adjust_bex; 5208 5209 ex.fe_logical = ac->ac_o_ex.fe_logical; 5210 adjust_bex: 5211 ac->ac_b_ex.fe_logical = ex.fe_logical; 5212 5213 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 5214 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 5215 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end); 5216 } 5217 5218 pa->pa_lstart = ac->ac_b_ex.fe_logical; 5219 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5220 pa->pa_len = ac->ac_b_ex.fe_len; 5221 pa->pa_free = pa->pa_len; 5222 spin_lock_init(&pa->pa_lock); 5223 INIT_LIST_HEAD(&pa->pa_group_list); 5224 pa->pa_deleted = 0; 5225 pa->pa_type = MB_INODE_PA; 5226 5227 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5228 pa->pa_len, pa->pa_lstart); 5229 trace_ext4_mb_new_inode_pa(ac, pa); 5230 5231 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 5232 ext4_mb_use_inode_pa(ac, pa); 5233 5234 ei = EXT4_I(ac->ac_inode); 5235 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5236 if (!grp) 5237 return; 5238 5239 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; 5240 pa->pa_inode = ac->ac_inode; 5241 5242 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5243 5244 write_lock(pa->pa_node_lock.inode_lock); 5245 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); 5246 write_unlock(pa->pa_node_lock.inode_lock); 5247 atomic_inc(&ei->i_prealloc_active); 5248 } 5249 5250 /* 5251 * creates new preallocated space for locality group inodes belongs to 5252 */ 5253 static noinline_for_stack void 5254 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 5255 { 5256 struct super_block *sb = ac->ac_sb; 5257 struct ext4_locality_group *lg; 5258 struct ext4_prealloc_space *pa; 5259 struct ext4_group_info *grp; 5260 5261 /* preallocate only when found space is larger then requested */ 5262 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5263 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5264 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5265 BUG_ON(ac->ac_pa == NULL); 5266 5267 pa = ac->ac_pa; 5268 5269 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5270 pa->pa_lstart = pa->pa_pstart; 5271 pa->pa_len = ac->ac_b_ex.fe_len; 5272 pa->pa_free = pa->pa_len; 5273 spin_lock_init(&pa->pa_lock); 5274 INIT_LIST_HEAD(&pa->pa_node.lg_list); 5275 INIT_LIST_HEAD(&pa->pa_group_list); 5276 pa->pa_deleted = 0; 5277 pa->pa_type = MB_GROUP_PA; 5278 5279 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5280 pa->pa_len, pa->pa_lstart); 5281 trace_ext4_mb_new_group_pa(ac, pa); 5282 5283 ext4_mb_use_group_pa(ac, pa); 5284 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 5285 5286 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5287 if (!grp) 5288 return; 5289 lg = ac->ac_lg; 5290 BUG_ON(lg == NULL); 5291 5292 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; 5293 pa->pa_inode = NULL; 5294 5295 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5296 5297 /* 5298 * We will later add the new pa to the right bucket 5299 * after updating the pa_free in ext4_mb_release_context 5300 */ 5301 } 5302 5303 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 5304 { 5305 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5306 ext4_mb_new_group_pa(ac); 5307 else 5308 
ext4_mb_new_inode_pa(ac); 5309 } 5310 5311 /* 5312 * finds all unused blocks in on-disk bitmap, frees them in 5313 * in-core bitmap and buddy. 5314 * @pa must be unlinked from inode and group lists, so that 5315 * nobody else can find/use it. 5316 * the caller MUST hold group/inode locks. 5317 * TODO: optimize the case when there are no in-core structures yet 5318 */ 5319 static noinline_for_stack int 5320 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 5321 struct ext4_prealloc_space *pa) 5322 { 5323 struct super_block *sb = e4b->bd_sb; 5324 struct ext4_sb_info *sbi = EXT4_SB(sb); 5325 unsigned int end; 5326 unsigned int next; 5327 ext4_group_t group; 5328 ext4_grpblk_t bit; 5329 unsigned long long grp_blk_start; 5330 int free = 0; 5331 5332 BUG_ON(pa->pa_deleted == 0); 5333 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5334 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5335 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5336 end = bit + pa->pa_len; 5337 5338 while (bit < end) { 5339 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5340 if (bit >= end) 5341 break; 5342 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5343 mb_debug(sb, "free preallocated %u/%u in group %u\n", 5344 (unsigned) ext4_group_first_block_no(sb, group) + bit, 5345 (unsigned) next - bit, (unsigned) group); 5346 free += next - bit; 5347 5348 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 5349 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 5350 EXT4_C2B(sbi, bit)), 5351 next - bit); 5352 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5353 bit = next + 1; 5354 } 5355 if (free != pa->pa_free) { 5356 ext4_msg(e4b->bd_sb, KERN_CRIT, 5357 "pa %p: logic %lu, phys. %lu, len %d", 5358 pa, (unsigned long) pa->pa_lstart, 5359 (unsigned long) pa->pa_pstart, 5360 pa->pa_len); 5361 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 5362 free, pa->pa_free); 5363 /* 5364 * pa is already deleted so we use the value obtained 5365 * from the bitmap and continue. 
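 * Note that "free" (counted from the on-disk bitmap) is what was actually
 * returned to the buddy above, so it is the amount added to s_mb_discarded
 * below rather than the stale pa_free.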
5366 */ 5367 } 5368 atomic_add(free, &sbi->s_mb_discarded); 5369 5370 return 0; 5371 } 5372 5373 static noinline_for_stack int 5374 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 5375 struct ext4_prealloc_space *pa) 5376 { 5377 struct super_block *sb = e4b->bd_sb; 5378 ext4_group_t group; 5379 ext4_grpblk_t bit; 5380 5381 trace_ext4_mb_release_group_pa(sb, pa); 5382 BUG_ON(pa->pa_deleted == 0); 5383 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5384 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5385 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5386 e4b->bd_group, group, pa->pa_pstart); 5387 return 0; 5388 } 5389 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5390 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 5391 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5392 5393 return 0; 5394 } 5395 5396 /* 5397 * releases all preallocations in given group 5398 * 5399 * first, we need to decide discard policy: 5400 * - when do we discard 5401 * 1) ENOSPC 5402 * - how many do we discard 5403 * 1) how many requested 5404 */ 5405 static noinline_for_stack int 5406 ext4_mb_discard_group_preallocations(struct super_block *sb, 5407 ext4_group_t group, int *busy) 5408 { 5409 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5410 struct buffer_head *bitmap_bh = NULL; 5411 struct ext4_prealloc_space *pa, *tmp; 5412 LIST_HEAD(list); 5413 struct ext4_buddy e4b; 5414 struct ext4_inode_info *ei; 5415 int err; 5416 int free = 0; 5417 5418 if (!grp) 5419 return 0; 5420 mb_debug(sb, "discard preallocation for group %u\n", group); 5421 if (list_empty(&grp->bb_prealloc_list)) 5422 goto out_dbg; 5423 5424 bitmap_bh = ext4_read_block_bitmap(sb, group); 5425 if (IS_ERR(bitmap_bh)) { 5426 err = PTR_ERR(bitmap_bh); 5427 ext4_error_err(sb, -err, 5428 "Error %d reading block bitmap for %u", 5429 err, group); 5430 goto out_dbg; 5431 } 5432 5433 err = ext4_mb_load_buddy(sb, group, &e4b); 5434 if (err) { 5435 ext4_warning(sb, "Error %d loading buddy information for %u", 5436 err, group); 5437 put_bh(bitmap_bh); 5438 goto out_dbg; 5439 } 5440 5441 ext4_lock_group(sb, group); 5442 list_for_each_entry_safe(pa, tmp, 5443 &grp->bb_prealloc_list, pa_group_list) { 5444 spin_lock(&pa->pa_lock); 5445 if (atomic_read(&pa->pa_count)) { 5446 spin_unlock(&pa->pa_lock); 5447 *busy = 1; 5448 continue; 5449 } 5450 if (pa->pa_deleted) { 5451 spin_unlock(&pa->pa_lock); 5452 continue; 5453 } 5454 5455 /* seems this one can be freed ... */ 5456 ext4_mb_mark_pa_deleted(sb, pa); 5457 5458 if (!free) 5459 this_cpu_inc(discard_pa_seq); 5460 5461 /* we can trust pa_free ... 
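 * (the PA was just marked deleted above while holding pa_lock with
 * pa_count == 0, so no allocation can consume from it concurrently)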
*/ 5462 free += pa->pa_free; 5463 5464 spin_unlock(&pa->pa_lock); 5465 5466 list_del(&pa->pa_group_list); 5467 list_add(&pa->u.pa_tmp_list, &list); 5468 } 5469 5470 /* now free all selected PAs */ 5471 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5472 5473 /* remove from object (inode or locality group) */ 5474 if (pa->pa_type == MB_GROUP_PA) { 5475 spin_lock(pa->pa_node_lock.lg_lock); 5476 list_del_rcu(&pa->pa_node.lg_list); 5477 spin_unlock(pa->pa_node_lock.lg_lock); 5478 } else { 5479 write_lock(pa->pa_node_lock.inode_lock); 5480 ei = EXT4_I(pa->pa_inode); 5481 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5482 write_unlock(pa->pa_node_lock.inode_lock); 5483 } 5484 5485 list_del(&pa->u.pa_tmp_list); 5486 5487 if (pa->pa_type == MB_GROUP_PA) { 5488 ext4_mb_release_group_pa(&e4b, pa); 5489 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5490 } else { 5491 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5492 ext4_mb_pa_free(pa); 5493 } 5494 } 5495 5496 ext4_unlock_group(sb, group); 5497 ext4_mb_unload_buddy(&e4b); 5498 put_bh(bitmap_bh); 5499 out_dbg: 5500 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 5501 free, group, grp->bb_free); 5502 return free; 5503 } 5504 5505 /* 5506 * releases all non-used preallocated blocks for given inode 5507 * 5508 * It's important to discard preallocations under i_data_sem 5509 * We don't want another block to be served from the prealloc 5510 * space when we are discarding the inode prealloc space. 5511 * 5512 * FIXME!! Make sure it is valid at all the call sites 5513 */ 5514 void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 5515 { 5516 struct ext4_inode_info *ei = EXT4_I(inode); 5517 struct super_block *sb = inode->i_sb; 5518 struct buffer_head *bitmap_bh = NULL; 5519 struct ext4_prealloc_space *pa, *tmp; 5520 ext4_group_t group = 0; 5521 LIST_HEAD(list); 5522 struct ext4_buddy e4b; 5523 struct rb_node *iter; 5524 int err; 5525 5526 if (!S_ISREG(inode->i_mode)) { 5527 return; 5528 } 5529 5530 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 5531 return; 5532 5533 mb_debug(sb, "discard preallocation for inode %lu\n", 5534 inode->i_ino); 5535 trace_ext4_discard_preallocations(inode, 5536 atomic_read(&ei->i_prealloc_active), needed); 5537 5538 if (needed == 0) 5539 needed = UINT_MAX; 5540 5541 repeat: 5542 /* first, collect all pa's in the inode */ 5543 write_lock(&ei->i_prealloc_lock); 5544 for (iter = rb_first(&ei->i_prealloc_node); iter && needed; 5545 iter = rb_next(iter)) { 5546 pa = rb_entry(iter, struct ext4_prealloc_space, 5547 pa_node.inode_node); 5548 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); 5549 5550 spin_lock(&pa->pa_lock); 5551 if (atomic_read(&pa->pa_count)) { 5552 /* this shouldn't happen often - nobody should 5553 * use preallocation while we're discarding it */ 5554 spin_unlock(&pa->pa_lock); 5555 write_unlock(&ei->i_prealloc_lock); 5556 ext4_msg(sb, KERN_ERR, 5557 "uh-oh! 
used pa while discarding"); 5558 WARN_ON(1); 5559 schedule_timeout_uninterruptible(HZ); 5560 goto repeat; 5561 5562 } 5563 if (pa->pa_deleted == 0) { 5564 ext4_mb_mark_pa_deleted(sb, pa); 5565 spin_unlock(&pa->pa_lock); 5566 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5567 list_add(&pa->u.pa_tmp_list, &list); 5568 needed--; 5569 continue; 5570 } 5571 5572 /* someone is deleting pa right now */ 5573 spin_unlock(&pa->pa_lock); 5574 write_unlock(&ei->i_prealloc_lock); 5575 5576 /* we have to wait here because pa_deleted 5577 * doesn't mean pa is already unlinked from 5578 * the list. as we might be called from 5579 * ->clear_inode() the inode will get freed 5580 * and concurrent thread which is unlinking 5581 * pa from inode's list may access already 5582 * freed memory, bad-bad-bad */ 5583 5584 /* XXX: if this happens too often, we can 5585 * add a flag to force wait only in case 5586 * of ->clear_inode(), but not in case of 5587 * regular truncate */ 5588 schedule_timeout_uninterruptible(HZ); 5589 goto repeat; 5590 } 5591 write_unlock(&ei->i_prealloc_lock); 5592 5593 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5594 BUG_ON(pa->pa_type != MB_INODE_PA); 5595 group = ext4_get_group_number(sb, pa->pa_pstart); 5596 5597 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5598 GFP_NOFS|__GFP_NOFAIL); 5599 if (err) { 5600 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 5601 err, group); 5602 continue; 5603 } 5604 5605 bitmap_bh = ext4_read_block_bitmap(sb, group); 5606 if (IS_ERR(bitmap_bh)) { 5607 err = PTR_ERR(bitmap_bh); 5608 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 5609 err, group); 5610 ext4_mb_unload_buddy(&e4b); 5611 continue; 5612 } 5613 5614 ext4_lock_group(sb, group); 5615 list_del(&pa->pa_group_list); 5616 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5617 ext4_unlock_group(sb, group); 5618 5619 ext4_mb_unload_buddy(&e4b); 5620 put_bh(bitmap_bh); 5621 5622 list_del(&pa->u.pa_tmp_list); 5623 ext4_mb_pa_free(pa); 5624 } 5625 } 5626 5627 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 5628 { 5629 struct ext4_prealloc_space *pa; 5630 5631 BUG_ON(ext4_pspace_cachep == NULL); 5632 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 5633 if (!pa) 5634 return -ENOMEM; 5635 atomic_set(&pa->pa_count, 1); 5636 ac->ac_pa = pa; 5637 return 0; 5638 } 5639 5640 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 5641 { 5642 struct ext4_prealloc_space *pa = ac->ac_pa; 5643 5644 BUG_ON(!pa); 5645 ac->ac_pa = NULL; 5646 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 5647 /* 5648 * current function is only called due to an error or due to 5649 * len of found blocks < len of requested blocks hence the PA has not 5650 * been added to grp->bb_prealloc_list. 
So we don't need to lock it 5651 */ 5652 pa->pa_deleted = 1; 5653 ext4_mb_pa_free(pa); 5654 } 5655 5656 #ifdef CONFIG_EXT4_DEBUG 5657 static inline void ext4_mb_show_pa(struct super_block *sb) 5658 { 5659 ext4_group_t i, ngroups; 5660 5661 if (ext4_forced_shutdown(sb)) 5662 return; 5663 5664 ngroups = ext4_get_groups_count(sb); 5665 mb_debug(sb, "groups: "); 5666 for (i = 0; i < ngroups; i++) { 5667 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5668 struct ext4_prealloc_space *pa; 5669 ext4_grpblk_t start; 5670 struct list_head *cur; 5671 5672 if (!grp) 5673 continue; 5674 ext4_lock_group(sb, i); 5675 list_for_each(cur, &grp->bb_prealloc_list) { 5676 pa = list_entry(cur, struct ext4_prealloc_space, 5677 pa_group_list); 5678 spin_lock(&pa->pa_lock); 5679 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5680 NULL, &start); 5681 spin_unlock(&pa->pa_lock); 5682 mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5683 pa->pa_len); 5684 } 5685 ext4_unlock_group(sb, i); 5686 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5687 grp->bb_fragments); 5688 } 5689 } 5690 5691 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5692 { 5693 struct super_block *sb = ac->ac_sb; 5694 5695 if (ext4_forced_shutdown(sb)) 5696 return; 5697 5698 mb_debug(sb, "Can't allocate:" 5699 " Allocation context details:"); 5700 mb_debug(sb, "status %u flags 0x%x", 5701 ac->ac_status, ac->ac_flags); 5702 mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5703 "goal %lu/%lu/%lu@%lu, " 5704 "best %lu/%lu/%lu@%lu cr %d", 5705 (unsigned long)ac->ac_o_ex.fe_group, 5706 (unsigned long)ac->ac_o_ex.fe_start, 5707 (unsigned long)ac->ac_o_ex.fe_len, 5708 (unsigned long)ac->ac_o_ex.fe_logical, 5709 (unsigned long)ac->ac_g_ex.fe_group, 5710 (unsigned long)ac->ac_g_ex.fe_start, 5711 (unsigned long)ac->ac_g_ex.fe_len, 5712 (unsigned long)ac->ac_g_ex.fe_logical, 5713 (unsigned long)ac->ac_b_ex.fe_group, 5714 (unsigned long)ac->ac_b_ex.fe_start, 5715 (unsigned long)ac->ac_b_ex.fe_len, 5716 (unsigned long)ac->ac_b_ex.fe_logical, 5717 (int)ac->ac_criteria); 5718 mb_debug(sb, "%u found", ac->ac_found); 5719 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no"); 5720 if (ac->ac_pa) 5721 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ? 5722 "group pa" : "inode pa"); 5723 ext4_mb_show_pa(sb); 5724 } 5725 #else 5726 static inline void ext4_mb_show_pa(struct super_block *sb) 5727 { 5728 } 5729 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5730 { 5731 ext4_mb_show_pa(ac->ac_sb); 5732 } 5733 #endif 5734 5735 /* 5736 * We use locality group preallocation for small size file. 
The size of the 5737 * file is determined by the current size or the resulting size after 5738 * allocation which ever is larger 5739 * 5740 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5741 */ 5742 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5743 { 5744 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5745 int bsbits = ac->ac_sb->s_blocksize_bits; 5746 loff_t size, isize; 5747 bool inode_pa_eligible, group_pa_eligible; 5748 5749 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5750 return; 5751 5752 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 5753 return; 5754 5755 group_pa_eligible = sbi->s_mb_group_prealloc > 0; 5756 inode_pa_eligible = true; 5757 size = extent_logical_end(sbi, &ac->ac_o_ex); 5758 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 5759 >> bsbits; 5760 5761 /* No point in using inode preallocation for closed files */ 5762 if ((size == isize) && !ext4_fs_is_busy(sbi) && 5763 !inode_is_open_for_write(ac->ac_inode)) 5764 inode_pa_eligible = false; 5765 5766 size = max(size, isize); 5767 /* Don't use group allocation for large files */ 5768 if (size > sbi->s_mb_stream_request) 5769 group_pa_eligible = false; 5770 5771 if (!group_pa_eligible) { 5772 if (inode_pa_eligible) 5773 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5774 else 5775 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 5776 return; 5777 } 5778 5779 BUG_ON(ac->ac_lg != NULL); 5780 /* 5781 * locality group prealloc space are per cpu. The reason for having 5782 * per cpu locality group is to reduce the contention between block 5783 * request from multiple CPUs. 5784 */ 5785 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5786 5787 /* we're going to use group allocation */ 5788 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5789 5790 /* serialize all allocations in the group */ 5791 mutex_lock(&ac->ac_lg->lg_mutex); 5792 } 5793 5794 static noinline_for_stack void 5795 ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5796 struct ext4_allocation_request *ar) 5797 { 5798 struct super_block *sb = ar->inode->i_sb; 5799 struct ext4_sb_info *sbi = EXT4_SB(sb); 5800 struct ext4_super_block *es = sbi->s_es; 5801 ext4_group_t group; 5802 unsigned int len; 5803 ext4_fsblk_t goal; 5804 ext4_grpblk_t block; 5805 5806 /* we can't allocate > group size */ 5807 len = ar->len; 5808 5809 /* just a dirty hack to filter too big requests */ 5810 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 5811 len = EXT4_CLUSTERS_PER_GROUP(sb); 5812 5813 /* start searching from the goal */ 5814 goal = ar->goal; 5815 if (goal < le32_to_cpu(es->s_first_data_block) || 5816 goal >= ext4_blocks_count(es)) 5817 goal = le32_to_cpu(es->s_first_data_block); 5818 ext4_get_group_no_and_offset(sb, goal, &group, &block); 5819 5820 /* set up allocation goals */ 5821 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5822 ac->ac_status = AC_STATUS_CONTINUE; 5823 ac->ac_sb = sb; 5824 ac->ac_inode = ar->inode; 5825 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5826 ac->ac_o_ex.fe_group = group; 5827 ac->ac_o_ex.fe_start = block; 5828 ac->ac_o_ex.fe_len = len; 5829 ac->ac_g_ex = ac->ac_o_ex; 5830 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; 5831 ac->ac_flags = ar->flags; 5832 5833 /* we have to define context: we'll work with a file or 5834 * locality group. 
this is a policy, actually */
5835 	ext4_mb_group_or_file(ac);
5836 
5837 	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5838 			"left: %u/%u, right %u/%u to %swritable\n",
5839 			(unsigned) ar->len, (unsigned) ar->logical,
5840 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5841 			(unsigned) ar->lleft, (unsigned) ar->pleft,
5842 			(unsigned) ar->lright, (unsigned) ar->pright,
5843 			inode_is_open_for_write(ar->inode) ? "" : "non-");
5844 }
5845 
5846 static noinline_for_stack void
5847 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5848 					struct ext4_locality_group *lg,
5849 					int order, int total_entries)
5850 {
5851 	ext4_group_t group = 0;
5852 	struct ext4_buddy e4b;
5853 	LIST_HEAD(discard_list);
5854 	struct ext4_prealloc_space *pa, *tmp;
5855 
5856 	mb_debug(sb, "discard locality group preallocation\n");
5857 
5858 	spin_lock(&lg->lg_prealloc_lock);
5859 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5860 				pa_node.lg_list,
5861 				lockdep_is_held(&lg->lg_prealloc_lock)) {
5862 		spin_lock(&pa->pa_lock);
5863 		if (atomic_read(&pa->pa_count)) {
5864 			/*
5865 			 * This is the pa that we just used
5866 			 * for block allocation. So don't
5867 			 * free it.
5868 			 */
5869 			spin_unlock(&pa->pa_lock);
5870 			continue;
5871 		}
5872 		if (pa->pa_deleted) {
5873 			spin_unlock(&pa->pa_lock);
5874 			continue;
5875 		}
5876 		/* only lg prealloc space */
5877 		BUG_ON(pa->pa_type != MB_GROUP_PA);
5878 
5879 		/* seems this one can be freed ... */
5880 		ext4_mb_mark_pa_deleted(sb, pa);
5881 		spin_unlock(&pa->pa_lock);
5882 
5883 		list_del_rcu(&pa->pa_node.lg_list);
5884 		list_add(&pa->u.pa_tmp_list, &discard_list);
5885 
5886 		total_entries--;
5887 		if (total_entries <= 5) {
5888 			/*
5889 			 * we want to keep only 5 entries
5890 			 * allowing it to grow to 8. This
5891 			 * makes sure we don't call discard
5892 			 * too soon for this list.
5893 			 */
5894 			break;
5895 		}
5896 	}
5897 	spin_unlock(&lg->lg_prealloc_lock);
5898 
5899 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5900 		int err;
5901 
5902 		group = ext4_get_group_number(sb, pa->pa_pstart);
5903 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5904 					     GFP_NOFS|__GFP_NOFAIL);
5905 		if (err) {
5906 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5907 				       err, group);
5908 			continue;
5909 		}
5910 		ext4_lock_group(sb, group);
5911 		list_del(&pa->pa_group_list);
5912 		ext4_mb_release_group_pa(&e4b, pa);
5913 		ext4_unlock_group(sb, group);
5914 
5915 		ext4_mb_unload_buddy(&e4b);
5916 		list_del(&pa->u.pa_tmp_list);
5917 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5918 	}
5919 }
5920 
5921 /*
5922 * We have incremented pa_count. So it cannot be freed at this
5923 * point. Also we hold lg_mutex. So no parallel allocation is
5924 * possible from this lg. That means pa_free cannot be updated.
5925 *
5926 * A parallel ext4_mb_discard_group_preallocations is possible,
5927 * which can cause the lg_prealloc_list to be updated.
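 *
 * As a purely illustrative example: a PA with pa_free of 100 clusters is
 * filed under order fls(100) - 1 == 6 of lg->lg_prealloc_list[]; once a
 * per-order list grows past 8 entries, ext4_mb_discard_lg_preallocations()
 * above trims it back to roughly 5 entries.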
5928 */ 5929 5930 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 5931 { 5932 int order, added = 0, lg_prealloc_count = 1; 5933 struct super_block *sb = ac->ac_sb; 5934 struct ext4_locality_group *lg = ac->ac_lg; 5935 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 5936 5937 order = fls(pa->pa_free) - 1; 5938 if (order > PREALLOC_TB_SIZE - 1) 5939 /* The max size of hash table is PREALLOC_TB_SIZE */ 5940 order = PREALLOC_TB_SIZE - 1; 5941 /* Add the prealloc space to lg */ 5942 spin_lock(&lg->lg_prealloc_lock); 5943 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 5944 pa_node.lg_list, 5945 lockdep_is_held(&lg->lg_prealloc_lock)) { 5946 spin_lock(&tmp_pa->pa_lock); 5947 if (tmp_pa->pa_deleted) { 5948 spin_unlock(&tmp_pa->pa_lock); 5949 continue; 5950 } 5951 if (!added && pa->pa_free < tmp_pa->pa_free) { 5952 /* Add to the tail of the previous entry */ 5953 list_add_tail_rcu(&pa->pa_node.lg_list, 5954 &tmp_pa->pa_node.lg_list); 5955 added = 1; 5956 /* 5957 * we want to count the total 5958 * number of entries in the list 5959 */ 5960 } 5961 spin_unlock(&tmp_pa->pa_lock); 5962 lg_prealloc_count++; 5963 } 5964 if (!added) 5965 list_add_tail_rcu(&pa->pa_node.lg_list, 5966 &lg->lg_prealloc_list[order]); 5967 spin_unlock(&lg->lg_prealloc_lock); 5968 5969 /* Now trim the list to be not more than 8 elements */ 5970 if (lg_prealloc_count > 8) 5971 ext4_mb_discard_lg_preallocations(sb, lg, 5972 order, lg_prealloc_count); 5973 } 5974 5975 /* 5976 * release all resource we used in allocation 5977 */ 5978 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5979 { 5980 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5981 struct ext4_prealloc_space *pa = ac->ac_pa; 5982 if (pa) { 5983 if (pa->pa_type == MB_GROUP_PA) { 5984 /* see comment in ext4_mb_use_group_pa() */ 5985 spin_lock(&pa->pa_lock); 5986 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5987 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5988 pa->pa_free -= ac->ac_b_ex.fe_len; 5989 pa->pa_len -= ac->ac_b_ex.fe_len; 5990 spin_unlock(&pa->pa_lock); 5991 5992 /* 5993 * We want to add the pa to the right bucket. 5994 * Remove it from the list and while adding 5995 * make sure the list to which we are adding 5996 * doesn't grow big. 
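 *
 * For example (numbers picked arbitrarily): a group PA that started with
 * pa_free == 64 (order-6 list) and had 24 clusters carved out of it now
 * has pa_free == 40, so it belongs on the order-5 list (fls(40) - 1 == 5).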
5997 */ 5998 if (likely(pa->pa_free)) { 5999 spin_lock(pa->pa_node_lock.lg_lock); 6000 list_del_rcu(&pa->pa_node.lg_list); 6001 spin_unlock(pa->pa_node_lock.lg_lock); 6002 ext4_mb_add_n_trim(ac); 6003 } 6004 } 6005 6006 ext4_mb_put_pa(ac, ac->ac_sb, pa); 6007 } 6008 if (ac->ac_bitmap_page) 6009 put_page(ac->ac_bitmap_page); 6010 if (ac->ac_buddy_page) 6011 put_page(ac->ac_buddy_page); 6012 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 6013 mutex_unlock(&ac->ac_lg->lg_mutex); 6014 ext4_mb_collect_stats(ac); 6015 return 0; 6016 } 6017 6018 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 6019 { 6020 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 6021 int ret; 6022 int freed = 0, busy = 0; 6023 int retry = 0; 6024 6025 trace_ext4_mb_discard_preallocations(sb, needed); 6026 6027 if (needed == 0) 6028 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 6029 repeat: 6030 for (i = 0; i < ngroups && needed > 0; i++) { 6031 ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 6032 freed += ret; 6033 needed -= ret; 6034 cond_resched(); 6035 } 6036 6037 if (needed > 0 && busy && ++retry < 3) { 6038 busy = 0; 6039 goto repeat; 6040 } 6041 6042 return freed; 6043 } 6044 6045 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 6046 struct ext4_allocation_context *ac, u64 *seq) 6047 { 6048 int freed; 6049 u64 seq_retry = 0; 6050 bool ret = false; 6051 6052 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 6053 if (freed) { 6054 ret = true; 6055 goto out_dbg; 6056 } 6057 seq_retry = ext4_get_discard_pa_seq_sum(); 6058 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 6059 ac->ac_flags |= EXT4_MB_STRICT_CHECK; 6060 *seq = seq_retry; 6061 ret = true; 6062 } 6063 6064 out_dbg: 6065 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no"); 6066 return ret; 6067 } 6068 6069 /* 6070 * Simple allocator for Ext4 fast commit replay path. It searches for blocks 6071 * linearly starting at the goal block and also excludes the blocks which 6072 * are going to be in use after fast commit replay. 
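 *
 * Roughly: starting from the goal's group and offset, each group's on-disk
 * bitmap is scanned for the first zero bit that is not on the fast-commit
 * exclusion list, wrapping around all groups; a single cluster is then
 * marked in use and returned (or -ENOSPC if every group is exhausted).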
6073  */
6074 static ext4_fsblk_t
6075 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6076 {
6077 	struct buffer_head *bitmap_bh;
6078 	struct super_block *sb = ar->inode->i_sb;
6079 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6080 	ext4_group_t group, nr;
6081 	ext4_grpblk_t blkoff;
6082 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6083 	ext4_grpblk_t i = 0;
6084 	ext4_fsblk_t goal, block;
6085 	struct ext4_super_block *es = sbi->s_es;
6086 
6087 	goal = ar->goal;
6088 	if (goal < le32_to_cpu(es->s_first_data_block) ||
6089 	    goal >= ext4_blocks_count(es))
6090 		goal = le32_to_cpu(es->s_first_data_block);
6091 
6092 	ar->len = 0;
6093 	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6094 	for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6095 		bitmap_bh = ext4_read_block_bitmap(sb, group);
6096 		if (IS_ERR(bitmap_bh)) {
6097 			*errp = PTR_ERR(bitmap_bh);
6098 			pr_warn("Failed to read block bitmap\n");
6099 			return 0;
6100 		}
6101 
6102 		while (1) {
6103 			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6104 						blkoff);
6105 			if (i >= max)
6106 				break;
6107 			if (ext4_fc_replay_check_excluded(sb,
6108 				ext4_group_first_block_no(sb, group) +
6109 				EXT4_C2B(sbi, i))) {
6110 				blkoff = i + 1;
6111 			} else
6112 				break;
6113 		}
6114 		brelse(bitmap_bh);
6115 		if (i < max)
6116 			break;
6117 
6118 		if (++group >= ext4_get_groups_count(sb))
6119 			group = 0;
6120 
6121 		blkoff = 0;
6122 	}
6123 
6124 	if (i >= max) {
6125 		*errp = -ENOSPC;
6126 		return 0;
6127 	}
6128 
6129 	block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6130 	ext4_mb_mark_bb(sb, block, 1, 1);
6131 	ar->len = 1;
6132 
6133 	return block;
6134 }
6135 
6136 /*
6137 * Main entry point into mballoc to allocate blocks;
6138 * it tries to use preallocation first, then falls back
6139 * to the usual allocation path.
6140 */
6141 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6142 				struct ext4_allocation_request *ar, int *errp)
6143 {
6144 	struct ext4_allocation_context *ac = NULL;
6145 	struct ext4_sb_info *sbi;
6146 	struct super_block *sb;
6147 	ext4_fsblk_t block = 0;
6148 	unsigned int inquota = 0;
6149 	unsigned int reserv_clstrs = 0;
6150 	int retries = 0;
6151 	u64 seq;
6152 
6153 	might_sleep();
6154 	sb = ar->inode->i_sb;
6155 	sbi = EXT4_SB(sb);
6156 
6157 	trace_ext4_request_blocks(ar);
6158 	if (sbi->s_mount_state & EXT4_FC_REPLAY)
6159 		return ext4_mb_new_blocks_simple(ar, errp);
6160 
6161 	/* Allow use of the superuser reservation for the quota file */
6162 	if (ext4_is_quota_file(ar->inode))
6163 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6164 
6165 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6166 		/* Without delayed allocation we need to verify
6167 		 * there are enough free blocks to do block allocation
6168 		 * and verify allocation doesn't exceed the quota limits.
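		 * For instance, a 256-cluster request on a nearly full
		 * filesystem is retried at 128, 64, 32, ... clusters until
		 * ext4_claim_free_clusters() succeeds, or the length reaches
		 * zero and we fail with -ENOSPC.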
6169 */ 6170 while (ar->len && 6171 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 6172 6173 /* let others to free the space */ 6174 cond_resched(); 6175 ar->len = ar->len >> 1; 6176 } 6177 if (!ar->len) { 6178 ext4_mb_show_pa(sb); 6179 *errp = -ENOSPC; 6180 return 0; 6181 } 6182 reserv_clstrs = ar->len; 6183 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 6184 dquot_alloc_block_nofail(ar->inode, 6185 EXT4_C2B(sbi, ar->len)); 6186 } else { 6187 while (ar->len && 6188 dquot_alloc_block(ar->inode, 6189 EXT4_C2B(sbi, ar->len))) { 6190 6191 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 6192 ar->len--; 6193 } 6194 } 6195 inquota = ar->len; 6196 if (ar->len == 0) { 6197 *errp = -EDQUOT; 6198 goto out; 6199 } 6200 } 6201 6202 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 6203 if (!ac) { 6204 ar->len = 0; 6205 *errp = -ENOMEM; 6206 goto out; 6207 } 6208 6209 ext4_mb_initialize_context(ac, ar); 6210 6211 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 6212 seq = this_cpu_read(discard_pa_seq); 6213 if (!ext4_mb_use_preallocated(ac)) { 6214 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 6215 ext4_mb_normalize_request(ac, ar); 6216 6217 *errp = ext4_mb_pa_alloc(ac); 6218 if (*errp) 6219 goto errout; 6220 repeat: 6221 /* allocate space in core */ 6222 *errp = ext4_mb_regular_allocator(ac); 6223 /* 6224 * pa allocated above is added to grp->bb_prealloc_list only 6225 * when we were able to allocate some block i.e. when 6226 * ac->ac_status == AC_STATUS_FOUND. 6227 * And error from above mean ac->ac_status != AC_STATUS_FOUND 6228 * So we have to free this pa here itself. 6229 */ 6230 if (*errp) { 6231 ext4_mb_pa_put_free(ac); 6232 ext4_discard_allocated_blocks(ac); 6233 goto errout; 6234 } 6235 if (ac->ac_status == AC_STATUS_FOUND && 6236 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 6237 ext4_mb_pa_put_free(ac); 6238 } 6239 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 6240 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 6241 if (*errp) { 6242 ext4_discard_allocated_blocks(ac); 6243 goto errout; 6244 } else { 6245 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 6246 ar->len = ac->ac_b_ex.fe_len; 6247 } 6248 } else { 6249 if (++retries < 3 && 6250 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 6251 goto repeat; 6252 /* 6253 * If block allocation fails then the pa allocated above 6254 * needs to be freed here itself. 6255 */ 6256 ext4_mb_pa_put_free(ac); 6257 *errp = -ENOSPC; 6258 } 6259 6260 if (*errp) { 6261 errout: 6262 ac->ac_b_ex.fe_len = 0; 6263 ar->len = 0; 6264 ext4_mb_show_ac(ac); 6265 } 6266 ext4_mb_release_context(ac); 6267 kmem_cache_free(ext4_ac_cachep, ac); 6268 out: 6269 if (inquota && ar->len < inquota) 6270 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 6271 if (!ar->len) { 6272 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 6273 /* release all the reserved blocks if non delalloc */ 6274 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 6275 reserv_clstrs); 6276 } 6277 6278 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 6279 6280 return block; 6281 } 6282 6283 /* 6284 * We can merge two free data extents only if the physical blocks 6285 * are contiguous, AND the extents were freed by the same transaction, 6286 * AND the blocks are associated with the same group. 
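 *
 * Example (illustrative): an entry covering clusters 100-149 freed in
 * transaction 42 merges with a new entry for clusters 150-179 from the
 * same transaction and group into one record for 100-179; with different
 * tids the two stay separate, so each batch is only released once its own
 * transaction has committed.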
6287 */ 6288 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 6289 struct ext4_free_data *entry, 6290 struct ext4_free_data *new_entry, 6291 struct rb_root *entry_rb_root) 6292 { 6293 if ((entry->efd_tid != new_entry->efd_tid) || 6294 (entry->efd_group != new_entry->efd_group)) 6295 return; 6296 if (entry->efd_start_cluster + entry->efd_count == 6297 new_entry->efd_start_cluster) { 6298 new_entry->efd_start_cluster = entry->efd_start_cluster; 6299 new_entry->efd_count += entry->efd_count; 6300 } else if (new_entry->efd_start_cluster + new_entry->efd_count == 6301 entry->efd_start_cluster) { 6302 new_entry->efd_count += entry->efd_count; 6303 } else 6304 return; 6305 spin_lock(&sbi->s_md_lock); 6306 list_del(&entry->efd_list); 6307 spin_unlock(&sbi->s_md_lock); 6308 rb_erase(&entry->efd_node, entry_rb_root); 6309 kmem_cache_free(ext4_free_data_cachep, entry); 6310 } 6311 6312 static noinline_for_stack void 6313 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 6314 struct ext4_free_data *new_entry) 6315 { 6316 ext4_group_t group = e4b->bd_group; 6317 ext4_grpblk_t cluster; 6318 ext4_grpblk_t clusters = new_entry->efd_count; 6319 struct ext4_free_data *entry; 6320 struct ext4_group_info *db = e4b->bd_info; 6321 struct super_block *sb = e4b->bd_sb; 6322 struct ext4_sb_info *sbi = EXT4_SB(sb); 6323 struct rb_node **n = &db->bb_free_root.rb_node, *node; 6324 struct rb_node *parent = NULL, *new_node; 6325 6326 BUG_ON(!ext4_handle_valid(handle)); 6327 BUG_ON(e4b->bd_bitmap_page == NULL); 6328 BUG_ON(e4b->bd_buddy_page == NULL); 6329 6330 new_node = &new_entry->efd_node; 6331 cluster = new_entry->efd_start_cluster; 6332 6333 if (!*n) { 6334 /* first free block exent. We need to 6335 protect buddy cache from being freed, 6336 * otherwise we'll refresh it from 6337 * on-disk bitmap and lose not-yet-available 6338 * blocks */ 6339 get_page(e4b->bd_buddy_page); 6340 get_page(e4b->bd_bitmap_page); 6341 } 6342 while (*n) { 6343 parent = *n; 6344 entry = rb_entry(parent, struct ext4_free_data, efd_node); 6345 if (cluster < entry->efd_start_cluster) 6346 n = &(*n)->rb_left; 6347 else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 6348 n = &(*n)->rb_right; 6349 else { 6350 ext4_grp_locked_error(sb, group, 0, 6351 ext4_group_first_block_no(sb, group) + 6352 EXT4_C2B(sbi, cluster), 6353 "Block already on to-be-freed list"); 6354 kmem_cache_free(ext4_free_data_cachep, new_entry); 6355 return; 6356 } 6357 } 6358 6359 rb_link_node(new_node, parent, n); 6360 rb_insert_color(new_node, &db->bb_free_root); 6361 6362 /* Now try to see the extent can be merged to left and right */ 6363 node = rb_prev(new_node); 6364 if (node) { 6365 entry = rb_entry(node, struct ext4_free_data, efd_node); 6366 ext4_try_merge_freed_extent(sbi, entry, new_entry, 6367 &(db->bb_free_root)); 6368 } 6369 6370 node = rb_next(new_node); 6371 if (node) { 6372 entry = rb_entry(node, struct ext4_free_data, efd_node); 6373 ext4_try_merge_freed_extent(sbi, entry, new_entry, 6374 &(db->bb_free_root)); 6375 } 6376 6377 spin_lock(&sbi->s_md_lock); 6378 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 6379 sbi->s_mb_free_pending += clusters; 6380 spin_unlock(&sbi->s_md_lock); 6381 } 6382 6383 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 6384 unsigned long count) 6385 { 6386 struct buffer_head *bitmap_bh; 6387 struct super_block *sb = inode->i_sb; 6388 struct ext4_group_desc *gdp; 6389 struct buffer_head *gdp_bh; 6390 ext4_group_t group; 6391 ext4_grpblk_t 
blkoff; 6392 int already_freed = 0, err, i; 6393 6394 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 6395 bitmap_bh = ext4_read_block_bitmap(sb, group); 6396 if (IS_ERR(bitmap_bh)) { 6397 pr_warn("Failed to read block bitmap\n"); 6398 return; 6399 } 6400 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 6401 if (!gdp) 6402 goto err_out; 6403 6404 for (i = 0; i < count; i++) { 6405 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 6406 already_freed++; 6407 } 6408 mb_clear_bits(bitmap_bh->b_data, blkoff, count); 6409 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 6410 if (err) 6411 goto err_out; 6412 ext4_free_group_clusters_set( 6413 sb, gdp, ext4_free_group_clusters(sb, gdp) + 6414 count - already_freed); 6415 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6416 ext4_group_desc_csum_set(sb, group, gdp); 6417 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 6418 sync_dirty_buffer(bitmap_bh); 6419 sync_dirty_buffer(gdp_bh); 6420 6421 err_out: 6422 brelse(bitmap_bh); 6423 } 6424 6425 /** 6426 * ext4_mb_clear_bb() -- helper function for freeing blocks. 6427 * Used by ext4_free_blocks() 6428 * @handle: handle for this transaction 6429 * @inode: inode 6430 * @block: starting physical block to be freed 6431 * @count: number of blocks to be freed 6432 * @flags: flags used by ext4_free_blocks 6433 */ 6434 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 6435 ext4_fsblk_t block, unsigned long count, 6436 int flags) 6437 { 6438 struct buffer_head *bitmap_bh = NULL; 6439 struct super_block *sb = inode->i_sb; 6440 struct ext4_group_desc *gdp; 6441 struct ext4_group_info *grp; 6442 unsigned int overflow; 6443 ext4_grpblk_t bit; 6444 struct buffer_head *gd_bh; 6445 ext4_group_t block_group; 6446 struct ext4_sb_info *sbi; 6447 struct ext4_buddy e4b; 6448 unsigned int count_clusters; 6449 int err = 0; 6450 int ret; 6451 6452 sbi = EXT4_SB(sb); 6453 6454 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6455 !ext4_inode_block_valid(inode, block, count)) { 6456 ext4_error(sb, "Freeing blocks in system zone - " 6457 "Block = %llu, count = %lu", block, count); 6458 /* err = 0. ext4_std_error should be a no op */ 6459 goto error_return; 6460 } 6461 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6462 6463 do_more: 6464 overflow = 0; 6465 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6466 6467 grp = ext4_get_group_info(sb, block_group); 6468 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 6469 return; 6470 6471 /* 6472 * Check to see if we are freeing blocks across a group 6473 * boundary. 6474 */ 6475 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 6476 overflow = EXT4_C2B(sbi, bit) + count - 6477 EXT4_BLOCKS_PER_GROUP(sb); 6478 count -= overflow; 6479 /* The range changed so it's no longer validated */ 6480 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6481 } 6482 count_clusters = EXT4_NUM_B2C(sbi, count); 6483 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 6484 if (IS_ERR(bitmap_bh)) { 6485 err = PTR_ERR(bitmap_bh); 6486 bitmap_bh = NULL; 6487 goto error_return; 6488 } 6489 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 6490 if (!gdp) { 6491 err = -EIO; 6492 goto error_return; 6493 } 6494 6495 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6496 !ext4_inode_block_valid(inode, block, count)) { 6497 ext4_error(sb, "Freeing blocks in system zone - " 6498 "Block = %llu, count = %lu", block, count); 6499 /* err = 0. 
ext4_std_error should be a no op */ 6500 goto error_return; 6501 } 6502 6503 BUFFER_TRACE(bitmap_bh, "getting write access"); 6504 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6505 EXT4_JTR_NONE); 6506 if (err) 6507 goto error_return; 6508 6509 /* 6510 * We are about to modify some metadata. Call the journal APIs 6511 * to unshare ->b_data if a currently-committing transaction is 6512 * using it 6513 */ 6514 BUFFER_TRACE(gd_bh, "get_write_access"); 6515 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6516 if (err) 6517 goto error_return; 6518 #ifdef AGGRESSIVE_CHECK 6519 { 6520 int i; 6521 for (i = 0; i < count_clusters; i++) 6522 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 6523 } 6524 #endif 6525 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6526 6527 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6528 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6529 GFP_NOFS|__GFP_NOFAIL); 6530 if (err) 6531 goto error_return; 6532 6533 /* 6534 * We need to make sure we don't reuse the freed block until after the 6535 * transaction is committed. We make an exception if the inode is to be 6536 * written in writeback mode since writeback mode has weak data 6537 * consistency guarantees. 6538 */ 6539 if (ext4_handle_valid(handle) && 6540 ((flags & EXT4_FREE_BLOCKS_METADATA) || 6541 !ext4_should_writeback_data(inode))) { 6542 struct ext4_free_data *new_entry; 6543 /* 6544 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 6545 * to fail. 6546 */ 6547 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 6548 GFP_NOFS|__GFP_NOFAIL); 6549 new_entry->efd_start_cluster = bit; 6550 new_entry->efd_group = block_group; 6551 new_entry->efd_count = count_clusters; 6552 new_entry->efd_tid = handle->h_transaction->t_tid; 6553 6554 ext4_lock_group(sb, block_group); 6555 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 6556 ext4_mb_free_metadata(handle, &e4b, new_entry); 6557 } else { 6558 /* need to update group_info->bb_free and bitmap 6559 * with group lock held. 
generate_buddy look at 6560 * them with group lock_held 6561 */ 6562 if (test_opt(sb, DISCARD)) { 6563 err = ext4_issue_discard(sb, block_group, bit, 6564 count_clusters, NULL); 6565 if (err && err != -EOPNOTSUPP) 6566 ext4_msg(sb, KERN_WARNING, "discard request in" 6567 " group:%u block:%d count:%lu failed" 6568 " with %d", block_group, bit, count, 6569 err); 6570 } else 6571 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6572 6573 ext4_lock_group(sb, block_group); 6574 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 6575 mb_free_blocks(inode, &e4b, bit, count_clusters); 6576 } 6577 6578 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6579 ext4_free_group_clusters_set(sb, gdp, ret); 6580 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6581 ext4_group_desc_csum_set(sb, block_group, gdp); 6582 ext4_unlock_group(sb, block_group); 6583 6584 if (sbi->s_log_groups_per_flex) { 6585 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6586 atomic64_add(count_clusters, 6587 &sbi_array_rcu_deref(sbi, s_flex_groups, 6588 flex_group)->free_clusters); 6589 } 6590 6591 /* 6592 * on a bigalloc file system, defer the s_freeclusters_counter 6593 * update to the caller (ext4_remove_space and friends) so they 6594 * can determine if a cluster freed here should be rereserved 6595 */ 6596 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 6597 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 6598 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 6599 percpu_counter_add(&sbi->s_freeclusters_counter, 6600 count_clusters); 6601 } 6602 6603 ext4_mb_unload_buddy(&e4b); 6604 6605 /* We dirtied the bitmap block */ 6606 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 6607 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 6608 6609 /* And the group descriptor block */ 6610 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 6611 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6612 if (!err) 6613 err = ret; 6614 6615 if (overflow && !err) { 6616 block += count; 6617 count = overflow; 6618 put_bh(bitmap_bh); 6619 /* The range changed so it's no longer validated */ 6620 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6621 goto do_more; 6622 } 6623 error_return: 6624 brelse(bitmap_bh); 6625 ext4_std_error(sb, err); 6626 } 6627 6628 /** 6629 * ext4_free_blocks() -- Free given blocks and update quota 6630 * @handle: handle for this transaction 6631 * @inode: inode 6632 * @bh: optional buffer of the block to be freed 6633 * @block: starting physical block to be freed 6634 * @count: number of blocks to be freed 6635 * @flags: flags used by ext4_free_blocks 6636 */ 6637 void ext4_free_blocks(handle_t *handle, struct inode *inode, 6638 struct buffer_head *bh, ext4_fsblk_t block, 6639 unsigned long count, int flags) 6640 { 6641 struct super_block *sb = inode->i_sb; 6642 unsigned int overflow; 6643 struct ext4_sb_info *sbi; 6644 6645 sbi = EXT4_SB(sb); 6646 6647 if (bh) { 6648 if (block) 6649 BUG_ON(block != bh->b_blocknr); 6650 else 6651 block = bh->b_blocknr; 6652 } 6653 6654 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 6655 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count)); 6656 return; 6657 } 6658 6659 might_sleep(); 6660 6661 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6662 !ext4_inode_block_valid(inode, block, count)) { 6663 ext4_error(sb, "Freeing blocks not in datazone - " 6664 "block = %llu, count = %lu", block, count); 6665 return; 6666 } 6667 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6668 6669 ext4_debug("freeing block %llu\n", block); 6670 trace_ext4_free_blocks(inode, block, 
count, flags); 6671 6672 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6673 BUG_ON(count > 1); 6674 6675 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 6676 inode, bh, block); 6677 } 6678 6679 /* 6680 * If the extent to be freed does not begin on a cluster 6681 * boundary, we need to deal with partial clusters at the 6682 * beginning and end of the extent. Normally we will free 6683 * blocks at the beginning or the end unless we are explicitly 6684 * requested to avoid doing so. 6685 */ 6686 overflow = EXT4_PBLK_COFF(sbi, block); 6687 if (overflow) { 6688 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 6689 overflow = sbi->s_cluster_ratio - overflow; 6690 block += overflow; 6691 if (count > overflow) 6692 count -= overflow; 6693 else 6694 return; 6695 } else { 6696 block -= overflow; 6697 count += overflow; 6698 } 6699 /* The range changed so it's no longer validated */ 6700 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6701 } 6702 overflow = EXT4_LBLK_COFF(sbi, count); 6703 if (overflow) { 6704 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 6705 if (count > overflow) 6706 count -= overflow; 6707 else 6708 return; 6709 } else 6710 count += sbi->s_cluster_ratio - overflow; 6711 /* The range changed so it's no longer validated */ 6712 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6713 } 6714 6715 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6716 int i; 6717 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 6718 6719 for (i = 0; i < count; i++) { 6720 cond_resched(); 6721 if (is_metadata) 6722 bh = sb_find_get_block(inode->i_sb, block + i); 6723 ext4_forget(handle, is_metadata, inode, bh, block + i); 6724 } 6725 } 6726 6727 ext4_mb_clear_bb(handle, inode, block, count, flags); 6728 } 6729 6730 /** 6731 * ext4_group_add_blocks() -- Add given blocks to an existing group 6732 * @handle: handle to this transaction 6733 * @sb: super block 6734 * @block: start physical block to add to the block group 6735 * @count: number of blocks to free 6736 * 6737 * This marks the blocks as free in the bitmap and buddy. 6738 */ 6739 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 6740 ext4_fsblk_t block, unsigned long count) 6741 { 6742 struct buffer_head *bitmap_bh = NULL; 6743 struct buffer_head *gd_bh; 6744 ext4_group_t block_group; 6745 ext4_grpblk_t bit; 6746 unsigned int i; 6747 struct ext4_group_desc *desc; 6748 struct ext4_sb_info *sbi = EXT4_SB(sb); 6749 struct ext4_buddy e4b; 6750 int err = 0, ret, free_clusters_count; 6751 ext4_grpblk_t clusters_freed; 6752 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6753 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6754 unsigned long cluster_count = last_cluster - first_cluster + 1; 6755 6756 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 6757 6758 if (count == 0) 6759 return 0; 6760 6761 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6762 /* 6763 * Check to see if we are freeing blocks across a group 6764 * boundary. 
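	 * Note that unlike ext4_mb_clear_bb(), which splits such a range and
	 * loops over both groups, a range spilling past the end of this
	 * group is simply rejected here with -EINVAL.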
6765 */ 6766 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6767 ext4_warning(sb, "too many blocks added to group %u", 6768 block_group); 6769 err = -EINVAL; 6770 goto error_return; 6771 } 6772 6773 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 6774 if (IS_ERR(bitmap_bh)) { 6775 err = PTR_ERR(bitmap_bh); 6776 bitmap_bh = NULL; 6777 goto error_return; 6778 } 6779 6780 desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6781 if (!desc) { 6782 err = -EIO; 6783 goto error_return; 6784 } 6785 6786 if (!ext4_sb_block_valid(sb, NULL, block, count)) { 6787 ext4_error(sb, "Adding blocks in system zones - " 6788 "Block = %llu, count = %lu", 6789 block, count); 6790 err = -EINVAL; 6791 goto error_return; 6792 } 6793 6794 BUFFER_TRACE(bitmap_bh, "getting write access"); 6795 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6796 EXT4_JTR_NONE); 6797 if (err) 6798 goto error_return; 6799 6800 /* 6801 * We are about to modify some metadata. Call the journal APIs 6802 * to unshare ->b_data if a currently-committing transaction is 6803 * using it 6804 */ 6805 BUFFER_TRACE(gd_bh, "get_write_access"); 6806 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6807 if (err) 6808 goto error_return; 6809 6810 for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 6811 BUFFER_TRACE(bitmap_bh, "clear bit"); 6812 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 6813 ext4_error(sb, "bit already cleared for block %llu", 6814 (ext4_fsblk_t)(block + i)); 6815 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 6816 } else { 6817 clusters_freed++; 6818 } 6819 } 6820 6821 err = ext4_mb_load_buddy(sb, block_group, &e4b); 6822 if (err) 6823 goto error_return; 6824 6825 /* 6826 * need to update group_info->bb_free and bitmap 6827 * with group lock held. generate_buddy look at 6828 * them with group lock_held 6829 */ 6830 ext4_lock_group(sb, block_group); 6831 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6832 mb_free_blocks(NULL, &e4b, bit, cluster_count); 6833 free_clusters_count = clusters_freed + 6834 ext4_free_group_clusters(sb, desc); 6835 ext4_free_group_clusters_set(sb, desc, free_clusters_count); 6836 ext4_block_bitmap_csum_set(sb, desc, bitmap_bh); 6837 ext4_group_desc_csum_set(sb, block_group, desc); 6838 ext4_unlock_group(sb, block_group); 6839 percpu_counter_add(&sbi->s_freeclusters_counter, 6840 clusters_freed); 6841 6842 if (sbi->s_log_groups_per_flex) { 6843 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6844 atomic64_add(clusters_freed, 6845 &sbi_array_rcu_deref(sbi, s_flex_groups, 6846 flex_group)->free_clusters); 6847 } 6848 6849 ext4_mb_unload_buddy(&e4b); 6850 6851 /* We dirtied the bitmap block */ 6852 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 6853 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 6854 6855 /* And the group descriptor block */ 6856 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 6857 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6858 if (!err) 6859 err = ret; 6860 6861 error_return: 6862 brelse(bitmap_bh); 6863 ext4_std_error(sb, err); 6864 return err; 6865 } 6866 6867 /** 6868 * ext4_trim_extent -- function to TRIM one single free extent in the group 6869 * @sb: super block for the file system 6870 * @start: starting block of the free extent in the alloc. group 6871 * @count: number of blocks to TRIM 6872 * @e4b: ext4 buddy for the group 6873 * 6874 * Trim "count" blocks starting at "start" in the "group". 
To assure that no 6875 * one will allocate those blocks, mark it as used in buddy bitmap. This must 6876 * be called with under the group lock. 6877 */ 6878 static int ext4_trim_extent(struct super_block *sb, 6879 int start, int count, struct ext4_buddy *e4b) 6880 __releases(bitlock) 6881 __acquires(bitlock) 6882 { 6883 struct ext4_free_extent ex; 6884 ext4_group_t group = e4b->bd_group; 6885 int ret = 0; 6886 6887 trace_ext4_trim_extent(sb, group, start, count); 6888 6889 assert_spin_locked(ext4_group_lock_ptr(sb, group)); 6890 6891 ex.fe_start = start; 6892 ex.fe_group = group; 6893 ex.fe_len = count; 6894 6895 /* 6896 * Mark blocks used, so no one can reuse them while 6897 * being trimmed. 6898 */ 6899 mb_mark_used(e4b, &ex); 6900 ext4_unlock_group(sb, group); 6901 ret = ext4_issue_discard(sb, group, start, count, NULL); 6902 ext4_lock_group(sb, group); 6903 mb_free_blocks(NULL, e4b, start, ex.fe_len); 6904 return ret; 6905 } 6906 6907 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb, 6908 ext4_group_t grp) 6909 { 6910 unsigned long nr_clusters_in_group; 6911 6912 if (grp < (ext4_get_groups_count(sb) - 1)) 6913 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb); 6914 else 6915 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) - 6916 ext4_group_first_block_no(sb, grp)) 6917 >> EXT4_CLUSTER_BITS(sb); 6918 6919 return nr_clusters_in_group - 1; 6920 } 6921 6922 static bool ext4_trim_interrupted(void) 6923 { 6924 return fatal_signal_pending(current) || freezing(current); 6925 } 6926 6927 static int ext4_try_to_trim_range(struct super_block *sb, 6928 struct ext4_buddy *e4b, ext4_grpblk_t start, 6929 ext4_grpblk_t max, ext4_grpblk_t minblocks) 6930 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6931 __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 6932 { 6933 ext4_grpblk_t next, count, free_count, last, origin_start; 6934 bool set_trimmed = false; 6935 void *bitmap; 6936 6937 last = ext4_last_grp_cluster(sb, e4b->bd_group); 6938 bitmap = e4b->bd_bitmap; 6939 if (start == 0 && max >= last) 6940 set_trimmed = true; 6941 origin_start = start; 6942 start = max(e4b->bd_info->bb_first_free, start); 6943 count = 0; 6944 free_count = 0; 6945 6946 while (start <= max) { 6947 start = mb_find_next_zero_bit(bitmap, max + 1, start); 6948 if (start > max) 6949 break; 6950 6951 next = mb_find_next_bit(bitmap, last + 1, start); 6952 if (origin_start == 0 && next >= last) 6953 set_trimmed = true; 6954 6955 if ((next - start) >= minblocks) { 6956 int ret = ext4_trim_extent(sb, start, next - start, e4b); 6957 6958 if (ret && ret != -EOPNOTSUPP) 6959 return count; 6960 count += next - start; 6961 } 6962 free_count += next - start; 6963 start = next + 1; 6964 6965 if (ext4_trim_interrupted()) 6966 return count; 6967 6968 if (need_resched()) { 6969 ext4_unlock_group(sb, e4b->bd_group); 6970 cond_resched(); 6971 ext4_lock_group(sb, e4b->bd_group); 6972 } 6973 6974 if ((e4b->bd_info->bb_free - free_count) < minblocks) 6975 break; 6976 } 6977 6978 if (set_trimmed) 6979 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info); 6980 6981 return count; 6982 } 6983 6984 /** 6985 * ext4_trim_all_free -- function to trim all free space in alloc. group 6986 * @sb: super block for file system 6987 * @group: group to be trimmed 6988 * @start: first group block to examine 6989 * @max: last group block to examine 6990 * @minblocks: minimum extent block count 6991 * 6992 * ext4_trim_all_free walks through group's block bitmap searching for free 6993 * extents. 
When the free extent is found, mark it as used in group buddy 6994 * bitmap. Then issue a TRIM command on this extent and free the extent in 6995 * the group buddy bitmap. 6996 */ 6997 static ext4_grpblk_t 6998 ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 6999 ext4_grpblk_t start, ext4_grpblk_t max, 7000 ext4_grpblk_t minblocks) 7001 { 7002 struct ext4_buddy e4b; 7003 int ret; 7004 7005 trace_ext4_trim_all_free(sb, group, start, max); 7006 7007 ret = ext4_mb_load_buddy(sb, group, &e4b); 7008 if (ret) { 7009 ext4_warning(sb, "Error %d loading buddy information for %u", 7010 ret, group); 7011 return ret; 7012 } 7013 7014 ext4_lock_group(sb, group); 7015 7016 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || 7017 minblocks < EXT4_SB(sb)->s_last_trim_minblks) 7018 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); 7019 else 7020 ret = 0; 7021 7022 ext4_unlock_group(sb, group); 7023 ext4_mb_unload_buddy(&e4b); 7024 7025 ext4_debug("trimmed %d blocks in the group %d\n", 7026 ret, group); 7027 7028 return ret; 7029 } 7030 7031 /** 7032 * ext4_trim_fs() -- trim ioctl handle function 7033 * @sb: superblock for filesystem 7034 * @range: fstrim_range structure 7035 * 7036 * start: First Byte to trim 7037 * len: number of Bytes to trim from start 7038 * minlen: minimum extent length in Bytes 7039 * ext4_trim_fs goes through all allocation groups containing Bytes from 7040 * start to start+len. For each such a group ext4_trim_all_free function 7041 * is invoked to trim all free space. 7042 */ 7043 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 7044 { 7045 unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev); 7046 struct ext4_group_info *grp; 7047 ext4_group_t group, first_group, last_group; 7048 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 7049 uint64_t start, end, minlen, trimmed = 0; 7050 ext4_fsblk_t first_data_blk = 7051 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 7052 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 7053 int ret = 0; 7054 7055 start = range->start >> sb->s_blocksize_bits; 7056 end = start + (range->len >> sb->s_blocksize_bits) - 1; 7057 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 7058 range->minlen >> sb->s_blocksize_bits); 7059 7060 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 7061 start >= max_blks || 7062 range->len < sb->s_blocksize) 7063 return -EINVAL; 7064 /* No point to try to trim less than discard granularity */ 7065 if (range->minlen < discard_granularity) { 7066 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 7067 discard_granularity >> sb->s_blocksize_bits); 7068 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) 7069 goto out; 7070 } 7071 if (end >= max_blks - 1) 7072 end = max_blks - 1; 7073 if (end <= first_data_blk) 7074 goto out; 7075 if (start < first_data_blk) 7076 start = first_data_blk; 7077 7078 /* Determine first and last group to examine based on start and end */ 7079 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 7080 &first_group, &first_cluster); 7081 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 7082 &last_group, &last_cluster); 7083 7084 /* end now represents the last cluster to discard in this group */ 7085 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7086 7087 for (group = first_group; group <= last_group; group++) { 7088 if (ext4_trim_interrupted()) 7089 break; 7090 grp = ext4_get_group_info(sb, group); 7091 if (!grp) 7092 continue; 7093 /* We only do this if the grp has never been initialized */ 7094 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 7095 ret = ext4_mb_init_group(sb, 
group, GFP_NOFS); 7096 if (ret) 7097 break; 7098 } 7099 7100 /* 7101 * For all the groups except the last one, last cluster will 7102 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 7103 * change it for the last group, note that last_cluster is 7104 * already computed earlier by ext4_get_group_no_and_offset() 7105 */ 7106 if (group == last_group) 7107 end = last_cluster; 7108 if (grp->bb_free >= minlen) { 7109 cnt = ext4_trim_all_free(sb, group, first_cluster, 7110 end, minlen); 7111 if (cnt < 0) { 7112 ret = cnt; 7113 break; 7114 } 7115 trimmed += cnt; 7116 } 7117 7118 /* 7119 * For every group except the first one, we are sure 7120 * that the first cluster to discard will be cluster #0. 7121 */ 7122 first_cluster = 0; 7123 } 7124 7125 if (!ret) 7126 EXT4_SB(sb)->s_last_trim_minblks = minlen; 7127 7128 out: 7129 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 7130 return ret; 7131 } 7132 7133 /* Iterate all the free extents in the group. */ 7134 int 7135 ext4_mballoc_query_range( 7136 struct super_block *sb, 7137 ext4_group_t group, 7138 ext4_grpblk_t start, 7139 ext4_grpblk_t end, 7140 ext4_mballoc_query_range_fn formatter, 7141 void *priv) 7142 { 7143 void *bitmap; 7144 ext4_grpblk_t next; 7145 struct ext4_buddy e4b; 7146 int error; 7147 7148 error = ext4_mb_load_buddy(sb, group, &e4b); 7149 if (error) 7150 return error; 7151 bitmap = e4b.bd_bitmap; 7152 7153 ext4_lock_group(sb, group); 7154 7155 start = max(e4b.bd_info->bb_first_free, start); 7156 if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 7157 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7158 7159 while (start <= end) { 7160 start = mb_find_next_zero_bit(bitmap, end + 1, start); 7161 if (start > end) 7162 break; 7163 next = mb_find_next_bit(bitmap, end + 1, start); 7164 7165 ext4_unlock_group(sb, group); 7166 error = formatter(sb, group, start, next - start, priv); 7167 if (error) 7168 goto out_unload; 7169 ext4_lock_group(sb, group); 7170 7171 start = next + 1; 7172 } 7173 7174 ext4_unlock_group(sb, group); 7175 out_unload: 7176 ext4_mb_unload_buddy(&e4b); 7177 7178 return error; 7179 } 7180
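/*
 * Sketch of how a caller might drive ext4_mballoc_query_range() above; the
 * callback name and what it does are made up for illustration only (the
 * real in-tree user is the fsmap code).
 *
 *	static int print_free_extent(struct super_block *sb, ext4_group_t grp,
 *				     ext4_grpblk_t start, ext4_grpblk_t len,
 *				     void *priv)
 *	{
 *		pr_info("group %u: %d free clusters at %d\n", grp, len, start);
 *		return 0;
 *	}
 *
 *	err = ext4_mballoc_query_range(sb, group, 0,
 *				       EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *				       print_free_extent, NULL);
 */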