// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified goal
 * block.
 *
 * During the initialization phase of the allocator we decide whether to use
 * group preallocation or inode preallocation depending on the size of the
 * file. The size of the file could be the resulting file size we would have
 * after allocation, or the current file size, whichever is larger. If the
 * size is less than sbi->s_mb_stream_request we select group preallocation.
 * The default value of s_mb_stream_request is 16 blocks. This can also be
 * tuned via /sys/fs/ext4/<partition>/mb_stream_req. The value is expressed
 * as a number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * keep small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is represented
 * as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is looked up using the _logical_ start
 * block. Only if the logical file block falls within the range of a
 * prealloc space do we consume that prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we don't
 * modify any of its values except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce contention
 * between CPUs. It is possible to get scheduled at this point.
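 *
 * Putting the two cases together, a minimal sketch of the size-based
 * decision described above (illustrative pseudo-code only; the actual
 * policy lives in ext4_mb_group_or_file()):
 *
 *   size = max(size after this allocation, current file size)
 *   if (size < sbi->s_mb_stream_request)
 *           use this CPU's locality group preallocation   (small file)
 *   else
 *           use the inode preallocation list               (large file)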
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode), whose file offsets get
 * mapped to the buddy and bitmap information of the different groups. The
 * buddy information is attached to the buddy cache inode so that we can
 * access it through the page cache. The information regarding each group
 * is loaded via ext4_mb_load_buddy. It consists of the block bitmap and
 * the buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and the buddy information. So for each
 * group we take up 2 blocks. A page can contain blocks_per_page
 * (PAGE_SIZE / blocksize) blocks, so it can hold information for
 * groups_per_page groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for 'count' blocks in the buddy cache. If we are able to locate
 * that many free blocks we return with additional information regarding
 * the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we actually need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc
 * depends on the cluster size; for non-bigalloc file systems it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is expressed as a
 * number of blocks. If the file system is mounted with the -o
 * stripe=<value> option, the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the array index represents the
 *    largest free order in the buddy bitmap of the groups on that list.
 *    So there are exactly MB_NUM_ORDERS(sb) lists (the total number of
 *    possible buddy bitmap orders). Group infos are placed on the
 *    appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where the i-th list contains groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
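 *
 *    For example (illustrative numbers): a group with bb_free = 600 free
 *    clusters spread over bb_fragments = 3 free extents has an average
 *    fragment size of 200 clusters, so it is linked into the list whose
 *    [128, 256) range covers 200.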
 *    Note that we don't bother with a special list for completely empty
 *    groups, so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the
 * above data structures to decide the order in which groups are traversed
 * to fulfill an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups whose largest_free_order is
 * >= the order of the request. We directly look at the largest free order
 * list in data structure (1) above where largest_free_order = order of the
 * request. If that list is empty, we look at the remaining lists in
 * increasing order of largest_free_order. This allows us to perform the
 * CR_POWER2_ALIGNED lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the average fragment
 * size is greater than the request size. So we look up a group whose
 * average fragment size is just above or equal to the request size using
 * the average fragment size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be
 * satisfied in CR_GOAL_LEN_FAST. Our failure to find a group in
 * CR_GOAL_LEN_FAST suggests that there is no block group whose average
 * fragment size is greater than the goal length. So, before falling back
 * to the slower CR_GOAL_LEN_SLOW, CR_BEST_AVAIL_LEN proactively trims the
 * goal length and then uses the same fragment lists as CR_GOAL_LEN_FAST to
 * find a block group with a big enough average fragment size. This
 * increases the chances of finding a suitable block group in O(1) time and
 * results in faster allocation at the cost of a reduced allocation size.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses
 * groups in linear order, which requires O(N) search time for the
 * CR_POWER2_ALIGNED and CR_GOAL_LEN_FAST phases.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of the allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks of
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best extent
 * and max_to_scan indicates how long mballoc __can__ look for a best extent
 * among the found extents. The search for blocks starts with the group
 * specified as the goal value in the allocation context via ac_g_ex. Each
 * group is first checked against the criteria to decide whether it can be
 * used for allocation. ext4_mb_good_group explains how groups are checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations not
 * being close to each other, and so the underlying device may get filled up
 * in a non-linear fashion.
 * While that may not matter on non-rotational devices, on rotational
 * devices it may result in higher seek times. "mb_linear_limit" tells
 * mballoc how many groups it should search linearly before consulting the
 * above data structures for more efficient lookups. For non-rotational
 * devices this value defaults to 0, and for rotational devices it is set
 * to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as described above. So for
 * the first request we will hit the buddy cache, which results in the
 * prealloc space getting filled. The prealloc space is then used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocated space can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the number
 * of blocks marked used/free in the on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                       buddy = on-disk + PAs
 *  - new PA:                           buddy += N; PA = N
 *  - use inode PA:                     on-disk += N; PA -= N
 *  - discard inode PA:                 buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:            on-disk += N; PA -= N
 *  - discard locality group PA:        buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation, because we can't know the actually
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we followed this strict logic, all the operations above would have to
 * be atomic. given that some of them can block, we'd have to use something
 * like semaphores, killing performance on high-end SMP hardware. let's try
 * to relax it using the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 *     bitmap has a bit set and a PA claims the same block, that's OK.
 *     IOW, one can set a bit in the on-disk bitmap if the buddy has the
 *     same bit set and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, so the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data. given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again, PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before any
 *    discard, or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and the PAs is done
 *    atomically
 *
 * a special case is when we've used a PA to emptiness:
no need to modify buddy 323 * in this case, but we should care about concurrent init 324 * 325 */ 326 327 /* 328 * Logic in few words: 329 * 330 * - allocation: 331 * load group 332 * find blocks 333 * mark bits in on-disk bitmap 334 * release group 335 * 336 * - use preallocation: 337 * find proper PA (per-inode or group) 338 * load group 339 * mark bits in on-disk bitmap 340 * release group 341 * release PA 342 * 343 * - free: 344 * load group 345 * mark bits in on-disk bitmap 346 * release group 347 * 348 * - discard preallocations in group: 349 * mark PAs deleted 350 * move them onto local list 351 * load on-disk bitmap 352 * load group 353 * remove PA from object (inode or locality group) 354 * mark free blocks in-core 355 * 356 * - discard inode's preallocations: 357 */ 358 359 /* 360 * Locking rules 361 * 362 * Locks: 363 * - bitlock on a group (group) 364 * - object (inode/locality) (object) 365 * - per-pa lock (pa) 366 * - cr_power2_aligned lists lock (cr_power2_aligned) 367 * - cr_goal_len_fast lists lock (cr_goal_len_fast) 368 * 369 * Paths: 370 * - new pa 371 * object 372 * group 373 * 374 * - find and use pa: 375 * pa 376 * 377 * - release consumed pa: 378 * pa 379 * group 380 * object 381 * 382 * - generate in-core bitmap: 383 * group 384 * pa 385 * 386 * - discard all for given object (inode, locality group): 387 * object 388 * pa 389 * group 390 * 391 * - discard all for given group: 392 * group 393 * pa 394 * group 395 * object 396 * 397 * - allocation path (ext4_mb_regular_allocator) 398 * group 399 * cr_power2_aligned/cr_goal_len_fast 400 */ 401 static struct kmem_cache *ext4_pspace_cachep; 402 static struct kmem_cache *ext4_ac_cachep; 403 static struct kmem_cache *ext4_free_data_cachep; 404 405 /* We create slab caches for groupinfo data structures based on the 406 * superblock block size. There will be one per mounted filesystem for 407 * each unique s_blocksize_bits */ 408 #define NR_GRPINFO_CACHES 8 409 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; 410 411 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { 412 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", 413 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", 414 "ext4_groupinfo_64k", "ext4_groupinfo_128k" 415 }; 416 417 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 418 ext4_group_t group); 419 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 420 ext4_group_t group); 421 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); 422 423 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 424 ext4_group_t group, enum criteria cr); 425 426 static int ext4_try_to_trim_range(struct super_block *sb, 427 struct ext4_buddy *e4b, ext4_grpblk_t start, 428 ext4_grpblk_t max, ext4_grpblk_t minblocks); 429 430 /* 431 * The algorithm using this percpu seq counter goes below: 432 * 1. We sample the percpu discard_pa_seq counter before trying for block 433 * allocation in ext4_mb_new_blocks(). 434 * 2. We increment this percpu discard_pa_seq counter when we either allocate 435 * or free these blocks i.e. while marking those blocks as used/free in 436 * mb_mark_used()/mb_free_blocks(). 437 * 3. We also increment this percpu seq counter when we successfully identify 438 * that the bb_prealloc_list is not empty and hence proceed for discarding 439 * of those PAs inside ext4_mb_discard_group_preallocations(). 
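 *
 * A rough usage sketch (illustrative and simplified; the real retry logic
 * lives in ext4_mb_new_blocks() and its helpers):
 *
 *   seq = this_cpu_read(discard_pa_seq);
 *   attempt the block allocation;
 *   if it failed and no preallocations could be discarded:
 *           retry only if ext4_get_discard_pa_seq_sum() != seq, i.e. some
 *           other CPU freed or discarded blocks in the meantime;
 *           otherwise give up and return -ENOSPC.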
440 * 441 * Now to make sure that the regular fast path of block allocation is not 442 * affected, as a small optimization we only sample the percpu seq counter 443 * on that cpu. Only when the block allocation fails and when freed blocks 444 * found were 0, that is when we sample percpu seq counter for all cpus using 445 * below function ext4_get_discard_pa_seq_sum(). This happens after making 446 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty. 447 */ 448 static DEFINE_PER_CPU(u64, discard_pa_seq); 449 static inline u64 ext4_get_discard_pa_seq_sum(void) 450 { 451 int __cpu; 452 u64 __seq = 0; 453 454 for_each_possible_cpu(__cpu) 455 __seq += per_cpu(discard_pa_seq, __cpu); 456 return __seq; 457 } 458 459 static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 460 { 461 #if BITS_PER_LONG == 64 462 *bit += ((unsigned long) addr & 7UL) << 3; 463 addr = (void *) ((unsigned long) addr & ~7UL); 464 #elif BITS_PER_LONG == 32 465 *bit += ((unsigned long) addr & 3UL) << 3; 466 addr = (void *) ((unsigned long) addr & ~3UL); 467 #else 468 #error "how many bits you are?!" 469 #endif 470 return addr; 471 } 472 473 static inline int mb_test_bit(int bit, void *addr) 474 { 475 /* 476 * ext4_test_bit on architecture like powerpc 477 * needs unsigned long aligned address 478 */ 479 addr = mb_correct_addr_and_bit(&bit, addr); 480 return ext4_test_bit(bit, addr); 481 } 482 483 static inline void mb_set_bit(int bit, void *addr) 484 { 485 addr = mb_correct_addr_and_bit(&bit, addr); 486 ext4_set_bit(bit, addr); 487 } 488 489 static inline void mb_clear_bit(int bit, void *addr) 490 { 491 addr = mb_correct_addr_and_bit(&bit, addr); 492 ext4_clear_bit(bit, addr); 493 } 494 495 static inline int mb_test_and_clear_bit(int bit, void *addr) 496 { 497 addr = mb_correct_addr_and_bit(&bit, addr); 498 return ext4_test_and_clear_bit(bit, addr); 499 } 500 501 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 502 { 503 int fix = 0, ret, tmpmax; 504 addr = mb_correct_addr_and_bit(&fix, addr); 505 tmpmax = max + fix; 506 start += fix; 507 508 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 509 if (ret > max) 510 return max; 511 return ret; 512 } 513 514 static inline int mb_find_next_bit(void *addr, int max, int start) 515 { 516 int fix = 0, ret, tmpmax; 517 addr = mb_correct_addr_and_bit(&fix, addr); 518 tmpmax = max + fix; 519 start += fix; 520 521 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 522 if (ret > max) 523 return max; 524 return ret; 525 } 526 527 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 528 { 529 char *bb; 530 531 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 532 BUG_ON(max == NULL); 533 534 if (order > e4b->bd_blkbits + 1) { 535 *max = 0; 536 return NULL; 537 } 538 539 /* at order 0 we see each particular block */ 540 if (order == 0) { 541 *max = 1 << (e4b->bd_blkbits + 3); 542 return e4b->bd_bitmap; 543 } 544 545 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 546 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 547 548 return bb; 549 } 550 551 #ifdef DOUBLE_CHECK 552 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 553 int first, int count) 554 { 555 int i; 556 struct super_block *sb = e4b->bd_sb; 557 558 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 559 return; 560 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 561 for (i = 0; i < count; i++) { 562 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 563 ext4_fsblk_t blocknr; 564 565 blocknr = 
ext4_group_first_block_no(sb, e4b->bd_group); 566 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 567 ext4_grp_locked_error(sb, e4b->bd_group, 568 inode ? inode->i_ino : 0, 569 blocknr, 570 "freeing block already freed " 571 "(bit %u)", 572 first + i); 573 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 574 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 575 } 576 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 577 } 578 } 579 580 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 581 { 582 int i; 583 584 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 585 return; 586 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 587 for (i = 0; i < count; i++) { 588 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 589 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 590 } 591 } 592 593 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 594 { 595 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 596 return; 597 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 598 unsigned char *b1, *b2; 599 int i; 600 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 601 b2 = (unsigned char *) bitmap; 602 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 603 if (b1[i] != b2[i]) { 604 ext4_msg(e4b->bd_sb, KERN_ERR, 605 "corruption in group %u " 606 "at byte %u(%u): %x in copy != %x " 607 "on disk/prealloc", 608 e4b->bd_group, i, i * 8, b1[i], b2[i]); 609 BUG(); 610 } 611 } 612 } 613 } 614 615 static void mb_group_bb_bitmap_alloc(struct super_block *sb, 616 struct ext4_group_info *grp, ext4_group_t group) 617 { 618 struct buffer_head *bh; 619 620 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); 621 if (!grp->bb_bitmap) 622 return; 623 624 bh = ext4_read_block_bitmap(sb, group); 625 if (IS_ERR_OR_NULL(bh)) { 626 kfree(grp->bb_bitmap); 627 grp->bb_bitmap = NULL; 628 return; 629 } 630 631 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); 632 put_bh(bh); 633 } 634 635 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 636 { 637 kfree(grp->bb_bitmap); 638 } 639 640 #else 641 static inline void mb_free_blocks_double(struct inode *inode, 642 struct ext4_buddy *e4b, int first, int count) 643 { 644 return; 645 } 646 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 647 int first, int count) 648 { 649 return; 650 } 651 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 652 { 653 return; 654 } 655 656 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, 657 struct ext4_group_info *grp, ext4_group_t group) 658 { 659 return; 660 } 661 662 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 663 { 664 return; 665 } 666 #endif 667 668 #ifdef AGGRESSIVE_CHECK 669 670 #define MB_CHECK_ASSERT(assert) \ 671 do { \ 672 if (!(assert)) { \ 673 printk(KERN_EMERG \ 674 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 675 function, file, line, # assert); \ 676 BUG(); \ 677 } \ 678 } while (0) 679 680 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 681 const char *function, int line) 682 { 683 struct super_block *sb = e4b->bd_sb; 684 int order = e4b->bd_blkbits + 1; 685 int max; 686 int max2; 687 int i; 688 int j; 689 int k; 690 int count; 691 struct ext4_group_info *grp; 692 int fragments = 0; 693 int fstart; 694 struct list_head *cur; 695 void *buddy; 696 void *buddy2; 697 698 if (e4b->bd_info->bb_check_counter++ % 10) 699 return 0; 700 701 while (order > 1) { 702 buddy = mb_find_buddy(e4b, order, &max); 703 MB_CHECK_ASSERT(buddy); 704 buddy2 = mb_find_buddy(e4b, order - 
1, &max2); 705 MB_CHECK_ASSERT(buddy2); 706 MB_CHECK_ASSERT(buddy != buddy2); 707 MB_CHECK_ASSERT(max * 2 == max2); 708 709 count = 0; 710 for (i = 0; i < max; i++) { 711 712 if (mb_test_bit(i, buddy)) { 713 /* only single bit in buddy2 may be 0 */ 714 if (!mb_test_bit(i << 1, buddy2)) { 715 MB_CHECK_ASSERT( 716 mb_test_bit((i<<1)+1, buddy2)); 717 } 718 continue; 719 } 720 721 /* both bits in buddy2 must be 1 */ 722 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 723 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 724 725 for (j = 0; j < (1 << order); j++) { 726 k = (i * (1 << order)) + j; 727 MB_CHECK_ASSERT( 728 !mb_test_bit(k, e4b->bd_bitmap)); 729 } 730 count++; 731 } 732 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 733 order--; 734 } 735 736 fstart = -1; 737 buddy = mb_find_buddy(e4b, 0, &max); 738 for (i = 0; i < max; i++) { 739 if (!mb_test_bit(i, buddy)) { 740 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 741 if (fstart == -1) { 742 fragments++; 743 fstart = i; 744 } 745 continue; 746 } 747 fstart = -1; 748 /* check used bits only */ 749 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 750 buddy2 = mb_find_buddy(e4b, j, &max2); 751 k = i >> j; 752 MB_CHECK_ASSERT(k < max2); 753 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 754 } 755 } 756 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 757 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 758 759 grp = ext4_get_group_info(sb, e4b->bd_group); 760 if (!grp) 761 return NULL; 762 list_for_each(cur, &grp->bb_prealloc_list) { 763 ext4_group_t groupnr; 764 struct ext4_prealloc_space *pa; 765 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 766 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 767 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 768 for (i = 0; i < pa->pa_len; i++) 769 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 770 } 771 return 0; 772 } 773 #undef MB_CHECK_ASSERT 774 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 775 __FILE__, __func__, __LINE__) 776 #else 777 #define mb_check_buddy(e4b) 778 #endif 779 780 /* 781 * Divide blocks started from @first with length @len into 782 * smaller chunks with power of 2 blocks. 783 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 784 * then increase bb_counters[] for corresponded chunk size. 785 */ 786 static void ext4_mb_mark_free_simple(struct super_block *sb, 787 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 788 struct ext4_group_info *grp) 789 { 790 struct ext4_sb_info *sbi = EXT4_SB(sb); 791 ext4_grpblk_t min; 792 ext4_grpblk_t max; 793 ext4_grpblk_t chunk; 794 unsigned int border; 795 796 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 797 798 border = 2 << sb->s_blocksize_bits; 799 800 while (len > 0) { 801 /* find how many blocks can be covered since this position */ 802 max = ffs(first | border) - 1; 803 804 /* find how many blocks of power 2 we need to mark */ 805 min = fls(len) - 1; 806 807 if (max < min) 808 min = max; 809 chunk = 1 << min; 810 811 /* mark multiblock chunks only */ 812 grp->bb_counters[min]++; 813 if (min > 0) 814 mb_clear_bit(first >> min, 815 buddy + sbi->s_mb_offsets[min]); 816 817 len -= chunk; 818 first += chunk; 819 } 820 } 821 822 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len) 823 { 824 int order; 825 826 /* 827 * We don't bother with a special lists groups with only 1 block free 828 * extents and for completely empty groups. 
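	 *
	 * For example (illustrative): len = 200 gives fls(200) - 2 = 6,
	 * while len = 1 gives -1, which is clamped to 0 below.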
829 */ 830 order = fls(len) - 2; 831 if (order < 0) 832 return 0; 833 if (order == MB_NUM_ORDERS(sb)) 834 order--; 835 return order; 836 } 837 838 /* Move group to appropriate avg_fragment_size list */ 839 static void 840 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) 841 { 842 struct ext4_sb_info *sbi = EXT4_SB(sb); 843 int new_order; 844 845 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0) 846 return; 847 848 new_order = mb_avg_fragment_size_order(sb, 849 grp->bb_free / grp->bb_fragments); 850 if (new_order == grp->bb_avg_fragment_size_order) 851 return; 852 853 if (grp->bb_avg_fragment_size_order != -1) { 854 write_lock(&sbi->s_mb_avg_fragment_size_locks[ 855 grp->bb_avg_fragment_size_order]); 856 list_del(&grp->bb_avg_fragment_size_node); 857 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ 858 grp->bb_avg_fragment_size_order]); 859 } 860 grp->bb_avg_fragment_size_order = new_order; 861 write_lock(&sbi->s_mb_avg_fragment_size_locks[ 862 grp->bb_avg_fragment_size_order]); 863 list_add_tail(&grp->bb_avg_fragment_size_node, 864 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); 865 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ 866 grp->bb_avg_fragment_size_order]); 867 } 868 869 /* 870 * Choose next group by traversing largest_free_order lists. Updates *new_cr if 871 * cr level needs an update. 872 */ 873 static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac, 874 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 875 { 876 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 877 struct ext4_group_info *iter, *grp; 878 int i; 879 880 if (ac->ac_status == AC_STATUS_FOUND) 881 return; 882 883 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) 884 atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions); 885 886 grp = NULL; 887 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 888 if (list_empty(&sbi->s_mb_largest_free_orders[i])) 889 continue; 890 read_lock(&sbi->s_mb_largest_free_orders_locks[i]); 891 if (list_empty(&sbi->s_mb_largest_free_orders[i])) { 892 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 893 continue; 894 } 895 grp = NULL; 896 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i], 897 bb_largest_free_order_node) { 898 if (sbi->s_mb_stats) 899 atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]); 900 if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) { 901 grp = iter; 902 break; 903 } 904 } 905 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); 906 if (grp) 907 break; 908 } 909 910 if (!grp) { 911 /* Increment cr and search again */ 912 *new_cr = CR_GOAL_LEN_FAST; 913 } else { 914 *group = grp->bb_group; 915 ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; 916 } 917 } 918 919 /* 920 * Find a suitable group of given order from the average fragments list. 
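 *
 * The list is checked locklessly first and then re-checked under the
 * per-order read lock, so a NULL return here is only a hint that nothing
 * suitable was found at the time of the scan.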
921 */ 922 static struct ext4_group_info * 923 ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order) 924 { 925 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 926 struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order]; 927 rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order]; 928 struct ext4_group_info *grp = NULL, *iter; 929 enum criteria cr = ac->ac_criteria; 930 931 if (list_empty(frag_list)) 932 return NULL; 933 read_lock(frag_list_lock); 934 if (list_empty(frag_list)) { 935 read_unlock(frag_list_lock); 936 return NULL; 937 } 938 list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) { 939 if (sbi->s_mb_stats) 940 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); 941 if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) { 942 grp = iter; 943 break; 944 } 945 } 946 read_unlock(frag_list_lock); 947 return grp; 948 } 949 950 /* 951 * Choose next group by traversing average fragment size list of suitable 952 * order. Updates *new_cr if cr level needs an update. 953 */ 954 static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac, 955 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 956 { 957 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 958 struct ext4_group_info *grp = NULL; 959 int i; 960 961 if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { 962 if (sbi->s_mb_stats) 963 atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions); 964 } 965 966 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); 967 i < MB_NUM_ORDERS(ac->ac_sb); i++) { 968 grp = ext4_mb_find_good_group_avg_frag_lists(ac, i); 969 if (grp) 970 break; 971 } 972 973 if (grp) { 974 *group = grp->bb_group; 975 ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; 976 } else { 977 *new_cr = CR_BEST_AVAIL_LEN; 978 } 979 } 980 981 /* 982 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment 983 * order we have and proactively trim the goal request length to that order to 984 * find a suitable group faster. 985 * 986 * This optimizes allocation speed at the cost of slightly reduced 987 * preallocations. However, we make sure that we don't trim the request too 988 * much and fall to CR_GOAL_LEN_SLOW in that case. 989 */ 990 static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac, 991 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 992 { 993 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 994 struct ext4_group_info *grp = NULL; 995 int i, order, min_order; 996 unsigned long num_stripe_clusters = 0; 997 998 if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { 999 if (sbi->s_mb_stats) 1000 atomic_inc(&sbi->s_bal_best_avail_bad_suggestions); 1001 } 1002 1003 /* 1004 * mb_avg_fragment_size_order() returns order in a way that makes 1005 * retrieving back the length using (1 << order) inaccurate. Hence, use 1006 * fls() instead since we need to know the actual length while modifying 1007 * goal length. 1008 */ 1009 order = fls(ac->ac_g_ex.fe_len); 1010 min_order = order - sbi->s_mb_best_avail_max_trim_order; 1011 if (min_order < 0) 1012 min_order = 0; 1013 1014 if (1 << min_order < ac->ac_o_ex.fe_len) 1015 min_order = fls(ac->ac_o_ex.fe_len) + 1; 1016 1017 if (sbi->s_stripe > 0) { 1018 /* 1019 * We are assuming that stripe size is always a multiple of 1020 * cluster ratio otherwise __ext4_fill_super exists early. 
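		 *
		 * For example (illustrative): s_stripe = 256 blocks on a
		 * bigalloc filesystem with 16 blocks per cluster gives
		 * num_stripe_clusters = 16, so min_order is raised to
		 * fls(16) = 5 whenever 1 << min_order would fall below 16.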
1021 */ 1022 num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe); 1023 if (1 << min_order < num_stripe_clusters) 1024 min_order = fls(num_stripe_clusters); 1025 } 1026 1027 for (i = order; i >= min_order; i--) { 1028 int frag_order; 1029 /* 1030 * Scale down goal len to make sure we find something 1031 * in the free fragments list. Basically, reduce 1032 * preallocations. 1033 */ 1034 ac->ac_g_ex.fe_len = 1 << i; 1035 1036 if (num_stripe_clusters > 0) { 1037 /* 1038 * Try to round up the adjusted goal length to 1039 * stripe size (in cluster units) multiple for 1040 * efficiency. 1041 */ 1042 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, 1043 num_stripe_clusters); 1044 } 1045 1046 frag_order = mb_avg_fragment_size_order(ac->ac_sb, 1047 ac->ac_g_ex.fe_len); 1048 1049 grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order); 1050 if (grp) 1051 break; 1052 } 1053 1054 if (grp) { 1055 *group = grp->bb_group; 1056 ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; 1057 } else { 1058 /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */ 1059 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 1060 *new_cr = CR_GOAL_LEN_SLOW; 1061 } 1062 } 1063 1064 static inline int should_optimize_scan(struct ext4_allocation_context *ac) 1065 { 1066 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) 1067 return 0; 1068 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) 1069 return 0; 1070 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) 1071 return 0; 1072 return 1; 1073 } 1074 1075 /* 1076 * Return next linear group for allocation. If linear traversal should not be 1077 * performed, this function just returns the same group 1078 */ 1079 static int 1080 next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups) 1081 { 1082 if (!should_optimize_scan(ac)) 1083 goto inc_and_return; 1084 1085 if (ac->ac_groups_linear_remaining) { 1086 ac->ac_groups_linear_remaining--; 1087 goto inc_and_return; 1088 } 1089 1090 return group; 1091 inc_and_return: 1092 /* 1093 * Artificially restricted ngroups for non-extent 1094 * files makes group > ngroups possible on first loop. 1095 */ 1096 return group + 1 >= ngroups ? 0 : group + 1; 1097 } 1098 1099 /* 1100 * ext4_mb_choose_next_group: choose next group for allocation. 1101 * 1102 * @ac Allocation Context 1103 * @new_cr This is an output parameter. If the there is no good group 1104 * available at current CR level, this field is updated to indicate 1105 * the new cr level that should be used. 1106 * @group This is an input / output parameter. As an input it indicates the 1107 * next group that the allocator intends to use for allocation. As 1108 * output, this field indicates the next group that should be used as 1109 * determined by the optimization functions. 
1110 * @ngroups Total number of groups 1111 */ 1112 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, 1113 enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 1114 { 1115 *new_cr = ac->ac_criteria; 1116 1117 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) { 1118 *group = next_linear_group(ac, *group, ngroups); 1119 return; 1120 } 1121 1122 if (*new_cr == CR_POWER2_ALIGNED) { 1123 ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups); 1124 } else if (*new_cr == CR_GOAL_LEN_FAST) { 1125 ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups); 1126 } else if (*new_cr == CR_BEST_AVAIL_LEN) { 1127 ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups); 1128 } else { 1129 /* 1130 * TODO: For CR=2, we can arrange groups in an rb tree sorted by 1131 * bb_free. But until that happens, we should never come here. 1132 */ 1133 WARN_ON(1); 1134 } 1135 } 1136 1137 /* 1138 * Cache the order of the largest free extent we have available in this block 1139 * group. 1140 */ 1141 static void 1142 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 1143 { 1144 struct ext4_sb_info *sbi = EXT4_SB(sb); 1145 int i; 1146 1147 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) 1148 if (grp->bb_counters[i] > 0) 1149 break; 1150 /* No need to move between order lists? */ 1151 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || 1152 i == grp->bb_largest_free_order) { 1153 grp->bb_largest_free_order = i; 1154 return; 1155 } 1156 1157 if (grp->bb_largest_free_order >= 0) { 1158 write_lock(&sbi->s_mb_largest_free_orders_locks[ 1159 grp->bb_largest_free_order]); 1160 list_del_init(&grp->bb_largest_free_order_node); 1161 write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1162 grp->bb_largest_free_order]); 1163 } 1164 grp->bb_largest_free_order = i; 1165 if (grp->bb_largest_free_order >= 0 && grp->bb_free) { 1166 write_lock(&sbi->s_mb_largest_free_orders_locks[ 1167 grp->bb_largest_free_order]); 1168 list_add_tail(&grp->bb_largest_free_order_node, 1169 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1170 write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1171 grp->bb_largest_free_order]); 1172 } 1173 } 1174 1175 static noinline_for_stack 1176 void ext4_mb_generate_buddy(struct super_block *sb, 1177 void *buddy, void *bitmap, ext4_group_t group, 1178 struct ext4_group_info *grp) 1179 { 1180 struct ext4_sb_info *sbi = EXT4_SB(sb); 1181 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1182 ext4_grpblk_t i = 0; 1183 ext4_grpblk_t first; 1184 ext4_grpblk_t len; 1185 unsigned free = 0; 1186 unsigned fragments = 0; 1187 unsigned long long period = get_cycles(); 1188 1189 /* initialize buddy from bitmap which is aggregation 1190 * of on-disk bitmap and preallocations */ 1191 i = mb_find_next_zero_bit(bitmap, max, 0); 1192 grp->bb_first_free = i; 1193 while (i < max) { 1194 fragments++; 1195 first = i; 1196 i = mb_find_next_bit(bitmap, max, i); 1197 len = i - first; 1198 free += len; 1199 if (len > 1) 1200 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1201 else 1202 grp->bb_counters[0]++; 1203 if (i < max) 1204 i = mb_find_next_zero_bit(bitmap, max, i); 1205 } 1206 grp->bb_fragments = fragments; 1207 1208 if (free != grp->bb_free) { 1209 ext4_grp_locked_error(sb, group, 0, 0, 1210 "block bitmap and bg descriptor " 1211 "inconsistent: %u vs %u free clusters", 1212 free, grp->bb_free); 1213 /* 1214 * If we intend to continue, we consider group descriptor 1215 * corrupt and update bb_free using bitmap value 1216 */ 1217 
grp->bb_free = free; 1218 ext4_mark_group_bitmap_corrupted(sb, group, 1219 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1220 } 1221 mb_set_largest_free_order(sb, grp); 1222 mb_update_avg_fragment_size(sb, grp); 1223 1224 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1225 1226 period = get_cycles() - period; 1227 atomic_inc(&sbi->s_mb_buddies_generated); 1228 atomic64_add(period, &sbi->s_mb_generation_time); 1229 } 1230 1231 /* The buddy information is attached the buddy cache inode 1232 * for convenience. The information regarding each group 1233 * is loaded via ext4_mb_load_buddy. The information involve 1234 * block bitmap and buddy information. The information are 1235 * stored in the inode as 1236 * 1237 * { page } 1238 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1239 * 1240 * 1241 * one block each for bitmap and buddy information. 1242 * So for each group we take up 2 blocks. A page can 1243 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 1244 * So it can have information regarding groups_per_page which 1245 * is blocks_per_page/2 1246 * 1247 * Locking note: This routine takes the block group lock of all groups 1248 * for this page; do not hold this lock when calling this routine! 1249 */ 1250 1251 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1252 { 1253 ext4_group_t ngroups; 1254 int blocksize; 1255 int blocks_per_page; 1256 int groups_per_page; 1257 int err = 0; 1258 int i; 1259 ext4_group_t first_group, group; 1260 int first_block; 1261 struct super_block *sb; 1262 struct buffer_head *bhs; 1263 struct buffer_head **bh = NULL; 1264 struct inode *inode; 1265 char *data; 1266 char *bitmap; 1267 struct ext4_group_info *grinfo; 1268 1269 inode = page->mapping->host; 1270 sb = inode->i_sb; 1271 ngroups = ext4_get_groups_count(sb); 1272 blocksize = i_blocksize(inode); 1273 blocks_per_page = PAGE_SIZE / blocksize; 1274 1275 mb_debug(sb, "init page %lu\n", page->index); 1276 1277 groups_per_page = blocks_per_page >> 1; 1278 if (groups_per_page == 0) 1279 groups_per_page = 1; 1280 1281 /* allocate buffer_heads to read bitmaps */ 1282 if (groups_per_page > 1) { 1283 i = sizeof(struct buffer_head *) * groups_per_page; 1284 bh = kzalloc(i, gfp); 1285 if (bh == NULL) 1286 return -ENOMEM; 1287 } else 1288 bh = &bhs; 1289 1290 first_group = page->index * blocks_per_page / 2; 1291 1292 /* read all groups the page covers into the cache */ 1293 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1294 if (group >= ngroups) 1295 break; 1296 1297 grinfo = ext4_get_group_info(sb, group); 1298 if (!grinfo) 1299 continue; 1300 /* 1301 * If page is uptodate then we came here after online resize 1302 * which added some new uninitialized group info structs, so 1303 * we must skip all initialized uptodate buddies on the page, 1304 * which may be currently in use by an allocating task. 
1305 */ 1306 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 1307 bh[i] = NULL; 1308 continue; 1309 } 1310 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 1311 if (IS_ERR(bh[i])) { 1312 err = PTR_ERR(bh[i]); 1313 bh[i] = NULL; 1314 goto out; 1315 } 1316 mb_debug(sb, "read bitmap for group %u\n", group); 1317 } 1318 1319 /* wait for I/O completion */ 1320 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1321 int err2; 1322 1323 if (!bh[i]) 1324 continue; 1325 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 1326 if (!err) 1327 err = err2; 1328 } 1329 1330 first_block = page->index * blocks_per_page; 1331 for (i = 0; i < blocks_per_page; i++) { 1332 group = (first_block + i) >> 1; 1333 if (group >= ngroups) 1334 break; 1335 1336 if (!bh[group - first_group]) 1337 /* skip initialized uptodate buddy */ 1338 continue; 1339 1340 if (!buffer_verified(bh[group - first_group])) 1341 /* Skip faulty bitmaps */ 1342 continue; 1343 err = 0; 1344 1345 /* 1346 * data carry information regarding this 1347 * particular group in the format specified 1348 * above 1349 * 1350 */ 1351 data = page_address(page) + (i * blocksize); 1352 bitmap = bh[group - first_group]->b_data; 1353 1354 /* 1355 * We place the buddy block and bitmap block 1356 * close together 1357 */ 1358 if ((first_block + i) & 1) { 1359 /* this is block of buddy */ 1360 BUG_ON(incore == NULL); 1361 mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1362 group, page->index, i * blocksize); 1363 trace_ext4_mb_buddy_bitmap_load(sb, group); 1364 grinfo = ext4_get_group_info(sb, group); 1365 if (!grinfo) { 1366 err = -EFSCORRUPTED; 1367 goto out; 1368 } 1369 grinfo->bb_fragments = 0; 1370 memset(grinfo->bb_counters, 0, 1371 sizeof(*grinfo->bb_counters) * 1372 (MB_NUM_ORDERS(sb))); 1373 /* 1374 * incore got set to the group block bitmap below 1375 */ 1376 ext4_lock_group(sb, group); 1377 /* init the buddy */ 1378 memset(data, 0xff, blocksize); 1379 ext4_mb_generate_buddy(sb, data, incore, group, grinfo); 1380 ext4_unlock_group(sb, group); 1381 incore = NULL; 1382 } else { 1383 /* this is block of bitmap */ 1384 BUG_ON(incore != NULL); 1385 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1386 group, page->index, i * blocksize); 1387 trace_ext4_mb_bitmap_load(sb, group); 1388 1389 /* see comments in ext4_mb_put_pa() */ 1390 ext4_lock_group(sb, group); 1391 memcpy(data, bitmap, blocksize); 1392 1393 /* mark all preallocated blks used in in-core bitmap */ 1394 ext4_mb_generate_from_pa(sb, data, group); 1395 ext4_mb_generate_from_freelist(sb, data, group); 1396 ext4_unlock_group(sb, group); 1397 1398 /* set incore so that the buddy information can be 1399 * generated using this 1400 */ 1401 incore = data; 1402 } 1403 } 1404 SetPageUptodate(page); 1405 1406 out: 1407 if (bh) { 1408 for (i = 0; i < groups_per_page; i++) 1409 brelse(bh[i]); 1410 if (bh != &bhs) 1411 kfree(bh); 1412 } 1413 return err; 1414 } 1415 1416 /* 1417 * Lock the buddy and bitmap pages. This make sure other parallel init_group 1418 * on the same buddy page doesn't happen whild holding the buddy page lock. 1419 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 1420 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 
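 *
 * For example (illustrative): with 4 KiB pages and 1 KiB blocks
 * (blocks_per_page = 4), group 5 uses buddy-cache blocks 10 and 11, which
 * both live in page 2; with 4 KiB blocks (blocks_per_page = 1) they live
 * in pages 10 and 11 respectively.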
1421 */ 1422 static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1423 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1424 { 1425 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 1426 int block, pnum, poff; 1427 int blocks_per_page; 1428 struct page *page; 1429 1430 e4b->bd_buddy_page = NULL; 1431 e4b->bd_bitmap_page = NULL; 1432 1433 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1434 /* 1435 * the buddy cache inode stores the block bitmap 1436 * and buddy information in consecutive blocks. 1437 * So for each group we need two blocks. 1438 */ 1439 block = group * 2; 1440 pnum = block / blocks_per_page; 1441 poff = block % blocks_per_page; 1442 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1443 if (!page) 1444 return -ENOMEM; 1445 BUG_ON(page->mapping != inode->i_mapping); 1446 e4b->bd_bitmap_page = page; 1447 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1448 1449 if (blocks_per_page >= 2) { 1450 /* buddy and bitmap are on the same page */ 1451 return 0; 1452 } 1453 1454 block++; 1455 pnum = block / blocks_per_page; 1456 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1457 if (!page) 1458 return -ENOMEM; 1459 BUG_ON(page->mapping != inode->i_mapping); 1460 e4b->bd_buddy_page = page; 1461 return 0; 1462 } 1463 1464 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1465 { 1466 if (e4b->bd_bitmap_page) { 1467 unlock_page(e4b->bd_bitmap_page); 1468 put_page(e4b->bd_bitmap_page); 1469 } 1470 if (e4b->bd_buddy_page) { 1471 unlock_page(e4b->bd_buddy_page); 1472 put_page(e4b->bd_buddy_page); 1473 } 1474 } 1475 1476 /* 1477 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1478 * block group lock of all groups for this page; do not hold the BG lock when 1479 * calling this routine! 1480 */ 1481 static noinline_for_stack 1482 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1483 { 1484 1485 struct ext4_group_info *this_grp; 1486 struct ext4_buddy e4b; 1487 struct page *page; 1488 int ret = 0; 1489 1490 might_sleep(); 1491 mb_debug(sb, "init group %u\n", group); 1492 this_grp = ext4_get_group_info(sb, group); 1493 if (!this_grp) 1494 return -EFSCORRUPTED; 1495 1496 /* 1497 * This ensures that we don't reinit the buddy cache 1498 * page which map to the group from which we are already 1499 * allocating. If we are looking at the buddy cache we would 1500 * have taken a reference using ext4_mb_load_buddy and that 1501 * would have pinned buddy page to page cache. 1502 * The call to ext4_mb_get_buddy_page_lock will mark the 1503 * page accessed. 
1504 */ 1505 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 1506 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1507 /* 1508 * somebody initialized the group 1509 * return without doing anything 1510 */ 1511 goto err; 1512 } 1513 1514 page = e4b.bd_bitmap_page; 1515 ret = ext4_mb_init_cache(page, NULL, gfp); 1516 if (ret) 1517 goto err; 1518 if (!PageUptodate(page)) { 1519 ret = -EIO; 1520 goto err; 1521 } 1522 1523 if (e4b.bd_buddy_page == NULL) { 1524 /* 1525 * If both the bitmap and buddy are in 1526 * the same page we don't need to force 1527 * init the buddy 1528 */ 1529 ret = 0; 1530 goto err; 1531 } 1532 /* init buddy cache */ 1533 page = e4b.bd_buddy_page; 1534 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 1535 if (ret) 1536 goto err; 1537 if (!PageUptodate(page)) { 1538 ret = -EIO; 1539 goto err; 1540 } 1541 err: 1542 ext4_mb_put_buddy_page_lock(&e4b); 1543 return ret; 1544 } 1545 1546 /* 1547 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1548 * block group lock of all groups for this page; do not hold the BG lock when 1549 * calling this routine! 1550 */ 1551 static noinline_for_stack int 1552 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1553 struct ext4_buddy *e4b, gfp_t gfp) 1554 { 1555 int blocks_per_page; 1556 int block; 1557 int pnum; 1558 int poff; 1559 struct page *page; 1560 int ret; 1561 struct ext4_group_info *grp; 1562 struct ext4_sb_info *sbi = EXT4_SB(sb); 1563 struct inode *inode = sbi->s_buddy_cache; 1564 1565 might_sleep(); 1566 mb_debug(sb, "load group %u\n", group); 1567 1568 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1569 grp = ext4_get_group_info(sb, group); 1570 if (!grp) 1571 return -EFSCORRUPTED; 1572 1573 e4b->bd_blkbits = sb->s_blocksize_bits; 1574 e4b->bd_info = grp; 1575 e4b->bd_sb = sb; 1576 e4b->bd_group = group; 1577 e4b->bd_buddy_page = NULL; 1578 e4b->bd_bitmap_page = NULL; 1579 1580 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1581 /* 1582 * we need full data about the group 1583 * to make a good selection 1584 */ 1585 ret = ext4_mb_init_group(sb, group, gfp); 1586 if (ret) 1587 return ret; 1588 } 1589 1590 /* 1591 * the buddy cache inode stores the block bitmap 1592 * and buddy information in consecutive blocks. 1593 * So for each group we need two blocks. 1594 */ 1595 block = group * 2; 1596 pnum = block / blocks_per_page; 1597 poff = block % blocks_per_page; 1598 1599 /* we could use find_or_create_page(), but it locks page 1600 * what we'd like to avoid in fast path ... */ 1601 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1602 if (page == NULL || !PageUptodate(page)) { 1603 if (page) 1604 /* 1605 * drop the page reference and try 1606 * to get the page with lock. If we 1607 * are not uptodate that implies 1608 * somebody just created the page but 1609 * is yet to initialize the same. So 1610 * wait for it to initialize. 
1611 */ 1612 put_page(page); 1613 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1614 if (page) { 1615 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 1616 "ext4: bitmap's paging->mapping != inode->i_mapping\n")) { 1617 /* should never happen */ 1618 unlock_page(page); 1619 ret = -EINVAL; 1620 goto err; 1621 } 1622 if (!PageUptodate(page)) { 1623 ret = ext4_mb_init_cache(page, NULL, gfp); 1624 if (ret) { 1625 unlock_page(page); 1626 goto err; 1627 } 1628 mb_cmp_bitmaps(e4b, page_address(page) + 1629 (poff * sb->s_blocksize)); 1630 } 1631 unlock_page(page); 1632 } 1633 } 1634 if (page == NULL) { 1635 ret = -ENOMEM; 1636 goto err; 1637 } 1638 if (!PageUptodate(page)) { 1639 ret = -EIO; 1640 goto err; 1641 } 1642 1643 /* Pages marked accessed already */ 1644 e4b->bd_bitmap_page = page; 1645 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1646 1647 block++; 1648 pnum = block / blocks_per_page; 1649 poff = block % blocks_per_page; 1650 1651 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1652 if (page == NULL || !PageUptodate(page)) { 1653 if (page) 1654 put_page(page); 1655 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1656 if (page) { 1657 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, 1658 "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { 1659 /* should never happen */ 1660 unlock_page(page); 1661 ret = -EINVAL; 1662 goto err; 1663 } 1664 if (!PageUptodate(page)) { 1665 ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1666 gfp); 1667 if (ret) { 1668 unlock_page(page); 1669 goto err; 1670 } 1671 } 1672 unlock_page(page); 1673 } 1674 } 1675 if (page == NULL) { 1676 ret = -ENOMEM; 1677 goto err; 1678 } 1679 if (!PageUptodate(page)) { 1680 ret = -EIO; 1681 goto err; 1682 } 1683 1684 /* Pages marked accessed already */ 1685 e4b->bd_buddy_page = page; 1686 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1687 1688 return 0; 1689 1690 err: 1691 if (page) 1692 put_page(page); 1693 if (e4b->bd_bitmap_page) 1694 put_page(e4b->bd_bitmap_page); 1695 1696 e4b->bd_buddy = NULL; 1697 e4b->bd_bitmap = NULL; 1698 return ret; 1699 } 1700 1701 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1702 struct ext4_buddy *e4b) 1703 { 1704 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1705 } 1706 1707 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1708 { 1709 if (e4b->bd_bitmap_page) 1710 put_page(e4b->bd_bitmap_page); 1711 if (e4b->bd_buddy_page) 1712 put_page(e4b->bd_buddy_page); 1713 } 1714 1715 1716 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1717 { 1718 int order = 1, max; 1719 void *bb; 1720 1721 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1722 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1723 1724 while (order <= e4b->bd_blkbits + 1) { 1725 bb = mb_find_buddy(e4b, order, &max); 1726 if (!mb_test_bit(block >> order, bb)) { 1727 /* this block is part of buddy of order 'order' */ 1728 return order; 1729 } 1730 order++; 1731 } 1732 return 0; 1733 } 1734 1735 static void mb_clear_bits(void *bm, int cur, int len) 1736 { 1737 __u32 *addr; 1738 1739 len = cur + len; 1740 while (cur < len) { 1741 if ((cur & 31) == 0 && (len - cur) >= 32) { 1742 /* fast path: clear whole word at once */ 1743 addr = bm + (cur >> 3); 1744 *addr = 0; 1745 cur += 32; 1746 continue; 1747 } 1748 mb_clear_bit(cur, bm); 1749 cur++; 1750 } 1751 } 1752 1753 /* clear bits in given range 1754 * will return first found zero bit if any, -1 otherwise 1755 */ 1756 static int 
mb_test_and_clear_bits(void *bm, int cur, int len) 1757 { 1758 __u32 *addr; 1759 int zero_bit = -1; 1760 1761 len = cur + len; 1762 while (cur < len) { 1763 if ((cur & 31) == 0 && (len - cur) >= 32) { 1764 /* fast path: clear whole word at once */ 1765 addr = bm + (cur >> 3); 1766 if (*addr != (__u32)(-1) && zero_bit == -1) 1767 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1768 *addr = 0; 1769 cur += 32; 1770 continue; 1771 } 1772 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1773 zero_bit = cur; 1774 cur++; 1775 } 1776 1777 return zero_bit; 1778 } 1779 1780 void mb_set_bits(void *bm, int cur, int len) 1781 { 1782 __u32 *addr; 1783 1784 len = cur + len; 1785 while (cur < len) { 1786 if ((cur & 31) == 0 && (len - cur) >= 32) { 1787 /* fast path: set whole word at once */ 1788 addr = bm + (cur >> 3); 1789 *addr = 0xffffffff; 1790 cur += 32; 1791 continue; 1792 } 1793 mb_set_bit(cur, bm); 1794 cur++; 1795 } 1796 } 1797 1798 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1799 { 1800 if (mb_test_bit(*bit + side, bitmap)) { 1801 mb_clear_bit(*bit, bitmap); 1802 (*bit) -= side; 1803 return 1; 1804 } 1805 else { 1806 (*bit) += side; 1807 mb_set_bit(*bit, bitmap); 1808 return -1; 1809 } 1810 } 1811 1812 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1813 { 1814 int max; 1815 int order = 1; 1816 void *buddy = mb_find_buddy(e4b, order, &max); 1817 1818 while (buddy) { 1819 void *buddy2; 1820 1821 /* Bits in range [first; last] are known to be set since 1822 * corresponding blocks were allocated. Bits in range 1823 * (first; last) will stay set because they form buddies on 1824 * upper layer. We just deal with borders if they don't 1825 * align with upper layer and then go up. 1826 * Releasing entire group is all about clearing 1827 * single bit of highest order buddy. 1828 */ 1829 1830 /* Example: 1831 * --------------------------------- 1832 * | 1 | 1 | 1 | 1 | 1833 * --------------------------------- 1834 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1835 * --------------------------------- 1836 * 0 1 2 3 4 5 6 7 1837 * \_____________________/ 1838 * 1839 * Neither [1] nor [6] is aligned to above layer. 1840 * Left neighbour [0] is free, so mark it busy, 1841 * decrease bb_counters and extend range to 1842 * [0; 6] 1843 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1844 * mark [6] free, increase bb_counters and shrink range to 1845 * [0; 5]. 1846 * Then shift range to [0; 2], go up and do the same. 1847 */ 1848 1849 1850 if (first & 1) 1851 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1852 if (!(last & 1)) 1853 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1854 if (first > last) 1855 break; 1856 order++; 1857 1858 buddy2 = mb_find_buddy(e4b, order, &max); 1859 if (!buddy2) { 1860 mb_clear_bits(buddy, first, last - first + 1); 1861 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1862 break; 1863 } 1864 first >>= 1; 1865 last >>= 1; 1866 buddy = buddy2; 1867 } 1868 } 1869 1870 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1871 int first, int count) 1872 { 1873 int left_is_free = 0; 1874 int right_is_free = 0; 1875 int block; 1876 int last = first + count - 1; 1877 struct super_block *sb = e4b->bd_sb; 1878 1879 if (WARN_ON(count == 0)) 1880 return; 1881 BUG_ON(last >= (sb->s_blocksize << 3)); 1882 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1883 /* Don't bother if the block group is corrupt. 
*/ 1884 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1885 return; 1886 1887 mb_check_buddy(e4b); 1888 mb_free_blocks_double(inode, e4b, first, count); 1889 1890 this_cpu_inc(discard_pa_seq); 1891 e4b->bd_info->bb_free += count; 1892 if (first < e4b->bd_info->bb_first_free) 1893 e4b->bd_info->bb_first_free = first; 1894 1895 /* access memory sequentially: check left neighbour, 1896 * clear range and then check right neighbour 1897 */ 1898 if (first != 0) 1899 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1900 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1901 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1902 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1903 1904 if (unlikely(block != -1)) { 1905 struct ext4_sb_info *sbi = EXT4_SB(sb); 1906 ext4_fsblk_t blocknr; 1907 1908 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1909 blocknr += EXT4_C2B(sbi, block); 1910 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 1911 ext4_grp_locked_error(sb, e4b->bd_group, 1912 inode ? inode->i_ino : 0, 1913 blocknr, 1914 "freeing already freed block (bit %u); block bitmap corrupt.", 1915 block); 1916 ext4_mark_group_bitmap_corrupted( 1917 sb, e4b->bd_group, 1918 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1919 } 1920 goto done; 1921 } 1922 1923 /* let's maintain fragments counter */ 1924 if (left_is_free && right_is_free) 1925 e4b->bd_info->bb_fragments--; 1926 else if (!left_is_free && !right_is_free) 1927 e4b->bd_info->bb_fragments++; 1928 1929 /* buddy[0] == bd_bitmap is a special case, so handle 1930 * it right away and let mb_buddy_mark_free stay free of 1931 * zero order checks. 1932 * Check if neighbours are to be coaleasced, 1933 * adjust bitmap bb_counters and borders appropriately. 1934 */ 1935 if (first & 1) { 1936 first += !left_is_free; 1937 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1938 } 1939 if (!(last & 1)) { 1940 last -= !right_is_free; 1941 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; 1942 } 1943 1944 if (first <= last) 1945 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1946 1947 done: 1948 mb_set_largest_free_order(sb, e4b->bd_info); 1949 mb_update_avg_fragment_size(sb, e4b->bd_info); 1950 mb_check_buddy(e4b); 1951 } 1952 1953 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1954 int needed, struct ext4_free_extent *ex) 1955 { 1956 int next = block; 1957 int max, order; 1958 void *buddy; 1959 1960 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1961 BUG_ON(ex == NULL); 1962 1963 buddy = mb_find_buddy(e4b, 0, &max); 1964 BUG_ON(buddy == NULL); 1965 BUG_ON(block >= max); 1966 if (mb_test_bit(block, buddy)) { 1967 ex->fe_len = 0; 1968 ex->fe_start = 0; 1969 ex->fe_group = 0; 1970 return 0; 1971 } 1972 1973 /* find actual order */ 1974 order = mb_find_order_for_block(e4b, block); 1975 block = block >> order; 1976 1977 ex->fe_len = 1 << order; 1978 ex->fe_start = block << order; 1979 ex->fe_group = e4b->bd_group; 1980 1981 /* calc difference from given start */ 1982 next = next - ex->fe_start; 1983 ex->fe_len -= next; 1984 ex->fe_start += next; 1985 1986 while (needed > ex->fe_len && 1987 mb_find_buddy(e4b, order, &max)) { 1988 1989 if (block + 1 >= max) 1990 break; 1991 1992 next = (block + 1) * (1 << order); 1993 if (mb_test_bit(next, e4b->bd_bitmap)) 1994 break; 1995 1996 order = mb_find_order_for_block(e4b, next); 1997 1998 block = next >> order; 1999 ex->fe_len += 1 << order; 2000 } 2001 2002 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 2003 /* Should never happen! 
(but apparently sometimes does?!?) */ 2004 WARN_ON(1); 2005 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 2006 "corruption or bug in mb_find_extent " 2007 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 2008 block, order, needed, ex->fe_group, ex->fe_start, 2009 ex->fe_len, ex->fe_logical); 2010 ex->fe_len = 0; 2011 ex->fe_start = 0; 2012 ex->fe_group = 0; 2013 } 2014 return ex->fe_len; 2015 } 2016 2017 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 2018 { 2019 int ord; 2020 int mlen = 0; 2021 int max = 0; 2022 int cur; 2023 int start = ex->fe_start; 2024 int len = ex->fe_len; 2025 unsigned ret = 0; 2026 int len0 = len; 2027 void *buddy; 2028 bool split = false; 2029 2030 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 2031 BUG_ON(e4b->bd_group != ex->fe_group); 2032 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2033 mb_check_buddy(e4b); 2034 mb_mark_used_double(e4b, start, len); 2035 2036 this_cpu_inc(discard_pa_seq); 2037 e4b->bd_info->bb_free -= len; 2038 if (e4b->bd_info->bb_first_free == start) 2039 e4b->bd_info->bb_first_free += len; 2040 2041 /* let's maintain fragments counter */ 2042 if (start != 0) 2043 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 2044 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 2045 max = !mb_test_bit(start + len, e4b->bd_bitmap); 2046 if (mlen && max) 2047 e4b->bd_info->bb_fragments++; 2048 else if (!mlen && !max) 2049 e4b->bd_info->bb_fragments--; 2050 2051 /* let's maintain buddy itself */ 2052 while (len) { 2053 if (!split) 2054 ord = mb_find_order_for_block(e4b, start); 2055 2056 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 2057 /* the whole chunk may be allocated at once! */ 2058 mlen = 1 << ord; 2059 if (!split) 2060 buddy = mb_find_buddy(e4b, ord, &max); 2061 else 2062 split = false; 2063 BUG_ON((start >> ord) >= max); 2064 mb_set_bit(start >> ord, buddy); 2065 e4b->bd_info->bb_counters[ord]--; 2066 start += mlen; 2067 len -= mlen; 2068 BUG_ON(len < 0); 2069 continue; 2070 } 2071 2072 /* store for history */ 2073 if (ret == 0) 2074 ret = len | (ord << 16); 2075 2076 /* we have to split large buddy */ 2077 BUG_ON(ord <= 0); 2078 buddy = mb_find_buddy(e4b, ord, &max); 2079 mb_set_bit(start >> ord, buddy); 2080 e4b->bd_info->bb_counters[ord]--; 2081 2082 ord--; 2083 cur = (start >> ord) & ~1U; 2084 buddy = mb_find_buddy(e4b, ord, &max); 2085 mb_clear_bit(cur, buddy); 2086 mb_clear_bit(cur + 1, buddy); 2087 e4b->bd_info->bb_counters[ord]++; 2088 e4b->bd_info->bb_counters[ord]++; 2089 split = true; 2090 } 2091 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 2092 2093 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 2094 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 2095 mb_check_buddy(e4b); 2096 2097 return ret; 2098 } 2099 2100 /* 2101 * Must be called under group lock! 
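 *
 * Note on the mb_mark_used() return value consumed below: it is 0 when
 * the extent fits existing buddies without splitting; otherwise the low
 * 16 bits hold the length still remaining when the first split was
 * needed and the high 16 bits hold the order of the buddy that had to
 * be split. ext4_mb_use_best_found() records these in ac_tail and
 * ac_buddy.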
2102 */
2103 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2104 struct ext4_buddy *e4b)
2105 {
2106 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2107 int ret;
2108
2109 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2110 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2111
2112 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2113 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2114 ret = mb_mark_used(e4b, &ac->ac_b_ex);
2115
2116 /* preallocation can change ac_b_ex, thus we store actually
2117 * allocated blocks for history */
2118 ac->ac_f_ex = ac->ac_b_ex;
2119
2120 ac->ac_status = AC_STATUS_FOUND;
2121 ac->ac_tail = ret & 0xffff;
2122 ac->ac_buddy = ret >> 16;
2123
2124 /*
2125 * take the page reference. We want the page to be pinned
2126 * so that we don't get an ext4_mb_init_cache() call for this
2127 * group until we update the bitmap. That would mean we
2128 * double allocate blocks. The reference is dropped
2129 * in ext4_mb_release_context
2130 */
2131 ac->ac_bitmap_page = e4b->bd_bitmap_page;
2132 get_page(ac->ac_bitmap_page);
2133 ac->ac_buddy_page = e4b->bd_buddy_page;
2134 get_page(ac->ac_buddy_page);
2135 /* store last allocated for subsequent stream allocation */
2136 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2137 spin_lock(&sbi->s_md_lock);
2138 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2139 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2140 spin_unlock(&sbi->s_md_lock);
2141 }
2142 /*
2143 * As we've just preallocated more space than
2144 * the user requested originally, we store the allocated
2145 * space in a special descriptor.
2146 */
2147 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2148 ext4_mb_new_preallocation(ac);
2149
2150 }
2151
2152 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2153 struct ext4_buddy *e4b,
2154 int finish_group)
2155 {
2156 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2157 struct ext4_free_extent *bex = &ac->ac_b_ex;
2158 struct ext4_free_extent *gex = &ac->ac_g_ex;
2159
2160 if (ac->ac_status == AC_STATUS_FOUND)
2161 return;
2162 /*
2163 * We don't want to scan for a whole year
2164 */
2165 if (ac->ac_found > sbi->s_mb_max_to_scan &&
2166 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2167 ac->ac_status = AC_STATUS_BREAK;
2168 return;
2169 }
2170
2171 /*
2172 * Haven't found a good chunk so far, let's continue
2173 */
2174 if (bex->fe_len < gex->fe_len)
2175 return;
2176
2177 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2178 ext4_mb_use_best_found(ac, e4b);
2179 }
2180
2181 /*
2182 * The routine checks whether the found extent is good enough. If it is,
2183 * then the extent gets marked used and a flag is set in the context
2184 * to stop scanning. Otherwise, the extent is compared with the
2185 * previously found extent and if the new one is better, then it's stored
2186 * in the context. Later, the best found extent will be used, if
2187 * mballoc can't find a good enough extent.
2188 *
2189 * The algorithm used is roughly as follows:
2190 *
2191 * * If the free extent found is exactly as big as the goal, then
2192 * stop the scan and use it immediately
2193 *
2194 * * If the free extent found is smaller than the goal, then keep retrying
2195 * up to a max of sbi->s_mb_max_to_scan times (default 200). After
2196 * that stop scanning and use whatever we have.
2197 *
2198 * * If the free extent found is bigger than the goal, then keep retrying
2199 * up to a max of sbi->s_mb_min_to_scan times (default 10) before
2200 * stopping the scan and using the extent.
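 *
 * A worked example (illustrative numbers only): with a goal of 8
 * clusters, an 8-cluster extent is used on the spot. While only
 * smaller extents have been seen, bigger is better, so a 7-cluster
 * find replaces a remembered 6-cluster one. Once some extent of at
 * least 8 clusters has been remembered, closer to the goal from above
 * wins, so a 9-cluster find replaces a remembered 12-cluster one.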
2201 * 2202 * 2203 * FIXME: real allocation policy is to be designed yet! 2204 */ 2205 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2206 struct ext4_free_extent *ex, 2207 struct ext4_buddy *e4b) 2208 { 2209 struct ext4_free_extent *bex = &ac->ac_b_ex; 2210 struct ext4_free_extent *gex = &ac->ac_g_ex; 2211 2212 BUG_ON(ex->fe_len <= 0); 2213 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2214 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2215 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2216 2217 ac->ac_found++; 2218 ac->ac_cX_found[ac->ac_criteria]++; 2219 2220 /* 2221 * The special case - take what you catch first 2222 */ 2223 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2224 *bex = *ex; 2225 ext4_mb_use_best_found(ac, e4b); 2226 return; 2227 } 2228 2229 /* 2230 * Let's check whether the chuck is good enough 2231 */ 2232 if (ex->fe_len == gex->fe_len) { 2233 *bex = *ex; 2234 ext4_mb_use_best_found(ac, e4b); 2235 return; 2236 } 2237 2238 /* 2239 * If this is first found extent, just store it in the context 2240 */ 2241 if (bex->fe_len == 0) { 2242 *bex = *ex; 2243 return; 2244 } 2245 2246 /* 2247 * If new found extent is better, store it in the context 2248 */ 2249 if (bex->fe_len < gex->fe_len) { 2250 /* if the request isn't satisfied, any found extent 2251 * larger than previous best one is better */ 2252 if (ex->fe_len > bex->fe_len) 2253 *bex = *ex; 2254 } else if (ex->fe_len > gex->fe_len) { 2255 /* if the request is satisfied, then we try to find 2256 * an extent that still satisfy the request, but is 2257 * smaller than previous one */ 2258 if (ex->fe_len < bex->fe_len) 2259 *bex = *ex; 2260 } 2261 2262 ext4_mb_check_limits(ac, e4b, 0); 2263 } 2264 2265 static noinline_for_stack 2266 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2267 struct ext4_buddy *e4b) 2268 { 2269 struct ext4_free_extent ex = ac->ac_b_ex; 2270 ext4_group_t group = ex.fe_group; 2271 int max; 2272 int err; 2273 2274 BUG_ON(ex.fe_len <= 0); 2275 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2276 if (err) 2277 return; 2278 2279 ext4_lock_group(ac->ac_sb, group); 2280 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2281 2282 if (max > 0) { 2283 ac->ac_b_ex = ex; 2284 ext4_mb_use_best_found(ac, e4b); 2285 } 2286 2287 ext4_unlock_group(ac->ac_sb, group); 2288 ext4_mb_unload_buddy(e4b); 2289 } 2290 2291 static noinline_for_stack 2292 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2293 struct ext4_buddy *e4b) 2294 { 2295 ext4_group_t group = ac->ac_g_ex.fe_group; 2296 int max; 2297 int err; 2298 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2299 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2300 struct ext4_free_extent ex; 2301 2302 if (!grp) 2303 return -EFSCORRUPTED; 2304 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2305 return 0; 2306 if (grp->bb_free == 0) 2307 return 0; 2308 2309 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2310 if (err) 2311 return err; 2312 2313 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2314 ext4_mb_unload_buddy(e4b); 2315 return 0; 2316 } 2317 2318 ext4_lock_group(ac->ac_sb, group); 2319 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2320 ac->ac_g_ex.fe_len, &ex); 2321 ex.fe_logical = 0xDEADFA11; /* debug value */ 2322 2323 if (max >= ac->ac_g_ex.fe_len && 2324 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) { 2325 ext4_fsblk_t start; 2326 2327 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); 2328 /* use do_div to get 
remainder (would be 64-bit modulo) */ 2329 if (do_div(start, sbi->s_stripe) == 0) { 2330 ac->ac_found++; 2331 ac->ac_b_ex = ex; 2332 ext4_mb_use_best_found(ac, e4b); 2333 } 2334 } else if (max >= ac->ac_g_ex.fe_len) { 2335 BUG_ON(ex.fe_len <= 0); 2336 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2337 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2338 ac->ac_found++; 2339 ac->ac_b_ex = ex; 2340 ext4_mb_use_best_found(ac, e4b); 2341 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2342 /* Sometimes, caller may want to merge even small 2343 * number of blocks to an existing extent */ 2344 BUG_ON(ex.fe_len <= 0); 2345 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2346 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2347 ac->ac_found++; 2348 ac->ac_b_ex = ex; 2349 ext4_mb_use_best_found(ac, e4b); 2350 } 2351 ext4_unlock_group(ac->ac_sb, group); 2352 ext4_mb_unload_buddy(e4b); 2353 2354 return 0; 2355 } 2356 2357 /* 2358 * The routine scans buddy structures (not bitmap!) from given order 2359 * to max order and tries to find big enough chunk to satisfy the req 2360 */ 2361 static noinline_for_stack 2362 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2363 struct ext4_buddy *e4b) 2364 { 2365 struct super_block *sb = ac->ac_sb; 2366 struct ext4_group_info *grp = e4b->bd_info; 2367 void *buddy; 2368 int i; 2369 int k; 2370 int max; 2371 2372 BUG_ON(ac->ac_2order <= 0); 2373 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2374 if (grp->bb_counters[i] == 0) 2375 continue; 2376 2377 buddy = mb_find_buddy(e4b, i, &max); 2378 if (WARN_RATELIMIT(buddy == NULL, 2379 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 2380 continue; 2381 2382 k = mb_find_next_zero_bit(buddy, max, 0); 2383 if (k >= max) { 2384 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2385 "%d free clusters of order %d. But found 0", 2386 grp->bb_counters[i], i); 2387 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2388 e4b->bd_group, 2389 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2390 break; 2391 } 2392 ac->ac_found++; 2393 ac->ac_cX_found[ac->ac_criteria]++; 2394 2395 ac->ac_b_ex.fe_len = 1 << i; 2396 ac->ac_b_ex.fe_start = k << i; 2397 ac->ac_b_ex.fe_group = e4b->bd_group; 2398 2399 ext4_mb_use_best_found(ac, e4b); 2400 2401 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2402 2403 if (EXT4_SB(sb)->s_mb_stats) 2404 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2405 2406 break; 2407 } 2408 } 2409 2410 /* 2411 * The routine scans the group and measures all found extents. 2412 * In order to optimize scanning, caller must pass number of 2413 * free blocks in the group, so the routine can know upper limit. 2414 */ 2415 static noinline_for_stack 2416 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2417 struct ext4_buddy *e4b) 2418 { 2419 struct super_block *sb = ac->ac_sb; 2420 void *bitmap = e4b->bd_bitmap; 2421 struct ext4_free_extent ex; 2422 int i, j, freelen; 2423 int free; 2424 2425 free = e4b->bd_info->bb_free; 2426 if (WARN_ON(free <= 0)) 2427 return; 2428 2429 i = e4b->bd_info->bb_first_free; 2430 2431 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2432 i = mb_find_next_zero_bit(bitmap, 2433 EXT4_CLUSTERS_PER_GROUP(sb), i); 2434 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 2435 /* 2436 * IF we have corrupt bitmap, we won't find any 2437 * free blocks even though group info says we 2438 * have free blocks 2439 */ 2440 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2441 "%d free clusters as per " 2442 "group info. 
But bitmap says 0", 2443 free); 2444 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2445 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2446 break; 2447 } 2448 2449 if (ac->ac_criteria < CR_FAST) { 2450 /* 2451 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are 2452 * sure that this group will have a large enough 2453 * continuous free extent, so skip over the smaller free 2454 * extents 2455 */ 2456 j = mb_find_next_bit(bitmap, 2457 EXT4_CLUSTERS_PER_GROUP(sb), i); 2458 freelen = j - i; 2459 2460 if (freelen < ac->ac_g_ex.fe_len) { 2461 i = j; 2462 free -= freelen; 2463 continue; 2464 } 2465 } 2466 2467 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2468 if (WARN_ON(ex.fe_len <= 0)) 2469 break; 2470 if (free < ex.fe_len) { 2471 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2472 "%d free clusters as per " 2473 "group info. But got %d blocks", 2474 free, ex.fe_len); 2475 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2476 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2477 /* 2478 * The number of free blocks differs. This mostly 2479 * indicate that the bitmap is corrupt. So exit 2480 * without claiming the space. 2481 */ 2482 break; 2483 } 2484 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2485 ext4_mb_measure_extent(ac, &ex, e4b); 2486 2487 i += ex.fe_len; 2488 free -= ex.fe_len; 2489 } 2490 2491 ext4_mb_check_limits(ac, e4b, 1); 2492 } 2493 2494 /* 2495 * This is a special case for storages like raid5 2496 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2497 */ 2498 static noinline_for_stack 2499 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2500 struct ext4_buddy *e4b) 2501 { 2502 struct super_block *sb = ac->ac_sb; 2503 struct ext4_sb_info *sbi = EXT4_SB(sb); 2504 void *bitmap = e4b->bd_bitmap; 2505 struct ext4_free_extent ex; 2506 ext4_fsblk_t first_group_block; 2507 ext4_fsblk_t a; 2508 ext4_grpblk_t i, stripe; 2509 int max; 2510 2511 BUG_ON(sbi->s_stripe == 0); 2512 2513 /* find first stripe-aligned block in group */ 2514 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2515 2516 a = first_group_block + sbi->s_stripe - 1; 2517 do_div(a, sbi->s_stripe); 2518 i = (a * sbi->s_stripe) - first_group_block; 2519 2520 stripe = EXT4_B2C(sbi, sbi->s_stripe); 2521 i = EXT4_B2C(sbi, i); 2522 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2523 if (!mb_test_bit(i, bitmap)) { 2524 max = mb_find_extent(e4b, i, stripe, &ex); 2525 if (max >= stripe) { 2526 ac->ac_found++; 2527 ac->ac_cX_found[ac->ac_criteria]++; 2528 ex.fe_logical = 0xDEADF00D; /* debug value */ 2529 ac->ac_b_ex = ex; 2530 ext4_mb_use_best_found(ac, e4b); 2531 break; 2532 } 2533 } 2534 i += stripe; 2535 } 2536 } 2537 2538 /* 2539 * This is also called BEFORE we load the buddy bitmap. 2540 * Returns either 1 or 0 indicating that the group is either suitable 2541 * for the allocation or not. 
2542 */ 2543 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2544 ext4_group_t group, enum criteria cr) 2545 { 2546 ext4_grpblk_t free, fragments; 2547 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2548 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2549 2550 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS); 2551 2552 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp)) 2553 return false; 2554 2555 free = grp->bb_free; 2556 if (free == 0) 2557 return false; 2558 2559 fragments = grp->bb_fragments; 2560 if (fragments == 0) 2561 return false; 2562 2563 switch (cr) { 2564 case CR_POWER2_ALIGNED: 2565 BUG_ON(ac->ac_2order == 0); 2566 2567 /* Avoid using the first bg of a flexgroup for data files */ 2568 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2569 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2570 ((group % flex_size) == 0)) 2571 return false; 2572 2573 if (free < ac->ac_g_ex.fe_len) 2574 return false; 2575 2576 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 2577 return true; 2578 2579 if (grp->bb_largest_free_order < ac->ac_2order) 2580 return false; 2581 2582 return true; 2583 case CR_GOAL_LEN_FAST: 2584 case CR_BEST_AVAIL_LEN: 2585 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2586 return true; 2587 break; 2588 case CR_GOAL_LEN_SLOW: 2589 if (free >= ac->ac_g_ex.fe_len) 2590 return true; 2591 break; 2592 case CR_ANY_FREE: 2593 return true; 2594 default: 2595 BUG(); 2596 } 2597 2598 return false; 2599 } 2600 2601 /* 2602 * This could return negative error code if something goes wrong 2603 * during ext4_mb_init_group(). This should not be called with 2604 * ext4_lock_group() held. 2605 * 2606 * Note: because we are conditionally operating with the group lock in 2607 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2608 * function using __acquire and __release. This means we need to be 2609 * super careful before messing with the error path handling via "goto 2610 * out"! 2611 */ 2612 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2613 ext4_group_t group, enum criteria cr) 2614 { 2615 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2616 struct super_block *sb = ac->ac_sb; 2617 struct ext4_sb_info *sbi = EXT4_SB(sb); 2618 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2619 ext4_grpblk_t free; 2620 int ret = 0; 2621 2622 if (!grp) 2623 return -EFSCORRUPTED; 2624 if (sbi->s_mb_stats) 2625 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2626 if (should_lock) { 2627 ext4_lock_group(sb, group); 2628 __release(ext4_group_lock_ptr(sb, group)); 2629 } 2630 free = grp->bb_free; 2631 if (free == 0) 2632 goto out; 2633 if (cr <= CR_FAST && free < ac->ac_g_ex.fe_len) 2634 goto out; 2635 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2636 goto out; 2637 if (should_lock) { 2638 __acquire(ext4_group_lock_ptr(sb, group)); 2639 ext4_unlock_group(sb, group); 2640 } 2641 2642 /* We only do this if the grp has never been initialized */ 2643 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2644 struct ext4_group_desc *gdp = 2645 ext4_get_group_desc(sb, group, NULL); 2646 int ret; 2647 2648 /* 2649 * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic 2650 * search to find large good chunks almost for free. If buddy 2651 * data is not ready, then this optimization makes no sense. 
But
2652 * we never skip the first block group in a flex_bg, since this
2653 * gets used for metadata block allocation, and we want to make
2654 * sure we locate metadata blocks in the first block group in
2655 * the flex_bg if possible.
2656 */
2657 if (cr < CR_FAST &&
2658 (!sbi->s_log_groups_per_flex ||
2659 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2660 !(ext4_has_group_desc_csum(sb) &&
2661 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2662 return 0;
2663 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2664 if (ret)
2665 return ret;
2666 }
2667
2668 if (should_lock) {
2669 ext4_lock_group(sb, group);
2670 __release(ext4_group_lock_ptr(sb, group));
2671 }
2672 ret = ext4_mb_good_group(ac, group, cr);
2673 out:
2674 if (should_lock) {
2675 __acquire(ext4_group_lock_ptr(sb, group));
2676 ext4_unlock_group(sb, group);
2677 }
2678 return ret;
2679 }
2680
2681 /*
2682 * Start prefetching @nr block bitmaps starting at @group.
2683 * Return the next group which needs to be prefetched.
2684 */
2685 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2686 unsigned int nr, int *cnt)
2687 {
2688 ext4_group_t ngroups = ext4_get_groups_count(sb);
2689 struct buffer_head *bh;
2690 struct blk_plug plug;
2691
2692 blk_start_plug(&plug);
2693 while (nr-- > 0) {
2694 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2695 NULL);
2696 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2697
2698 /*
2699 * Prefetch block groups with free blocks; but don't
2700 * bother if the group is marked uninitialized on disk,
2701 * since then it won't require I/O to read. Also only try to
2702 * prefetch once, so we avoid the getblk() call, which can
2703 * be expensive.
2704 */
2705 if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2706 EXT4_MB_GRP_NEED_INIT(grp) &&
2707 ext4_free_group_clusters(sb, gdp) > 0) {
2708 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2709 if (bh && !IS_ERR(bh)) {
2710 if (!buffer_uptodate(bh) && cnt)
2711 (*cnt)++;
2712 brelse(bh);
2713 }
2714 }
2715 if (++group >= ngroups)
2716 group = 0;
2717 }
2718 blk_finish_plug(&plug);
2719 return group;
2720 }
2721
2722 /*
2723 * Prefetching reads the block bitmap into the buffer cache; but we
2724 * need to make sure that the buddy bitmap in the page cache has been
2725 * initialized. Note that ext4_mb_init_group() will block if the I/O
2726 * is not yet completed, or indeed if ext4_mb_prefetch() never
2727 * started the I/O in the first place.
2728 *
2729 * TODO: We should actually kick off the buddy bitmap setup in a work
2730 * queue when the buffer I/O is completed, so that we don't block
2731 * waiting for the block allocation bitmap read to finish when
2732 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
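 *
 * (For reference, ext4_mb_regular_allocator() below prefetches in
 * batches of sbi->s_mb_prefetch groups; with flex_bg the batch is
 * clamped to the rest of the current flex group, roughly:
 *   nr = 1 << sbi->s_log_groups_per_flex;
 *   nr -= group & (nr - 1);
 *   nr = min(nr, sbi->s_mb_prefetch);
 * and during the optimistic criteria, i.e. cr below CR_FAST, it stops
 * issuing new batches once prefetch_ios reaches
 * sbi->s_mb_prefetch_limit.)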
2733 */
2734 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2735 unsigned int nr)
2736 {
2737 struct ext4_group_desc *gdp;
2738 struct ext4_group_info *grp;
2739
2740 while (nr-- > 0) {
2741 if (!group)
2742 group = ext4_get_groups_count(sb);
2743 group--;
2744 gdp = ext4_get_group_desc(sb, group, NULL);
2745 grp = ext4_get_group_info(sb, group);
2746
2747 if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2748 ext4_free_group_clusters(sb, gdp) > 0) {
2749 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2750 break;
2751 }
2752 }
2753 }
2754
2755 static noinline_for_stack int
2756 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2757 {
2758 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2759 enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
2760 int err = 0, first_err = 0;
2761 unsigned int nr = 0, prefetch_ios = 0;
2762 struct ext4_sb_info *sbi;
2763 struct super_block *sb;
2764 struct ext4_buddy e4b;
2765 int lost;
2766
2767 sb = ac->ac_sb;
2768 sbi = EXT4_SB(sb);
2769 ngroups = ext4_get_groups_count(sb);
2770 /* non-extent files are limited to low blocks/groups */
2771 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2772 ngroups = sbi->s_blockfile_groups;
2773
2774 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2775
2776 /* first, try the goal */
2777 err = ext4_mb_find_by_goal(ac, &e4b);
2778 if (err || ac->ac_status == AC_STATUS_FOUND)
2779 goto out;
2780
2781 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2782 goto out;
2783
2784 /*
2785 * ac->ac_2order is set only if the fe_len is a power of 2.
2786 * If ac->ac_2order is set we also set the criteria to
2787 * CR_POWER2_ALIGNED so that we try exact allocation using the buddy.
2788 */
2789 i = fls(ac->ac_g_ex.fe_len);
2790 ac->ac_2order = 0;
2791 /*
2792 * We search using buddy data only if the order of the request
2793 * is greater than or equal to sbi->s_mb_order2_reqs.
2794 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2795 * We also support searching for power-of-two requests only for
2796 * requests up to the maximum buddy size we have constructed.
2797 */
2798 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2799 /*
2800 * This should tell us if fe_len is exactly a power of 2
2801 */
2802 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2803 ac->ac_2order = array_index_nospec(i - 1,
2804 MB_NUM_ORDERS(sb));
2805 }
2806
2807 /* if stream allocation is enabled, use the global goal */
2808 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2809 /* TBD: may be hot point */
2810 spin_lock(&sbi->s_md_lock);
2811 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2812 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2813 spin_unlock(&sbi->s_md_lock);
2814 }
2815
2816 /*
2817 * Let's just scan groups to find more or less suitable blocks. We
2818 * start with CR_GOAL_LEN_FAST, unless the request is power-of-2
2819 * aligned, in which case let's do that faster approach first.
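 *
 * For example (illustrative values for the ac_2order computation above):
 *   fe_len = 16 -> i = fls(16) = 5, 16 & ~(1 << 4) == 0 -> ac_2order = 4
 *   fe_len = 24 -> i = fls(24) = 5, 24 & ~(1 << 4) == 8 -> ac_2order stays 0
 * so only exact power-of-two requests take the CR_POWER2_ALIGNED path.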
2820 */ 2821 if (ac->ac_2order) 2822 cr = CR_POWER2_ALIGNED; 2823 repeat: 2824 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2825 ac->ac_criteria = cr; 2826 /* 2827 * searching for the right group start 2828 * from the goal value specified 2829 */ 2830 group = ac->ac_g_ex.fe_group; 2831 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2832 prefetch_grp = group; 2833 2834 for (i = 0, new_cr = cr; i < ngroups; i++, 2835 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 2836 int ret = 0; 2837 2838 cond_resched(); 2839 if (new_cr != cr) { 2840 cr = new_cr; 2841 goto repeat; 2842 } 2843 2844 /* 2845 * Batch reads of the block allocation bitmaps 2846 * to get multiple READs in flight; limit 2847 * prefetching at cr=0/1, otherwise mballoc can 2848 * spend a lot of time loading imperfect groups 2849 */ 2850 if ((prefetch_grp == group) && 2851 (cr >= CR_FAST || 2852 prefetch_ios < sbi->s_mb_prefetch_limit)) { 2853 nr = sbi->s_mb_prefetch; 2854 if (ext4_has_feature_flex_bg(sb)) { 2855 nr = 1 << sbi->s_log_groups_per_flex; 2856 nr -= group & (nr - 1); 2857 nr = min(nr, sbi->s_mb_prefetch); 2858 } 2859 prefetch_grp = ext4_mb_prefetch(sb, group, 2860 nr, &prefetch_ios); 2861 } 2862 2863 /* This now checks without needing the buddy page */ 2864 ret = ext4_mb_good_group_nolock(ac, group, cr); 2865 if (ret <= 0) { 2866 if (!first_err) 2867 first_err = ret; 2868 continue; 2869 } 2870 2871 err = ext4_mb_load_buddy(sb, group, &e4b); 2872 if (err) 2873 goto out; 2874 2875 ext4_lock_group(sb, group); 2876 2877 /* 2878 * We need to check again after locking the 2879 * block group 2880 */ 2881 ret = ext4_mb_good_group(ac, group, cr); 2882 if (ret == 0) { 2883 ext4_unlock_group(sb, group); 2884 ext4_mb_unload_buddy(&e4b); 2885 continue; 2886 } 2887 2888 ac->ac_groups_scanned++; 2889 if (cr == CR_POWER2_ALIGNED) 2890 ext4_mb_simple_scan_group(ac, &e4b); 2891 else if ((cr == CR_GOAL_LEN_FAST || 2892 cr == CR_BEST_AVAIL_LEN) && 2893 sbi->s_stripe && 2894 !(ac->ac_g_ex.fe_len % 2895 EXT4_B2C(sbi, sbi->s_stripe))) 2896 ext4_mb_scan_aligned(ac, &e4b); 2897 else 2898 ext4_mb_complex_scan_group(ac, &e4b); 2899 2900 ext4_unlock_group(sb, group); 2901 ext4_mb_unload_buddy(&e4b); 2902 2903 if (ac->ac_status != AC_STATUS_CONTINUE) 2904 break; 2905 } 2906 /* Processed all groups and haven't found blocks */ 2907 if (sbi->s_mb_stats && i == ngroups) 2908 atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2909 2910 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) 2911 /* Reset goal length to original goal length before 2912 * falling into CR_GOAL_LEN_SLOW */ 2913 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 2914 } 2915 2916 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2917 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2918 /* 2919 * We've been searching too long. Let's try to allocate 2920 * the best chunk we've found so far 2921 */ 2922 ext4_mb_try_best_found(ac, &e4b); 2923 if (ac->ac_status != AC_STATUS_FOUND) { 2924 /* 2925 * Someone more lucky has already allocated it. 
2926 * The only thing we can do is just take first 2927 * found block(s) 2928 */ 2929 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 2930 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2931 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2932 ac->ac_b_ex.fe_len, lost); 2933 2934 ac->ac_b_ex.fe_group = 0; 2935 ac->ac_b_ex.fe_start = 0; 2936 ac->ac_b_ex.fe_len = 0; 2937 ac->ac_status = AC_STATUS_CONTINUE; 2938 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2939 cr = CR_ANY_FREE; 2940 goto repeat; 2941 } 2942 } 2943 2944 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2945 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2946 out: 2947 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2948 err = first_err; 2949 2950 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2951 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2952 ac->ac_flags, cr, err); 2953 2954 if (nr) 2955 ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2956 2957 return err; 2958 } 2959 2960 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2961 { 2962 struct super_block *sb = pde_data(file_inode(seq->file)); 2963 ext4_group_t group; 2964 2965 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2966 return NULL; 2967 group = *pos + 1; 2968 return (void *) ((unsigned long) group); 2969 } 2970 2971 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2972 { 2973 struct super_block *sb = pde_data(file_inode(seq->file)); 2974 ext4_group_t group; 2975 2976 ++*pos; 2977 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2978 return NULL; 2979 group = *pos + 1; 2980 return (void *) ((unsigned long) group); 2981 } 2982 2983 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2984 { 2985 struct super_block *sb = pde_data(file_inode(seq->file)); 2986 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2987 int i; 2988 int err, buddy_loaded = 0; 2989 struct ext4_buddy e4b; 2990 struct ext4_group_info *grinfo; 2991 unsigned char blocksize_bits = min_t(unsigned char, 2992 sb->s_blocksize_bits, 2993 EXT4_MAX_BLOCK_LOG_SIZE); 2994 struct sg { 2995 struct ext4_group_info info; 2996 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2997 } sg; 2998 2999 group--; 3000 if (group == 0) 3001 seq_puts(seq, "#group: free frags first [" 3002 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 3003 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 3004 3005 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 3006 sizeof(struct ext4_group_info); 3007 3008 grinfo = ext4_get_group_info(sb, group); 3009 if (!grinfo) 3010 return 0; 3011 /* Load the group info in memory only if not already loaded. */ 3012 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 3013 err = ext4_mb_load_buddy(sb, group, &e4b); 3014 if (err) { 3015 seq_printf(seq, "#%-5u: I/O error\n", group); 3016 return 0; 3017 } 3018 buddy_loaded = 1; 3019 } 3020 3021 memcpy(&sg, grinfo, i); 3022 3023 if (buddy_loaded) 3024 ext4_mb_unload_buddy(&e4b); 3025 3026 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 3027 sg.info.bb_fragments, sg.info.bb_first_free); 3028 for (i = 0; i <= 13; i++) 3029 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
3030 sg.info.bb_counters[i] : 0); 3031 seq_puts(seq, " ]\n"); 3032 3033 return 0; 3034 } 3035 3036 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 3037 { 3038 } 3039 3040 const struct seq_operations ext4_mb_seq_groups_ops = { 3041 .start = ext4_mb_seq_groups_start, 3042 .next = ext4_mb_seq_groups_next, 3043 .stop = ext4_mb_seq_groups_stop, 3044 .show = ext4_mb_seq_groups_show, 3045 }; 3046 3047 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 3048 { 3049 struct super_block *sb = seq->private; 3050 struct ext4_sb_info *sbi = EXT4_SB(sb); 3051 3052 seq_puts(seq, "mballoc:\n"); 3053 if (!sbi->s_mb_stats) { 3054 seq_puts(seq, "\tmb stats collection turned off.\n"); 3055 seq_puts( 3056 seq, 3057 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 3058 return 0; 3059 } 3060 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 3061 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 3062 3063 seq_printf(seq, "\tgroups_scanned: %u\n", 3064 atomic_read(&sbi->s_bal_groups_scanned)); 3065 3066 /* CR_POWER2_ALIGNED stats */ 3067 seq_puts(seq, "\tcr_p2_aligned_stats:\n"); 3068 seq_printf(seq, "\t\thits: %llu\n", 3069 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); 3070 seq_printf( 3071 seq, "\t\tgroups_considered: %llu\n", 3072 atomic64_read( 3073 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); 3074 seq_printf(seq, "\t\textents_scanned: %u\n", 3075 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); 3076 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3077 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); 3078 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3079 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); 3080 3081 /* CR_GOAL_LEN_FAST stats */ 3082 seq_puts(seq, "\tcr_goal_fast_stats:\n"); 3083 seq_printf(seq, "\t\thits: %llu\n", 3084 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); 3085 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3086 atomic64_read( 3087 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); 3088 seq_printf(seq, "\t\textents_scanned: %u\n", 3089 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); 3090 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3091 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); 3092 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3093 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); 3094 3095 /* CR_BEST_AVAIL_LEN stats */ 3096 seq_puts(seq, "\tcr_best_avail_stats:\n"); 3097 seq_printf(seq, "\t\thits: %llu\n", 3098 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); 3099 seq_printf( 3100 seq, "\t\tgroups_considered: %llu\n", 3101 atomic64_read( 3102 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); 3103 seq_printf(seq, "\t\textents_scanned: %u\n", 3104 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); 3105 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3106 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); 3107 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3108 atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); 3109 3110 /* CR_GOAL_LEN_SLOW stats */ 3111 seq_puts(seq, "\tcr_goal_slow_stats:\n"); 3112 seq_printf(seq, "\t\thits: %llu\n", 3113 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); 3114 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3115 atomic64_read( 3116 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); 3117 seq_printf(seq, "\t\textents_scanned: %u\n", 3118 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); 3119 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3120 
atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); 3121 3122 /* CR_ANY_FREE stats */ 3123 seq_puts(seq, "\tcr_any_free_stats:\n"); 3124 seq_printf(seq, "\t\thits: %llu\n", 3125 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); 3126 seq_printf( 3127 seq, "\t\tgroups_considered: %llu\n", 3128 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); 3129 seq_printf(seq, "\t\textents_scanned: %u\n", 3130 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); 3131 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3132 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); 3133 3134 /* Aggregates */ 3135 seq_printf(seq, "\textents_scanned: %u\n", 3136 atomic_read(&sbi->s_bal_ex_scanned)); 3137 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 3138 seq_printf(seq, "\t\tlen_goal_hits: %u\n", 3139 atomic_read(&sbi->s_bal_len_goals)); 3140 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 3141 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 3142 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 3143 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 3144 atomic_read(&sbi->s_mb_buddies_generated), 3145 ext4_get_groups_count(sb)); 3146 seq_printf(seq, "\tbuddies_time_used: %llu\n", 3147 atomic64_read(&sbi->s_mb_generation_time)); 3148 seq_printf(seq, "\tpreallocated: %u\n", 3149 atomic_read(&sbi->s_mb_preallocated)); 3150 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); 3151 return 0; 3152 } 3153 3154 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 3155 __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 3156 { 3157 struct super_block *sb = pde_data(file_inode(seq->file)); 3158 unsigned long position; 3159 3160 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3161 return NULL; 3162 position = *pos + 1; 3163 return (void *) ((unsigned long) position); 3164 } 3165 3166 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3167 { 3168 struct super_block *sb = pde_data(file_inode(seq->file)); 3169 unsigned long position; 3170 3171 ++*pos; 3172 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3173 return NULL; 3174 position = *pos + 1; 3175 return (void *) ((unsigned long) position); 3176 } 3177 3178 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3179 { 3180 struct super_block *sb = pde_data(file_inode(seq->file)); 3181 struct ext4_sb_info *sbi = EXT4_SB(sb); 3182 unsigned long position = ((unsigned long) v); 3183 struct ext4_group_info *grp; 3184 unsigned int count; 3185 3186 position--; 3187 if (position >= MB_NUM_ORDERS(sb)) { 3188 position -= MB_NUM_ORDERS(sb); 3189 if (position == 0) 3190 seq_puts(seq, "avg_fragment_size_lists:\n"); 3191 3192 count = 0; 3193 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 3194 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 3195 bb_avg_fragment_size_node) 3196 count++; 3197 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 3198 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3199 (unsigned int)position, count); 3200 return 0; 3201 } 3202 3203 if (position == 0) { 3204 seq_printf(seq, "optimize_scan: %d\n", 3205 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); 3206 seq_puts(seq, "max_free_order_lists:\n"); 3207 } 3208 count = 0; 3209 read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 3210 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3211 bb_largest_free_order_node) 3212 count++; 3213 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3214 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3215 (unsigned int)position, count); 3216 3217 return 0; 3218 } 3219 3220 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3221 { 3222 } 3223 3224 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3225 .start = ext4_mb_seq_structs_summary_start, 3226 .next = ext4_mb_seq_structs_summary_next, 3227 .stop = ext4_mb_seq_structs_summary_stop, 3228 .show = ext4_mb_seq_structs_summary_show, 3229 }; 3230 3231 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3232 { 3233 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3234 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3235 3236 BUG_ON(!cachep); 3237 return cachep; 3238 } 3239 3240 /* 3241 * Allocate the top-level s_group_info array for the specified number 3242 * of groups 3243 */ 3244 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3245 { 3246 struct ext4_sb_info *sbi = EXT4_SB(sb); 3247 unsigned size; 3248 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3249 3250 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3251 EXT4_DESC_PER_BLOCK_BITS(sb); 3252 if (size <= sbi->s_group_info_size) 3253 return 0; 3254 3255 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3256 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3257 if (!new_groupinfo) { 3258 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3259 return -ENOMEM; 3260 } 3261 rcu_read_lock(); 3262 old_groupinfo = rcu_dereference(sbi->s_group_info); 3263 if (old_groupinfo) 3264 memcpy(new_groupinfo, old_groupinfo, 3265 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3266 rcu_read_unlock(); 3267 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3268 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3269 if (old_groupinfo) 3270 ext4_kvfree_array_rcu(old_groupinfo); 3271 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3272 sbi->s_group_info_size); 3273 return 0; 3274 } 3275 3276 /* Create and initialize ext4_group_info data for the given group. */ 3277 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3278 struct ext4_group_desc *desc) 3279 { 3280 int i; 3281 int metalen = 0; 3282 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3283 struct ext4_sb_info *sbi = EXT4_SB(sb); 3284 struct ext4_group_info **meta_group_info; 3285 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3286 3287 /* 3288 * First check if this group is the first of a reserved block. 
3289 * If it's true, we have to allocate a new table of pointers 3290 * to ext4_group_info structures 3291 */ 3292 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3293 metalen = sizeof(*meta_group_info) << 3294 EXT4_DESC_PER_BLOCK_BITS(sb); 3295 meta_group_info = kmalloc(metalen, GFP_NOFS); 3296 if (meta_group_info == NULL) { 3297 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3298 "for a buddy group"); 3299 return -ENOMEM; 3300 } 3301 rcu_read_lock(); 3302 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3303 rcu_read_unlock(); 3304 } 3305 3306 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3307 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3308 3309 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3310 if (meta_group_info[i] == NULL) { 3311 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3312 goto exit_group_info; 3313 } 3314 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3315 &(meta_group_info[i]->bb_state)); 3316 3317 /* 3318 * initialize bb_free to be able to skip 3319 * empty groups without initialization 3320 */ 3321 if (ext4_has_group_desc_csum(sb) && 3322 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3323 meta_group_info[i]->bb_free = 3324 ext4_free_clusters_after_init(sb, group, desc); 3325 } else { 3326 meta_group_info[i]->bb_free = 3327 ext4_free_group_clusters(sb, desc); 3328 } 3329 3330 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3331 init_rwsem(&meta_group_info[i]->alloc_sem); 3332 meta_group_info[i]->bb_free_root = RB_ROOT; 3333 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 3334 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 3335 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3336 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3337 meta_group_info[i]->bb_group = group; 3338 3339 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3340 return 0; 3341 3342 exit_group_info: 3343 /* If a meta_group_info table has been allocated, release it now */ 3344 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3345 struct ext4_group_info ***group_info; 3346 3347 rcu_read_lock(); 3348 group_info = rcu_dereference(sbi->s_group_info); 3349 kfree(group_info[idx]); 3350 group_info[idx] = NULL; 3351 rcu_read_unlock(); 3352 } 3353 return -ENOMEM; 3354 } /* ext4_mb_add_groupinfo */ 3355 3356 static int ext4_mb_init_backend(struct super_block *sb) 3357 { 3358 ext4_group_t ngroups = ext4_get_groups_count(sb); 3359 ext4_group_t i; 3360 struct ext4_sb_info *sbi = EXT4_SB(sb); 3361 int err; 3362 struct ext4_group_desc *desc; 3363 struct ext4_group_info ***group_info; 3364 struct kmem_cache *cachep; 3365 3366 err = ext4_mb_alloc_groupinfo(sb, ngroups); 3367 if (err) 3368 return err; 3369 3370 sbi->s_buddy_cache = new_inode(sb); 3371 if (sbi->s_buddy_cache == NULL) { 3372 ext4_msg(sb, KERN_ERR, "can't get new inode"); 3373 goto err_freesgi; 3374 } 3375 /* To avoid potentially colliding with an valid on-disk inode number, 3376 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 3377 * not in the inode hash, so it should never be found by iget(), but 3378 * this will avoid confusion if it ever shows up during debugging. 
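 * (The data of this inode is the buddy cache itself, as described
 * above ext4_mb_load_buddy_gfp(): two blocks per group, block 2*g
 * holding a copy of group g's on-disk block bitmap and block 2*g + 1
 * holding the in-core buddy bitmap.)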
*/ 3379 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3380 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 3381 for (i = 0; i < ngroups; i++) { 3382 cond_resched(); 3383 desc = ext4_get_group_desc(sb, i, NULL); 3384 if (desc == NULL) { 3385 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3386 goto err_freebuddy; 3387 } 3388 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 3389 goto err_freebuddy; 3390 } 3391 3392 if (ext4_has_feature_flex_bg(sb)) { 3393 /* a single flex group is supposed to be read by a single IO. 3394 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3395 * unsigned integer, so the maximum shift is 32. 3396 */ 3397 if (sbi->s_es->s_log_groups_per_flex >= 32) { 3398 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3399 goto err_freebuddy; 3400 } 3401 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 3402 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3403 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3404 } else { 3405 sbi->s_mb_prefetch = 32; 3406 } 3407 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3408 sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3409 /* now many real IOs to prefetch within a single allocation at cr=0 3410 * given cr=0 is an CPU-related optimization we shouldn't try to 3411 * load too many groups, at some point we should start to use what 3412 * we've got in memory. 3413 * with an average random access time 5ms, it'd take a second to get 3414 * 200 groups (* N with flex_bg), so let's make this limit 4 3415 */ 3416 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3417 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3418 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3419 3420 return 0; 3421 3422 err_freebuddy: 3423 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3424 while (i-- > 0) { 3425 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 3426 3427 if (grp) 3428 kmem_cache_free(cachep, grp); 3429 } 3430 i = sbi->s_group_info_size; 3431 rcu_read_lock(); 3432 group_info = rcu_dereference(sbi->s_group_info); 3433 while (i-- > 0) 3434 kfree(group_info[i]); 3435 rcu_read_unlock(); 3436 iput(sbi->s_buddy_cache); 3437 err_freesgi: 3438 rcu_read_lock(); 3439 kvfree(rcu_dereference(sbi->s_group_info)); 3440 rcu_read_unlock(); 3441 return -ENOMEM; 3442 } 3443 3444 static void ext4_groupinfo_destroy_slabs(void) 3445 { 3446 int i; 3447 3448 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 3449 kmem_cache_destroy(ext4_groupinfo_caches[i]); 3450 ext4_groupinfo_caches[i] = NULL; 3451 } 3452 } 3453 3454 static int ext4_groupinfo_create_slab(size_t size) 3455 { 3456 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 3457 int slab_size; 3458 int blocksize_bits = order_base_2(size); 3459 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3460 struct kmem_cache *cachep; 3461 3462 if (cache_index >= NR_GRPINFO_CACHES) 3463 return -EINVAL; 3464 3465 if (unlikely(cache_index < 0)) 3466 cache_index = 0; 3467 3468 mutex_lock(&ext4_grpinfo_slab_create_mutex); 3469 if (ext4_groupinfo_caches[cache_index]) { 3470 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3471 return 0; /* Already created */ 3472 } 3473 3474 slab_size = offsetof(struct ext4_group_info, 3475 bb_counters[blocksize_bits + 2]); 3476 3477 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 3478 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 3479 NULL); 3480 3481 ext4_groupinfo_caches[cache_index] = cachep; 3482 3483 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3484 if (!cachep) { 3485 
printk(KERN_EMERG 3486 "EXT4-fs: no memory for groupinfo slab cache\n"); 3487 return -ENOMEM; 3488 } 3489 3490 return 0; 3491 } 3492 3493 static void ext4_discard_work(struct work_struct *work) 3494 { 3495 struct ext4_sb_info *sbi = container_of(work, 3496 struct ext4_sb_info, s_discard_work); 3497 struct super_block *sb = sbi->s_sb; 3498 struct ext4_free_data *fd, *nfd; 3499 struct ext4_buddy e4b; 3500 struct list_head discard_list; 3501 ext4_group_t grp, load_grp; 3502 int err = 0; 3503 3504 INIT_LIST_HEAD(&discard_list); 3505 spin_lock(&sbi->s_md_lock); 3506 list_splice_init(&sbi->s_discard_list, &discard_list); 3507 spin_unlock(&sbi->s_md_lock); 3508 3509 load_grp = UINT_MAX; 3510 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 3511 /* 3512 * If filesystem is umounting or no memory or suffering 3513 * from no space, give up the discard 3514 */ 3515 if ((sb->s_flags & SB_ACTIVE) && !err && 3516 !atomic_read(&sbi->s_retry_alloc_pending)) { 3517 grp = fd->efd_group; 3518 if (grp != load_grp) { 3519 if (load_grp != UINT_MAX) 3520 ext4_mb_unload_buddy(&e4b); 3521 3522 err = ext4_mb_load_buddy(sb, grp, &e4b); 3523 if (err) { 3524 kmem_cache_free(ext4_free_data_cachep, fd); 3525 load_grp = UINT_MAX; 3526 continue; 3527 } else { 3528 load_grp = grp; 3529 } 3530 } 3531 3532 ext4_lock_group(sb, grp); 3533 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 3534 fd->efd_start_cluster + fd->efd_count - 1, 1); 3535 ext4_unlock_group(sb, grp); 3536 } 3537 kmem_cache_free(ext4_free_data_cachep, fd); 3538 } 3539 3540 if (load_grp != UINT_MAX) 3541 ext4_mb_unload_buddy(&e4b); 3542 } 3543 3544 int ext4_mb_init(struct super_block *sb) 3545 { 3546 struct ext4_sb_info *sbi = EXT4_SB(sb); 3547 unsigned i, j; 3548 unsigned offset, offset_incr; 3549 unsigned max; 3550 int ret; 3551 3552 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3553 3554 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3555 if (sbi->s_mb_offsets == NULL) { 3556 ret = -ENOMEM; 3557 goto out; 3558 } 3559 3560 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3561 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3562 if (sbi->s_mb_maxs == NULL) { 3563 ret = -ENOMEM; 3564 goto out; 3565 } 3566 3567 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 3568 if (ret < 0) 3569 goto out; 3570 3571 /* order 0 is regular bitmap */ 3572 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3573 sbi->s_mb_offsets[0] = 0; 3574 3575 i = 1; 3576 offset = 0; 3577 offset_incr = 1 << (sb->s_blocksize_bits - 1); 3578 max = sb->s_blocksize << 2; 3579 do { 3580 sbi->s_mb_offsets[i] = offset; 3581 sbi->s_mb_maxs[i] = max; 3582 offset += offset_incr; 3583 offset_incr = offset_incr >> 1; 3584 max = max >> 1; 3585 i++; 3586 } while (i < MB_NUM_ORDERS(sb)); 3587 3588 sbi->s_mb_avg_fragment_size = 3589 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3590 GFP_KERNEL); 3591 if (!sbi->s_mb_avg_fragment_size) { 3592 ret = -ENOMEM; 3593 goto out; 3594 } 3595 sbi->s_mb_avg_fragment_size_locks = 3596 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3597 GFP_KERNEL); 3598 if (!sbi->s_mb_avg_fragment_size_locks) { 3599 ret = -ENOMEM; 3600 goto out; 3601 } 3602 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3603 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 3604 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 3605 } 3606 sbi->s_mb_largest_free_orders = 3607 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3608 GFP_KERNEL); 3609 if (!sbi->s_mb_largest_free_orders) { 3610 ret = -ENOMEM; 3611 goto out; 3612 } 3613 sbi->s_mb_largest_free_orders_locks = 3614 
kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
			GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders_locks) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
	}

	spin_lock_init(&sbi->s_md_lock);
	sbi->s_mb_free_pending = 0;
	INIT_LIST_HEAD(&sbi->s_freed_data_list);
	INIT_LIST_HEAD(&sbi->s_discard_list);
	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
	atomic_set(&sbi->s_retry_alloc_pending, 0);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
	sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;

	/*
	 * The default group preallocation is 512, which for 4k block
	 * sizes translates to 2 megabytes. However, for bigalloc file
	 * systems, this is probably too big (i.e., if the cluster size
	 * is 1 megabyte, then group preallocation size becomes half a
	 * gigabyte!). As a default, we will keep a two megabyte
	 * group prealloc size for cluster sizes up to 64k, and after
	 * that, we will force a minimum group preallocation size of
	 * 32 clusters. This translates to 8 megs when the cluster
	 * size is 256k, and 32 megs when the cluster size is 1 meg,
	 * which seems reasonable as a default.
	 */
	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
				       sbi->s_cluster_bits, 32);
	/*
	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
	 * to the lowest multiple of s_stripe which is bigger than
	 * the s_mb_group_prealloc as determined above. We want
	 * the preallocation size to be an exact multiple of the
	 * RAID stripe size so that preallocations don't fragment
	 * the stripes.
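	 *
	 * For example (illustrative numbers, not from the original comment):
	 * on a non-bigalloc 4k filesystem with s_stripe = 384 blocks, the
	 * default prealloc of 512 clusters is rounded up by the code below
	 * to roundup(512, 384) = 768 clusters, so every group preallocation
	 * covers whole RAID stripes.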
3660 */ 3661 if (sbi->s_stripe > 1) { 3662 sbi->s_mb_group_prealloc = roundup( 3663 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe)); 3664 } 3665 3666 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3667 if (sbi->s_locality_groups == NULL) { 3668 ret = -ENOMEM; 3669 goto out; 3670 } 3671 for_each_possible_cpu(i) { 3672 struct ext4_locality_group *lg; 3673 lg = per_cpu_ptr(sbi->s_locality_groups, i); 3674 mutex_init(&lg->lg_mutex); 3675 for (j = 0; j < PREALLOC_TB_SIZE; j++) 3676 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3677 spin_lock_init(&lg->lg_prealloc_lock); 3678 } 3679 3680 if (bdev_nonrot(sb->s_bdev)) 3681 sbi->s_mb_max_linear_groups = 0; 3682 else 3683 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 3684 /* init file for buddy data */ 3685 ret = ext4_mb_init_backend(sb); 3686 if (ret != 0) 3687 goto out_free_locality_groups; 3688 3689 return 0; 3690 3691 out_free_locality_groups: 3692 free_percpu(sbi->s_locality_groups); 3693 sbi->s_locality_groups = NULL; 3694 out: 3695 kfree(sbi->s_mb_avg_fragment_size); 3696 kfree(sbi->s_mb_avg_fragment_size_locks); 3697 kfree(sbi->s_mb_largest_free_orders); 3698 kfree(sbi->s_mb_largest_free_orders_locks); 3699 kfree(sbi->s_mb_offsets); 3700 sbi->s_mb_offsets = NULL; 3701 kfree(sbi->s_mb_maxs); 3702 sbi->s_mb_maxs = NULL; 3703 return ret; 3704 } 3705 3706 /* need to called with the ext4 group lock held */ 3707 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3708 { 3709 struct ext4_prealloc_space *pa; 3710 struct list_head *cur, *tmp; 3711 int count = 0; 3712 3713 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3714 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3715 list_del(&pa->pa_group_list); 3716 count++; 3717 kmem_cache_free(ext4_pspace_cachep, pa); 3718 } 3719 return count; 3720 } 3721 3722 int ext4_mb_release(struct super_block *sb) 3723 { 3724 ext4_group_t ngroups = ext4_get_groups_count(sb); 3725 ext4_group_t i; 3726 int num_meta_group_infos; 3727 struct ext4_group_info *grinfo, ***group_info; 3728 struct ext4_sb_info *sbi = EXT4_SB(sb); 3729 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3730 int count; 3731 3732 if (test_opt(sb, DISCARD)) { 3733 /* 3734 * wait the discard work to drain all of ext4_free_data 3735 */ 3736 flush_work(&sbi->s_discard_work); 3737 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 3738 } 3739 3740 if (sbi->s_group_info) { 3741 for (i = 0; i < ngroups; i++) { 3742 cond_resched(); 3743 grinfo = ext4_get_group_info(sb, i); 3744 if (!grinfo) 3745 continue; 3746 mb_group_bb_bitmap_free(grinfo); 3747 ext4_lock_group(sb, i); 3748 count = ext4_mb_cleanup_pa(grinfo); 3749 if (count) 3750 mb_debug(sb, "mballoc: %d PAs left\n", 3751 count); 3752 ext4_unlock_group(sb, i); 3753 kmem_cache_free(cachep, grinfo); 3754 } 3755 num_meta_group_infos = (ngroups + 3756 EXT4_DESC_PER_BLOCK(sb) - 1) >> 3757 EXT4_DESC_PER_BLOCK_BITS(sb); 3758 rcu_read_lock(); 3759 group_info = rcu_dereference(sbi->s_group_info); 3760 for (i = 0; i < num_meta_group_infos; i++) 3761 kfree(group_info[i]); 3762 kvfree(group_info); 3763 rcu_read_unlock(); 3764 } 3765 kfree(sbi->s_mb_avg_fragment_size); 3766 kfree(sbi->s_mb_avg_fragment_size_locks); 3767 kfree(sbi->s_mb_largest_free_orders); 3768 kfree(sbi->s_mb_largest_free_orders_locks); 3769 kfree(sbi->s_mb_offsets); 3770 kfree(sbi->s_mb_maxs); 3771 iput(sbi->s_buddy_cache); 3772 if (sbi->s_mb_stats) { 3773 ext4_msg(sb, KERN_INFO, 3774 "mballoc: %u blocks %u reqs (%u success)", 3775 
atomic_read(&sbi->s_bal_allocated), 3776 atomic_read(&sbi->s_bal_reqs), 3777 atomic_read(&sbi->s_bal_success)); 3778 ext4_msg(sb, KERN_INFO, 3779 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3780 "%u 2^N hits, %u breaks, %u lost", 3781 atomic_read(&sbi->s_bal_ex_scanned), 3782 atomic_read(&sbi->s_bal_groups_scanned), 3783 atomic_read(&sbi->s_bal_goals), 3784 atomic_read(&sbi->s_bal_2orders), 3785 atomic_read(&sbi->s_bal_breaks), 3786 atomic_read(&sbi->s_mb_lost_chunks)); 3787 ext4_msg(sb, KERN_INFO, 3788 "mballoc: %u generated and it took %llu", 3789 atomic_read(&sbi->s_mb_buddies_generated), 3790 atomic64_read(&sbi->s_mb_generation_time)); 3791 ext4_msg(sb, KERN_INFO, 3792 "mballoc: %u preallocated, %u discarded", 3793 atomic_read(&sbi->s_mb_preallocated), 3794 atomic_read(&sbi->s_mb_discarded)); 3795 } 3796 3797 free_percpu(sbi->s_locality_groups); 3798 3799 return 0; 3800 } 3801 3802 static inline int ext4_issue_discard(struct super_block *sb, 3803 ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3804 struct bio **biop) 3805 { 3806 ext4_fsblk_t discard_block; 3807 3808 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3809 ext4_group_first_block_no(sb, block_group)); 3810 count = EXT4_C2B(EXT4_SB(sb), count); 3811 trace_ext4_discard_blocks(sb, 3812 (unsigned long long) discard_block, count); 3813 if (biop) { 3814 return __blkdev_issue_discard(sb->s_bdev, 3815 (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3816 (sector_t)count << (sb->s_blocksize_bits - 9), 3817 GFP_NOFS, biop); 3818 } else 3819 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3820 } 3821 3822 static void ext4_free_data_in_buddy(struct super_block *sb, 3823 struct ext4_free_data *entry) 3824 { 3825 struct ext4_buddy e4b; 3826 struct ext4_group_info *db; 3827 int err, count = 0; 3828 3829 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3830 entry->efd_count, entry->efd_group, entry); 3831 3832 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3833 /* we expect to find existing buddy because it's pinned */ 3834 BUG_ON(err != 0); 3835 3836 spin_lock(&EXT4_SB(sb)->s_md_lock); 3837 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3838 spin_unlock(&EXT4_SB(sb)->s_md_lock); 3839 3840 db = e4b.bd_info; 3841 /* there are blocks to put in buddy to make them really free */ 3842 count += entry->efd_count; 3843 ext4_lock_group(sb, entry->efd_group); 3844 /* Take it out of per group rb tree */ 3845 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3846 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3847 3848 /* 3849 * Clear the trimmed flag for the group so that the next 3850 * ext4_trim_fs can trim it. 3851 * If the volume is mounted with -o discard, online discard 3852 * is supported and the free blocks will be trimmed online. 3853 */ 3854 if (!test_opt(sb, DISCARD)) 3855 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3856 3857 if (!db->bb_free_root.rb_node) { 3858 /* No more items in the per group rb tree 3859 * balance refcounts from ext4_mb_free_metadata() 3860 */ 3861 put_page(e4b.bd_buddy_page); 3862 put_page(e4b.bd_bitmap_page); 3863 } 3864 ext4_unlock_group(sb, entry->efd_group); 3865 ext4_mb_unload_buddy(&e4b); 3866 3867 mb_debug(sb, "freed %d blocks in 1 structures\n", count); 3868 } 3869 3870 /* 3871 * This function is called by the jbd2 layer once the commit has finished, 3872 * so we know we can free the blocks that were released with that commit. 
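 *
 * Entries on sbi->s_freed_data_list are queued in commit order, so the
 * loop below cuts off the leading run whose efd_tid matches commit_tid and
 * then either returns those clusters to the buddy right away or, with
 * -o discard, queues them on s_discard_list for ext4_discard_work() to trim.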
3873 */ 3874 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3875 { 3876 struct ext4_sb_info *sbi = EXT4_SB(sb); 3877 struct ext4_free_data *entry, *tmp; 3878 struct list_head freed_data_list; 3879 struct list_head *cut_pos = NULL; 3880 bool wake; 3881 3882 INIT_LIST_HEAD(&freed_data_list); 3883 3884 spin_lock(&sbi->s_md_lock); 3885 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3886 if (entry->efd_tid != commit_tid) 3887 break; 3888 cut_pos = &entry->efd_list; 3889 } 3890 if (cut_pos) 3891 list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3892 cut_pos); 3893 spin_unlock(&sbi->s_md_lock); 3894 3895 list_for_each_entry(entry, &freed_data_list, efd_list) 3896 ext4_free_data_in_buddy(sb, entry); 3897 3898 if (test_opt(sb, DISCARD)) { 3899 spin_lock(&sbi->s_md_lock); 3900 wake = list_empty(&sbi->s_discard_list); 3901 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 3902 spin_unlock(&sbi->s_md_lock); 3903 if (wake) 3904 queue_work(system_unbound_wq, &sbi->s_discard_work); 3905 } else { 3906 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 3907 kmem_cache_free(ext4_free_data_cachep, entry); 3908 } 3909 } 3910 3911 int __init ext4_init_mballoc(void) 3912 { 3913 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 3914 SLAB_RECLAIM_ACCOUNT); 3915 if (ext4_pspace_cachep == NULL) 3916 goto out; 3917 3918 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 3919 SLAB_RECLAIM_ACCOUNT); 3920 if (ext4_ac_cachep == NULL) 3921 goto out_pa_free; 3922 3923 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 3924 SLAB_RECLAIM_ACCOUNT); 3925 if (ext4_free_data_cachep == NULL) 3926 goto out_ac_free; 3927 3928 return 0; 3929 3930 out_ac_free: 3931 kmem_cache_destroy(ext4_ac_cachep); 3932 out_pa_free: 3933 kmem_cache_destroy(ext4_pspace_cachep); 3934 out: 3935 return -ENOMEM; 3936 } 3937 3938 void ext4_exit_mballoc(void) 3939 { 3940 /* 3941 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 3942 * before destroying the slab cache. 
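	 * (Group PAs are freed via call_rcu(&pa->u.pa_rcu, ext4_mb_pa_callback),
	 * so without the barrier a pending callback could free into a cache
	 * that has already been destroyed below.)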
3943 */ 3944 rcu_barrier(); 3945 kmem_cache_destroy(ext4_pspace_cachep); 3946 kmem_cache_destroy(ext4_ac_cachep); 3947 kmem_cache_destroy(ext4_free_data_cachep); 3948 ext4_groupinfo_destroy_slabs(); 3949 } 3950 3951 3952 /* 3953 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3954 * Returns 0 if success or error code 3955 */ 3956 static noinline_for_stack int 3957 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 3958 handle_t *handle, unsigned int reserv_clstrs) 3959 { 3960 struct buffer_head *bitmap_bh = NULL; 3961 struct ext4_group_desc *gdp; 3962 struct buffer_head *gdp_bh; 3963 struct ext4_sb_info *sbi; 3964 struct super_block *sb; 3965 ext4_fsblk_t block; 3966 int err, len; 3967 3968 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3969 BUG_ON(ac->ac_b_ex.fe_len <= 0); 3970 3971 sb = ac->ac_sb; 3972 sbi = EXT4_SB(sb); 3973 3974 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 3975 if (IS_ERR(bitmap_bh)) { 3976 return PTR_ERR(bitmap_bh); 3977 } 3978 3979 BUFFER_TRACE(bitmap_bh, "getting write access"); 3980 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 3981 EXT4_JTR_NONE); 3982 if (err) 3983 goto out_err; 3984 3985 err = -EIO; 3986 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3987 if (!gdp) 3988 goto out_err; 3989 3990 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3991 ext4_free_group_clusters(sb, gdp)); 3992 3993 BUFFER_TRACE(gdp_bh, "get_write_access"); 3994 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 3995 if (err) 3996 goto out_err; 3997 3998 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3999 4000 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4001 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 4002 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 4003 "fs metadata", block, block+len); 4004 /* File system mounted not to panic on error 4005 * Fix the bitmap and return EFSCORRUPTED 4006 * We leak some of the blocks here. 4007 */ 4008 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 4009 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 4010 ac->ac_b_ex.fe_len); 4011 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 4012 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4013 if (!err) 4014 err = -EFSCORRUPTED; 4015 goto out_err; 4016 } 4017 4018 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 4019 #ifdef AGGRESSIVE_CHECK 4020 { 4021 int i; 4022 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 4023 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 4024 bitmap_bh->b_data)); 4025 } 4026 } 4027 #endif 4028 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 4029 ac->ac_b_ex.fe_len); 4030 if (ext4_has_group_desc_csum(sb) && 4031 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4032 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4033 ext4_free_group_clusters_set(sb, gdp, 4034 ext4_free_clusters_after_init(sb, 4035 ac->ac_b_ex.fe_group, gdp)); 4036 } 4037 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 4038 ext4_free_group_clusters_set(sb, gdp, len); 4039 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4040 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 4041 4042 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 4043 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 4044 /* 4045 * Now reduce the dirty block count also. 
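	 * (For a non-delalloc allocation, reserv_clstrs clusters were charged
	 * to s_dirtyclusters_counter up front when the request claimed free
	 * space; subtracting them here just drops that temporary charge, while
	 * delalloc writes release their reservation elsewhere.)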
Should not go negative 4046 */ 4047 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 4048 /* release all the reserved blocks if non delalloc */ 4049 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4050 reserv_clstrs); 4051 4052 if (sbi->s_log_groups_per_flex) { 4053 ext4_group_t flex_group = ext4_flex_group(sbi, 4054 ac->ac_b_ex.fe_group); 4055 atomic64_sub(ac->ac_b_ex.fe_len, 4056 &sbi_array_rcu_deref(sbi, s_flex_groups, 4057 flex_group)->free_clusters); 4058 } 4059 4060 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4061 if (err) 4062 goto out_err; 4063 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 4064 4065 out_err: 4066 brelse(bitmap_bh); 4067 return err; 4068 } 4069 4070 /* 4071 * Idempotent helper for Ext4 fast commit replay path to set the state of 4072 * blocks in bitmaps and update counters. 4073 */ 4074 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 4075 int len, int state) 4076 { 4077 struct buffer_head *bitmap_bh = NULL; 4078 struct ext4_group_desc *gdp; 4079 struct buffer_head *gdp_bh; 4080 struct ext4_sb_info *sbi = EXT4_SB(sb); 4081 ext4_group_t group; 4082 ext4_grpblk_t blkoff; 4083 int i, err; 4084 int already; 4085 unsigned int clen, clen_changed, thisgrp_len; 4086 4087 while (len > 0) { 4088 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 4089 4090 /* 4091 * Check to see if we are freeing blocks across a group 4092 * boundary. 4093 * In case of flex_bg, this can happen that (block, len) may 4094 * span across more than one group. In that case we need to 4095 * get the corresponding group metadata to work with. 4096 * For this we have goto again loop. 4097 */ 4098 thisgrp_len = min_t(unsigned int, (unsigned int)len, 4099 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 4100 clen = EXT4_NUM_B2C(sbi, thisgrp_len); 4101 4102 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 4103 ext4_error(sb, "Marking blocks in system zone - " 4104 "Block = %llu, len = %u", 4105 block, thisgrp_len); 4106 bitmap_bh = NULL; 4107 break; 4108 } 4109 4110 bitmap_bh = ext4_read_block_bitmap(sb, group); 4111 if (IS_ERR(bitmap_bh)) { 4112 err = PTR_ERR(bitmap_bh); 4113 bitmap_bh = NULL; 4114 break; 4115 } 4116 4117 err = -EIO; 4118 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 4119 if (!gdp) 4120 break; 4121 4122 ext4_lock_group(sb, group); 4123 already = 0; 4124 for (i = 0; i < clen; i++) 4125 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 4126 !state) 4127 already++; 4128 4129 clen_changed = clen - already; 4130 if (state) 4131 mb_set_bits(bitmap_bh->b_data, blkoff, clen); 4132 else 4133 mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 4134 if (ext4_has_group_desc_csum(sb) && 4135 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4136 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4137 ext4_free_group_clusters_set(sb, gdp, 4138 ext4_free_clusters_after_init(sb, group, gdp)); 4139 } 4140 if (state) 4141 clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 4142 else 4143 clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 4144 4145 ext4_free_group_clusters_set(sb, gdp, clen); 4146 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4147 ext4_group_desc_csum_set(sb, group, gdp); 4148 4149 ext4_unlock_group(sb, group); 4150 4151 if (sbi->s_log_groups_per_flex) { 4152 ext4_group_t flex_group = ext4_flex_group(sbi, group); 4153 struct flex_groups *fg = sbi_array_rcu_deref(sbi, 4154 s_flex_groups, flex_group); 4155 4156 if (state) 4157 atomic64_sub(clen_changed, &fg->free_clusters); 4158 else 4159 
atomic64_add(clen_changed, &fg->free_clusters); 4160 4161 } 4162 4163 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 4164 if (err) 4165 break; 4166 sync_dirty_buffer(bitmap_bh); 4167 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 4168 sync_dirty_buffer(gdp_bh); 4169 if (err) 4170 break; 4171 4172 block += thisgrp_len; 4173 len -= thisgrp_len; 4174 brelse(bitmap_bh); 4175 BUG_ON(len < 0); 4176 } 4177 4178 if (err) 4179 brelse(bitmap_bh); 4180 } 4181 4182 /* 4183 * here we normalize request for locality group 4184 * Group request are normalized to s_mb_group_prealloc, which goes to 4185 * s_strip if we set the same via mount option. 4186 * s_mb_group_prealloc can be configured via 4187 * /sys/fs/ext4/<partition>/mb_group_prealloc 4188 * 4189 * XXX: should we try to preallocate more than the group has now? 4190 */ 4191 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 4192 { 4193 struct super_block *sb = ac->ac_sb; 4194 struct ext4_locality_group *lg = ac->ac_lg; 4195 4196 BUG_ON(lg == NULL); 4197 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 4198 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 4199 } 4200 4201 /* 4202 * This function returns the next element to look at during inode 4203 * PA rbtree walk. We assume that we have held the inode PA rbtree lock 4204 * (ei->i_prealloc_lock) 4205 * 4206 * new_start The start of the range we want to compare 4207 * cur_start The existing start that we are comparing against 4208 * node The node of the rb_tree 4209 */ 4210 static inline struct rb_node* 4211 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node) 4212 { 4213 if (new_start < cur_start) 4214 return node->rb_left; 4215 else 4216 return node->rb_right; 4217 } 4218 4219 static inline void 4220 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, 4221 ext4_lblk_t start, ext4_lblk_t end) 4222 { 4223 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4224 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4225 struct ext4_prealloc_space *tmp_pa; 4226 ext4_lblk_t tmp_pa_start, tmp_pa_end; 4227 struct rb_node *iter; 4228 4229 read_lock(&ei->i_prealloc_lock); 4230 for (iter = ei->i_prealloc_node.rb_node; iter; 4231 iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) { 4232 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4233 pa_node.inode_node); 4234 tmp_pa_start = tmp_pa->pa_lstart; 4235 tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 4236 4237 spin_lock(&tmp_pa->pa_lock); 4238 if (tmp_pa->pa_deleted == 0) 4239 BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start)); 4240 spin_unlock(&tmp_pa->pa_lock); 4241 } 4242 read_unlock(&ei->i_prealloc_lock); 4243 } 4244 4245 /* 4246 * Given an allocation context "ac" and a range "start", "end", check 4247 * and adjust boundaries if the range overlaps with any of the existing 4248 * preallocatoins stored in the corresponding inode of the allocation context. 
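 *
 * For example (hypothetical layout): if the normalized range is [100, 200)
 * and the inode already has PAs covering [80, 120) and [180, 220), the
 * range is trimmed to [120, 180) so that it no longer touches either
 * neighbouring PA.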
4249 * 4250 * Parameters: 4251 * ac allocation context 4252 * start start of the new range 4253 * end end of the new range 4254 */ 4255 static inline void 4256 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, 4257 ext4_lblk_t *start, ext4_lblk_t *end) 4258 { 4259 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4260 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4261 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL; 4262 struct rb_node *iter; 4263 ext4_lblk_t new_start, new_end; 4264 ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1; 4265 4266 new_start = *start; 4267 new_end = *end; 4268 4269 /* 4270 * Adjust the normalized range so that it doesn't overlap with any 4271 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock 4272 * so it doesn't change underneath us. 4273 */ 4274 read_lock(&ei->i_prealloc_lock); 4275 4276 /* Step 1: find any one immediate neighboring PA of the normalized range */ 4277 for (iter = ei->i_prealloc_node.rb_node; iter; 4278 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4279 tmp_pa_start, iter)) { 4280 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4281 pa_node.inode_node); 4282 tmp_pa_start = tmp_pa->pa_lstart; 4283 tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 4284 4285 /* PA must not overlap original request */ 4286 spin_lock(&tmp_pa->pa_lock); 4287 if (tmp_pa->pa_deleted == 0) 4288 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 4289 ac->ac_o_ex.fe_logical < tmp_pa_start)); 4290 spin_unlock(&tmp_pa->pa_lock); 4291 } 4292 4293 /* 4294 * Step 2: check if the found PA is left or right neighbor and 4295 * get the other neighbor 4296 */ 4297 if (tmp_pa) { 4298 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 4299 struct rb_node *tmp; 4300 4301 left_pa = tmp_pa; 4302 tmp = rb_next(&left_pa->pa_node.inode_node); 4303 if (tmp) { 4304 right_pa = rb_entry(tmp, 4305 struct ext4_prealloc_space, 4306 pa_node.inode_node); 4307 } 4308 } else { 4309 struct rb_node *tmp; 4310 4311 right_pa = tmp_pa; 4312 tmp = rb_prev(&right_pa->pa_node.inode_node); 4313 if (tmp) { 4314 left_pa = rb_entry(tmp, 4315 struct ext4_prealloc_space, 4316 pa_node.inode_node); 4317 } 4318 } 4319 } 4320 4321 /* Step 3: get the non deleted neighbors */ 4322 if (left_pa) { 4323 for (iter = &left_pa->pa_node.inode_node;; 4324 iter = rb_prev(iter)) { 4325 if (!iter) { 4326 left_pa = NULL; 4327 break; 4328 } 4329 4330 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4331 pa_node.inode_node); 4332 left_pa = tmp_pa; 4333 spin_lock(&tmp_pa->pa_lock); 4334 if (tmp_pa->pa_deleted == 0) { 4335 spin_unlock(&tmp_pa->pa_lock); 4336 break; 4337 } 4338 spin_unlock(&tmp_pa->pa_lock); 4339 } 4340 } 4341 4342 if (right_pa) { 4343 for (iter = &right_pa->pa_node.inode_node;; 4344 iter = rb_next(iter)) { 4345 if (!iter) { 4346 right_pa = NULL; 4347 break; 4348 } 4349 4350 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4351 pa_node.inode_node); 4352 right_pa = tmp_pa; 4353 spin_lock(&tmp_pa->pa_lock); 4354 if (tmp_pa->pa_deleted == 0) { 4355 spin_unlock(&tmp_pa->pa_lock); 4356 break; 4357 } 4358 spin_unlock(&tmp_pa->pa_lock); 4359 } 4360 } 4361 4362 if (left_pa) { 4363 left_pa_end = 4364 left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len); 4365 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 4366 } 4367 4368 if (right_pa) { 4369 right_pa_start = right_pa->pa_lstart; 4370 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 4371 } 4372 4373 /* Step 4: trim our normalized range to not overlap with the 
neighbors */ 4374 if (left_pa) { 4375 if (left_pa_end > new_start) 4376 new_start = left_pa_end; 4377 } 4378 4379 if (right_pa) { 4380 if (right_pa_start < new_end) 4381 new_end = right_pa_start; 4382 } 4383 read_unlock(&ei->i_prealloc_lock); 4384 4385 /* XXX: extra loop to check we really don't overlap preallocations */ 4386 ext4_mb_pa_assert_overlap(ac, new_start, new_end); 4387 4388 *start = new_start; 4389 *end = new_end; 4390 } 4391 4392 /* 4393 * Normalization means making request better in terms of 4394 * size and alignment 4395 */ 4396 static noinline_for_stack void 4397 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4398 struct ext4_allocation_request *ar) 4399 { 4400 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4401 struct ext4_super_block *es = sbi->s_es; 4402 int bsbits, max; 4403 ext4_lblk_t end; 4404 loff_t size, start_off; 4405 loff_t orig_size __maybe_unused; 4406 ext4_lblk_t start; 4407 4408 /* do normalize only data requests, metadata requests 4409 do not need preallocation */ 4410 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4411 return; 4412 4413 /* sometime caller may want exact blocks */ 4414 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4415 return; 4416 4417 /* caller may indicate that preallocation isn't 4418 * required (it's a tail, for example) */ 4419 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4420 return; 4421 4422 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4423 ext4_mb_normalize_group_request(ac); 4424 return ; 4425 } 4426 4427 bsbits = ac->ac_sb->s_blocksize_bits; 4428 4429 /* first, let's learn actual file size 4430 * given current request is allocated */ 4431 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4432 size = size << bsbits; 4433 if (size < i_size_read(ac->ac_inode)) 4434 size = i_size_read(ac->ac_inode); 4435 orig_size = size; 4436 4437 /* max size of free chunks */ 4438 max = 2 << bsbits; 4439 4440 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 4441 (req <= (size) || max <= (chunk_size)) 4442 4443 /* first, try to predict filesize */ 4444 /* XXX: should this table be tunable? */ 4445 start_off = 0; 4446 if (size <= 16 * 1024) { 4447 size = 16 * 1024; 4448 } else if (size <= 32 * 1024) { 4449 size = 32 * 1024; 4450 } else if (size <= 64 * 1024) { 4451 size = 64 * 1024; 4452 } else if (size <= 128 * 1024) { 4453 size = 128 * 1024; 4454 } else if (size <= 256 * 1024) { 4455 size = 256 * 1024; 4456 } else if (size <= 512 * 1024) { 4457 size = 512 * 1024; 4458 } else if (size <= 1024 * 1024) { 4459 size = 1024 * 1024; 4460 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4461 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4462 (21 - bsbits)) << 21; 4463 size = 2 * 1024 * 1024; 4464 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4465 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4466 (22 - bsbits)) << 22; 4467 size = 4 * 1024 * 1024; 4468 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), 4469 (8<<20)>>bsbits, max, 8 * 1024)) { 4470 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4471 (23 - bsbits)) << 23; 4472 size = 8 * 1024 * 1024; 4473 } else { 4474 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4475 size = (loff_t) EXT4_C2B(sbi, 4476 ac->ac_o_ex.fe_len) << bsbits; 4477 } 4478 size = size >> bsbits; 4479 start = start_off >> bsbits; 4480 4481 /* 4482 * For tiny groups (smaller than 8MB) the chosen allocation 4483 * alignment may be larger than group size. 
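	 * (For instance, the 8 MB bucket in the size table above aligns
	 * start_off to an 8 MB boundary, which is larger than an entire
	 * group on filesystems created with unusually few blocks per group.)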
Make sure the 4484 * alignment does not move allocation to a different group which 4485 * makes mballoc fail assertions later. 4486 */ 4487 start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4488 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4489 4490 /* don't cover already allocated blocks in selected range */ 4491 if (ar->pleft && start <= ar->lleft) { 4492 size -= ar->lleft + 1 - start; 4493 start = ar->lleft + 1; 4494 } 4495 if (ar->pright && start + size - 1 >= ar->lright) 4496 size -= start + size - ar->lright; 4497 4498 /* 4499 * Trim allocation request for filesystems with artificially small 4500 * groups. 4501 */ 4502 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4503 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4504 4505 end = start + size; 4506 4507 ext4_mb_pa_adjust_overlap(ac, &start, &end); 4508 4509 size = end - start; 4510 4511 /* 4512 * In this function "start" and "size" are normalized for better 4513 * alignment and length such that we could preallocate more blocks. 4514 * This normalization is done such that original request of 4515 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and 4516 * "size" boundaries. 4517 * (Note fe_len can be relaxed since FS block allocation API does not 4518 * provide gurantee on number of contiguous blocks allocation since that 4519 * depends upon free space left, etc). 4520 * In case of inode pa, later we use the allocated blocks 4521 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated 4522 * range of goal/best blocks [start, size] to put it at the 4523 * ac_o_ex.fe_logical extent of this inode. 4524 * (See ext4_mb_use_inode_pa() for more details) 4525 */ 4526 if (start + size <= ac->ac_o_ex.fe_logical || 4527 start > ac->ac_o_ex.fe_logical) { 4528 ext4_msg(ac->ac_sb, KERN_ERR, 4529 "start %lu, size %lu, fe_logical %lu", 4530 (unsigned long) start, (unsigned long) size, 4531 (unsigned long) ac->ac_o_ex.fe_logical); 4532 BUG(); 4533 } 4534 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4535 4536 /* now prepare goal request */ 4537 4538 /* XXX: is it better to align blocks WRT to logical 4539 * placement or satisfy big request as is */ 4540 ac->ac_g_ex.fe_logical = start; 4541 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4542 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; 4543 4544 /* define goal start in order to merge */ 4545 if (ar->pright && (ar->lright == (start + size)) && 4546 ar->pright >= size && 4547 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { 4548 /* merge to the right */ 4549 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4550 &ac->ac_g_ex.fe_group, 4551 &ac->ac_g_ex.fe_start); 4552 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4553 } 4554 if (ar->pleft && (ar->lleft + 1 == start) && 4555 ar->pleft + 1 < ext4_blocks_count(es)) { 4556 /* merge to the left */ 4557 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4558 &ac->ac_g_ex.fe_group, 4559 &ac->ac_g_ex.fe_start); 4560 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4561 } 4562 4563 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 4564 orig_size, start); 4565 } 4566 4567 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4568 { 4569 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4570 4571 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4572 atomic_inc(&sbi->s_bal_reqs); 4573 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4574 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4575 atomic_inc(&sbi->s_bal_success); 4576 4577 atomic_add(ac->ac_found, 
&sbi->s_bal_ex_scanned); 4578 for (int i=0; i<EXT4_MB_NUM_CRS; i++) { 4579 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); 4580 } 4581 4582 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4583 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4584 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4585 atomic_inc(&sbi->s_bal_goals); 4586 /* did we allocate as much as normalizer originally wanted? */ 4587 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) 4588 atomic_inc(&sbi->s_bal_len_goals); 4589 4590 if (ac->ac_found > sbi->s_mb_max_to_scan) 4591 atomic_inc(&sbi->s_bal_breaks); 4592 } 4593 4594 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4595 trace_ext4_mballoc_alloc(ac); 4596 else 4597 trace_ext4_mballoc_prealloc(ac); 4598 } 4599 4600 /* 4601 * Called on failure; free up any blocks from the inode PA for this 4602 * context. We don't need this for MB_GROUP_PA because we only change 4603 * pa_free in ext4_mb_release_context(), but on failure, we've already 4604 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4605 */ 4606 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4607 { 4608 struct ext4_prealloc_space *pa = ac->ac_pa; 4609 struct ext4_buddy e4b; 4610 int err; 4611 4612 if (pa == NULL) { 4613 if (ac->ac_f_ex.fe_len == 0) 4614 return; 4615 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4616 if (WARN_RATELIMIT(err, 4617 "ext4: mb_load_buddy failed (%d)", err)) 4618 /* 4619 * This should never happen since we pin the 4620 * pages in the ext4_allocation_context so 4621 * ext4_mb_load_buddy() should never fail. 4622 */ 4623 return; 4624 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4625 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4626 ac->ac_f_ex.fe_len); 4627 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4628 ext4_mb_unload_buddy(&e4b); 4629 return; 4630 } 4631 if (pa->pa_type == MB_INODE_PA) { 4632 spin_lock(&pa->pa_lock); 4633 pa->pa_free += ac->ac_b_ex.fe_len; 4634 spin_unlock(&pa->pa_lock); 4635 } 4636 } 4637 4638 /* 4639 * use blocks preallocated to inode 4640 */ 4641 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4642 struct ext4_prealloc_space *pa) 4643 { 4644 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4645 ext4_fsblk_t start; 4646 ext4_fsblk_t end; 4647 int len; 4648 4649 /* found preallocated blocks, use them */ 4650 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4651 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4652 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4653 len = EXT4_NUM_B2C(sbi, end - start); 4654 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4655 &ac->ac_b_ex.fe_start); 4656 ac->ac_b_ex.fe_len = len; 4657 ac->ac_status = AC_STATUS_FOUND; 4658 ac->ac_pa = pa; 4659 4660 BUG_ON(start < pa->pa_pstart); 4661 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4662 BUG_ON(pa->pa_free < len); 4663 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4664 pa->pa_free -= len; 4665 4666 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4667 } 4668 4669 /* 4670 * use blocks preallocated to locality group 4671 */ 4672 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4673 struct ext4_prealloc_space *pa) 4674 { 4675 unsigned int len = ac->ac_o_ex.fe_len; 4676 4677 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4678 &ac->ac_b_ex.fe_group, 4679 &ac->ac_b_ex.fe_start); 4680 ac->ac_b_ex.fe_len = len; 4681 ac->ac_status = AC_STATUS_FOUND; 4682 ac->ac_pa = pa; 4683 
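	/*
	 * Note: for a group PA, pa_lstart is set equal to pa_pstart (see
	 * ext4_mb_new_group_pa()), so no logical-offset arithmetic is needed
	 * here; the extent simply starts at the PA's current physical start
	 * and is ac_o_ex.fe_len clusters long.
	 */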
	/*
	 * We don't correct pa_pstart or pa_len here to avoid a possible
	 * race when the group is being loaded concurrently; instead we
	 * correct the pa later, after blocks are marked in the on-disk
	 * bitmap -- see ext4_mb_release_context().
	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
	 */
	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
		 pa->pa_lstart, len, pa);
}

/*
 * Return the prealloc space that has the minimal distance from the goal
 * block. @cpa is the prealloc space with the currently known minimal
 * distance from the goal block.
 */
static struct ext4_prealloc_space *
ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
			struct ext4_prealloc_space *pa,
			struct ext4_prealloc_space *cpa)
{
	ext4_fsblk_t cur_distance, new_distance;

	if (cpa == NULL) {
		atomic_inc(&pa->pa_count);
		return pa;
	}
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);

	if (cur_distance <= new_distance)
		return cpa;

	/* drop the previous reference */
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	return pa;
}

/*
 * Check if the found pa meets EXT4_MB_HINT_GOAL_ONLY.
 */
static bool
ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
		      struct ext4_prealloc_space *pa)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_fsblk_t start;

	if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
		return true;

	/*
	 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
	 * in ext4_mb_normalize_request and will stay the same as ac_o_ex
	 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
	 * consistent with ext4_mb_find_by_goal.
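	 *
	 * In other words: the PA is only usable if the goal block maps to
	 * exactly the corresponding physical block inside the PA, and the PA
	 * still has enough clusters beyond that offset to cover the whole
	 * ac_g_ex.fe_len request.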
4741 */ 4742 start = pa->pa_pstart + 4743 (ac->ac_g_ex.fe_logical - pa->pa_lstart); 4744 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) 4745 return false; 4746 4747 if (ac->ac_g_ex.fe_len > pa->pa_len - 4748 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) 4749 return false; 4750 4751 return true; 4752 } 4753 4754 /* 4755 * search goal blocks in preallocated space 4756 */ 4757 static noinline_for_stack bool 4758 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4759 { 4760 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4761 int order, i; 4762 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4763 struct ext4_locality_group *lg; 4764 struct ext4_prealloc_space *tmp_pa, *cpa = NULL; 4765 ext4_lblk_t tmp_pa_start, tmp_pa_end; 4766 struct rb_node *iter; 4767 ext4_fsblk_t goal_block; 4768 4769 /* only data can be preallocated */ 4770 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4771 return false; 4772 4773 /* first, try per-file preallocation */ 4774 read_lock(&ei->i_prealloc_lock); 4775 for (iter = ei->i_prealloc_node.rb_node; iter; 4776 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4777 tmp_pa_start, iter)) { 4778 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4779 pa_node.inode_node); 4780 4781 /* all fields in this condition don't change, 4782 * so we can skip locking for them */ 4783 tmp_pa_start = tmp_pa->pa_lstart; 4784 tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 4785 4786 /* original request start doesn't lie in this PA */ 4787 if (ac->ac_o_ex.fe_logical < tmp_pa_start || 4788 ac->ac_o_ex.fe_logical >= tmp_pa_end) 4789 continue; 4790 4791 /* non-extent files can't have physical blocks past 2^32 */ 4792 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4793 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4794 EXT4_MAX_BLOCK_FILE_PHYS)) { 4795 /* 4796 * Since PAs don't overlap, we won't find any 4797 * other PA to satisfy this. 4798 */ 4799 break; 4800 } 4801 4802 /* found preallocated blocks, use them */ 4803 spin_lock(&tmp_pa->pa_lock); 4804 if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free && 4805 likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { 4806 atomic_inc(&tmp_pa->pa_count); 4807 ext4_mb_use_inode_pa(ac, tmp_pa); 4808 spin_unlock(&tmp_pa->pa_lock); 4809 read_unlock(&ei->i_prealloc_lock); 4810 return true; 4811 } 4812 spin_unlock(&tmp_pa->pa_lock); 4813 } 4814 read_unlock(&ei->i_prealloc_lock); 4815 4816 /* can we use group allocation? */ 4817 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 4818 return false; 4819 4820 /* inode may have no locality group for some reason */ 4821 lg = ac->ac_lg; 4822 if (lg == NULL) 4823 return false; 4824 order = fls(ac->ac_o_ex.fe_len) - 1; 4825 if (order > PREALLOC_TB_SIZE - 1) 4826 /* The max size of hash table is PREALLOC_TB_SIZE */ 4827 order = PREALLOC_TB_SIZE - 1; 4828 4829 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 4830 /* 4831 * search for the prealloc space that is having 4832 * minimal distance from the goal block. 
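	 * The lg_prealloc_list buckets are indexed by size order (a group PA
	 * is added to the bucket matching its pa_free, see the comment at the
	 * end of ext4_mb_new_group_pa()), so starting the scan at 'order'
	 * skips buckets that cannot satisfy fe_len; among candidates with
	 * enough pa_free, the one closest to goal_block wins via
	 * ext4_mb_check_group_pa().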
4833 */ 4834 for (i = order; i < PREALLOC_TB_SIZE; i++) { 4835 rcu_read_lock(); 4836 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], 4837 pa_node.lg_list) { 4838 spin_lock(&tmp_pa->pa_lock); 4839 if (tmp_pa->pa_deleted == 0 && 4840 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { 4841 4842 cpa = ext4_mb_check_group_pa(goal_block, 4843 tmp_pa, cpa); 4844 } 4845 spin_unlock(&tmp_pa->pa_lock); 4846 } 4847 rcu_read_unlock(); 4848 } 4849 if (cpa) { 4850 ext4_mb_use_group_pa(ac, cpa); 4851 return true; 4852 } 4853 return false; 4854 } 4855 4856 /* 4857 * the function goes through all block freed in the group 4858 * but not yet committed and marks them used in in-core bitmap. 4859 * buddy must be generated from this bitmap 4860 * Need to be called with the ext4 group lock held 4861 */ 4862 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 4863 ext4_group_t group) 4864 { 4865 struct rb_node *n; 4866 struct ext4_group_info *grp; 4867 struct ext4_free_data *entry; 4868 4869 grp = ext4_get_group_info(sb, group); 4870 if (!grp) 4871 return; 4872 n = rb_first(&(grp->bb_free_root)); 4873 4874 while (n) { 4875 entry = rb_entry(n, struct ext4_free_data, efd_node); 4876 mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 4877 n = rb_next(n); 4878 } 4879 return; 4880 } 4881 4882 /* 4883 * the function goes through all preallocation in this group and marks them 4884 * used in in-core bitmap. buddy must be generated from this bitmap 4885 * Need to be called with ext4 group lock held 4886 */ 4887 static noinline_for_stack 4888 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4889 ext4_group_t group) 4890 { 4891 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4892 struct ext4_prealloc_space *pa; 4893 struct list_head *cur; 4894 ext4_group_t groupnr; 4895 ext4_grpblk_t start; 4896 int preallocated = 0; 4897 int len; 4898 4899 if (!grp) 4900 return; 4901 4902 /* all form of preallocation discards first load group, 4903 * so the only competing code is preallocation use. 
4904 * we don't need any locking here 4905 * notice we do NOT ignore preallocations with pa_deleted 4906 * otherwise we could leave used blocks available for 4907 * allocation in buddy when concurrent ext4_mb_put_pa() 4908 * is dropping preallocation 4909 */ 4910 list_for_each(cur, &grp->bb_prealloc_list) { 4911 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4912 spin_lock(&pa->pa_lock); 4913 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4914 &groupnr, &start); 4915 len = pa->pa_len; 4916 spin_unlock(&pa->pa_lock); 4917 if (unlikely(len == 0)) 4918 continue; 4919 BUG_ON(groupnr != group); 4920 mb_set_bits(bitmap, start, len); 4921 preallocated += len; 4922 } 4923 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4924 } 4925 4926 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 4927 struct ext4_prealloc_space *pa) 4928 { 4929 struct ext4_inode_info *ei; 4930 4931 if (pa->pa_deleted) { 4932 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 4933 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 4934 pa->pa_len); 4935 return; 4936 } 4937 4938 pa->pa_deleted = 1; 4939 4940 if (pa->pa_type == MB_INODE_PA) { 4941 ei = EXT4_I(pa->pa_inode); 4942 atomic_dec(&ei->i_prealloc_active); 4943 } 4944 } 4945 4946 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 4947 { 4948 BUG_ON(!pa); 4949 BUG_ON(atomic_read(&pa->pa_count)); 4950 BUG_ON(pa->pa_deleted == 0); 4951 kmem_cache_free(ext4_pspace_cachep, pa); 4952 } 4953 4954 static void ext4_mb_pa_callback(struct rcu_head *head) 4955 { 4956 struct ext4_prealloc_space *pa; 4957 4958 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 4959 ext4_mb_pa_free(pa); 4960 } 4961 4962 /* 4963 * drops a reference to preallocated space descriptor 4964 * if this was the last reference and the space is consumed 4965 */ 4966 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4967 struct super_block *sb, struct ext4_prealloc_space *pa) 4968 { 4969 ext4_group_t grp; 4970 ext4_fsblk_t grp_blk; 4971 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4972 4973 /* in this short window concurrent discard can set pa_deleted */ 4974 spin_lock(&pa->pa_lock); 4975 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 4976 spin_unlock(&pa->pa_lock); 4977 return; 4978 } 4979 4980 if (pa->pa_deleted == 1) { 4981 spin_unlock(&pa->pa_lock); 4982 return; 4983 } 4984 4985 ext4_mb_mark_pa_deleted(sb, pa); 4986 spin_unlock(&pa->pa_lock); 4987 4988 grp_blk = pa->pa_pstart; 4989 /* 4990 * If doing group-based preallocation, pa_pstart may be in the 4991 * next group when pa is used up 4992 */ 4993 if (pa->pa_type == MB_GROUP_PA) 4994 grp_blk--; 4995 4996 grp = ext4_get_group_number(sb, grp_blk); 4997 4998 /* 4999 * possible race: 5000 * 5001 * P1 (buddy init) P2 (regular allocation) 5002 * find block B in PA 5003 * copy on-disk bitmap to buddy 5004 * mark B in on-disk bitmap 5005 * drop PA from group 5006 * mark all PAs in buddy 5007 * 5008 * thus, P1 initializes buddy with B available. 
to prevent this 5009 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 5010 * against that pair 5011 */ 5012 ext4_lock_group(sb, grp); 5013 list_del(&pa->pa_group_list); 5014 ext4_unlock_group(sb, grp); 5015 5016 if (pa->pa_type == MB_INODE_PA) { 5017 write_lock(pa->pa_node_lock.inode_lock); 5018 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5019 write_unlock(pa->pa_node_lock.inode_lock); 5020 ext4_mb_pa_free(pa); 5021 } else { 5022 spin_lock(pa->pa_node_lock.lg_lock); 5023 list_del_rcu(&pa->pa_node.lg_list); 5024 spin_unlock(pa->pa_node_lock.lg_lock); 5025 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5026 } 5027 } 5028 5029 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new) 5030 { 5031 struct rb_node **iter = &root->rb_node, *parent = NULL; 5032 struct ext4_prealloc_space *iter_pa, *new_pa; 5033 ext4_lblk_t iter_start, new_start; 5034 5035 while (*iter) { 5036 iter_pa = rb_entry(*iter, struct ext4_prealloc_space, 5037 pa_node.inode_node); 5038 new_pa = rb_entry(new, struct ext4_prealloc_space, 5039 pa_node.inode_node); 5040 iter_start = iter_pa->pa_lstart; 5041 new_start = new_pa->pa_lstart; 5042 5043 parent = *iter; 5044 if (new_start < iter_start) 5045 iter = &((*iter)->rb_left); 5046 else 5047 iter = &((*iter)->rb_right); 5048 } 5049 5050 rb_link_node(new, parent, iter); 5051 rb_insert_color(new, root); 5052 } 5053 5054 /* 5055 * creates new preallocated space for given inode 5056 */ 5057 static noinline_for_stack void 5058 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 5059 { 5060 struct super_block *sb = ac->ac_sb; 5061 struct ext4_sb_info *sbi = EXT4_SB(sb); 5062 struct ext4_prealloc_space *pa; 5063 struct ext4_group_info *grp; 5064 struct ext4_inode_info *ei; 5065 5066 /* preallocate only when found space is larger then requested */ 5067 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5068 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5069 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5070 BUG_ON(ac->ac_pa == NULL); 5071 5072 pa = ac->ac_pa; 5073 5074 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { 5075 int new_bex_start; 5076 int new_bex_end; 5077 5078 /* we can't allocate as much as normalizer wants. 5079 * so, found space must get proper lstart 5080 * to cover original request */ 5081 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 5082 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 5083 5084 /* 5085 * Use the below logic for adjusting best extent as it keeps 5086 * fragmentation in check while ensuring logical range of best 5087 * extent doesn't overflow out of goal extent: 5088 * 5089 * 1. Check if best ex can be kept at end of goal (before 5090 * cr_best_avail trimmed it) and still cover original start 5091 * 2. Else, check if best ex can be kept at start of goal and 5092 * still cover original start 5093 * 3. Else, keep the best ex at start of original request. 
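		 *
		 * For example (hypothetical numbers, in blocks, assuming the
		 * cluster size equals the block size): with a goal extent of
		 * [0, 64) and a best extent 16 blocks long, an original start
		 * of 50 gives [48, 64) (rule 1), an original start of 10 gives
		 * [0, 16) (rule 2), and an original start of 40 falls through
		 * to [40, 56) (rule 3).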
5094 */ 5095 new_bex_end = ac->ac_g_ex.fe_logical + 5096 EXT4_C2B(sbi, ac->ac_orig_goal_len); 5097 new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5098 if (ac->ac_o_ex.fe_logical >= new_bex_start) 5099 goto adjust_bex; 5100 5101 new_bex_start = ac->ac_g_ex.fe_logical; 5102 new_bex_end = 5103 new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5104 if (ac->ac_o_ex.fe_logical < new_bex_end) 5105 goto adjust_bex; 5106 5107 new_bex_start = ac->ac_o_ex.fe_logical; 5108 new_bex_end = 5109 new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5110 5111 adjust_bex: 5112 ac->ac_b_ex.fe_logical = new_bex_start; 5113 5114 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 5115 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 5116 BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical + 5117 EXT4_C2B(sbi, ac->ac_orig_goal_len))); 5118 } 5119 5120 pa->pa_lstart = ac->ac_b_ex.fe_logical; 5121 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5122 pa->pa_len = ac->ac_b_ex.fe_len; 5123 pa->pa_free = pa->pa_len; 5124 spin_lock_init(&pa->pa_lock); 5125 INIT_LIST_HEAD(&pa->pa_group_list); 5126 pa->pa_deleted = 0; 5127 pa->pa_type = MB_INODE_PA; 5128 5129 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5130 pa->pa_len, pa->pa_lstart); 5131 trace_ext4_mb_new_inode_pa(ac, pa); 5132 5133 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 5134 ext4_mb_use_inode_pa(ac, pa); 5135 5136 ei = EXT4_I(ac->ac_inode); 5137 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5138 if (!grp) 5139 return; 5140 5141 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; 5142 pa->pa_inode = ac->ac_inode; 5143 5144 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5145 5146 write_lock(pa->pa_node_lock.inode_lock); 5147 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); 5148 write_unlock(pa->pa_node_lock.inode_lock); 5149 atomic_inc(&ei->i_prealloc_active); 5150 } 5151 5152 /* 5153 * creates new preallocated space for locality group inodes belongs to 5154 */ 5155 static noinline_for_stack void 5156 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 5157 { 5158 struct super_block *sb = ac->ac_sb; 5159 struct ext4_locality_group *lg; 5160 struct ext4_prealloc_space *pa; 5161 struct ext4_group_info *grp; 5162 5163 /* preallocate only when found space is larger then requested */ 5164 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5165 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5166 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5167 BUG_ON(ac->ac_pa == NULL); 5168 5169 pa = ac->ac_pa; 5170 5171 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5172 pa->pa_lstart = pa->pa_pstart; 5173 pa->pa_len = ac->ac_b_ex.fe_len; 5174 pa->pa_free = pa->pa_len; 5175 spin_lock_init(&pa->pa_lock); 5176 INIT_LIST_HEAD(&pa->pa_node.lg_list); 5177 INIT_LIST_HEAD(&pa->pa_group_list); 5178 pa->pa_deleted = 0; 5179 pa->pa_type = MB_GROUP_PA; 5180 5181 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5182 pa->pa_len, pa->pa_lstart); 5183 trace_ext4_mb_new_group_pa(ac, pa); 5184 5185 ext4_mb_use_group_pa(ac, pa); 5186 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 5187 5188 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5189 if (!grp) 5190 return; 5191 lg = ac->ac_lg; 5192 BUG_ON(lg == NULL); 5193 5194 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; 5195 pa->pa_inode = NULL; 5196 5197 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5198 5199 /* 5200 * We will later add the new pa to the right bucket 5201 * after updating the pa_free in 
ext4_mb_release_context 5202 */ 5203 } 5204 5205 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 5206 { 5207 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5208 ext4_mb_new_group_pa(ac); 5209 else 5210 ext4_mb_new_inode_pa(ac); 5211 } 5212 5213 /* 5214 * finds all unused blocks in on-disk bitmap, frees them in 5215 * in-core bitmap and buddy. 5216 * @pa must be unlinked from inode and group lists, so that 5217 * nobody else can find/use it. 5218 * the caller MUST hold group/inode locks. 5219 * TODO: optimize the case when there are no in-core structures yet 5220 */ 5221 static noinline_for_stack int 5222 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 5223 struct ext4_prealloc_space *pa) 5224 { 5225 struct super_block *sb = e4b->bd_sb; 5226 struct ext4_sb_info *sbi = EXT4_SB(sb); 5227 unsigned int end; 5228 unsigned int next; 5229 ext4_group_t group; 5230 ext4_grpblk_t bit; 5231 unsigned long long grp_blk_start; 5232 int free = 0; 5233 5234 BUG_ON(pa->pa_deleted == 0); 5235 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5236 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5237 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5238 end = bit + pa->pa_len; 5239 5240 while (bit < end) { 5241 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5242 if (bit >= end) 5243 break; 5244 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5245 mb_debug(sb, "free preallocated %u/%u in group %u\n", 5246 (unsigned) ext4_group_first_block_no(sb, group) + bit, 5247 (unsigned) next - bit, (unsigned) group); 5248 free += next - bit; 5249 5250 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 5251 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 5252 EXT4_C2B(sbi, bit)), 5253 next - bit); 5254 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5255 bit = next + 1; 5256 } 5257 if (free != pa->pa_free) { 5258 ext4_msg(e4b->bd_sb, KERN_CRIT, 5259 "pa %p: logic %lu, phys. %lu, len %d", 5260 pa, (unsigned long) pa->pa_lstart, 5261 (unsigned long) pa->pa_pstart, 5262 pa->pa_len); 5263 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 5264 free, pa->pa_free); 5265 /* 5266 * pa is already deleted so we use the value obtained 5267 * from the bitmap and continue. 
5268 */ 5269 } 5270 atomic_add(free, &sbi->s_mb_discarded); 5271 5272 return 0; 5273 } 5274 5275 static noinline_for_stack int 5276 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 5277 struct ext4_prealloc_space *pa) 5278 { 5279 struct super_block *sb = e4b->bd_sb; 5280 ext4_group_t group; 5281 ext4_grpblk_t bit; 5282 5283 trace_ext4_mb_release_group_pa(sb, pa); 5284 BUG_ON(pa->pa_deleted == 0); 5285 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5286 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5287 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5288 e4b->bd_group, group, pa->pa_pstart); 5289 return 0; 5290 } 5291 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5292 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 5293 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5294 5295 return 0; 5296 } 5297 5298 /* 5299 * releases all preallocations in given group 5300 * 5301 * first, we need to decide discard policy: 5302 * - when do we discard 5303 * 1) ENOSPC 5304 * - how many do we discard 5305 * 1) how many requested 5306 */ 5307 static noinline_for_stack int 5308 ext4_mb_discard_group_preallocations(struct super_block *sb, 5309 ext4_group_t group, int *busy) 5310 { 5311 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5312 struct buffer_head *bitmap_bh = NULL; 5313 struct ext4_prealloc_space *pa, *tmp; 5314 struct list_head list; 5315 struct ext4_buddy e4b; 5316 struct ext4_inode_info *ei; 5317 int err; 5318 int free = 0; 5319 5320 if (!grp) 5321 return 0; 5322 mb_debug(sb, "discard preallocation for group %u\n", group); 5323 if (list_empty(&grp->bb_prealloc_list)) 5324 goto out_dbg; 5325 5326 bitmap_bh = ext4_read_block_bitmap(sb, group); 5327 if (IS_ERR(bitmap_bh)) { 5328 err = PTR_ERR(bitmap_bh); 5329 ext4_error_err(sb, -err, 5330 "Error %d reading block bitmap for %u", 5331 err, group); 5332 goto out_dbg; 5333 } 5334 5335 err = ext4_mb_load_buddy(sb, group, &e4b); 5336 if (err) { 5337 ext4_warning(sb, "Error %d loading buddy information for %u", 5338 err, group); 5339 put_bh(bitmap_bh); 5340 goto out_dbg; 5341 } 5342 5343 INIT_LIST_HEAD(&list); 5344 ext4_lock_group(sb, group); 5345 list_for_each_entry_safe(pa, tmp, 5346 &grp->bb_prealloc_list, pa_group_list) { 5347 spin_lock(&pa->pa_lock); 5348 if (atomic_read(&pa->pa_count)) { 5349 spin_unlock(&pa->pa_lock); 5350 *busy = 1; 5351 continue; 5352 } 5353 if (pa->pa_deleted) { 5354 spin_unlock(&pa->pa_lock); 5355 continue; 5356 } 5357 5358 /* seems this one can be freed ... */ 5359 ext4_mb_mark_pa_deleted(sb, pa); 5360 5361 if (!free) 5362 this_cpu_inc(discard_pa_seq); 5363 5364 /* we can trust pa_free ... 
*/ 5365 free += pa->pa_free; 5366 5367 spin_unlock(&pa->pa_lock); 5368 5369 list_del(&pa->pa_group_list); 5370 list_add(&pa->u.pa_tmp_list, &list); 5371 } 5372 5373 /* now free all selected PAs */ 5374 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5375 5376 /* remove from object (inode or locality group) */ 5377 if (pa->pa_type == MB_GROUP_PA) { 5378 spin_lock(pa->pa_node_lock.lg_lock); 5379 list_del_rcu(&pa->pa_node.lg_list); 5380 spin_unlock(pa->pa_node_lock.lg_lock); 5381 } else { 5382 write_lock(pa->pa_node_lock.inode_lock); 5383 ei = EXT4_I(pa->pa_inode); 5384 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5385 write_unlock(pa->pa_node_lock.inode_lock); 5386 } 5387 5388 list_del(&pa->u.pa_tmp_list); 5389 5390 if (pa->pa_type == MB_GROUP_PA) { 5391 ext4_mb_release_group_pa(&e4b, pa); 5392 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5393 } else { 5394 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5395 ext4_mb_pa_free(pa); 5396 } 5397 } 5398 5399 ext4_unlock_group(sb, group); 5400 ext4_mb_unload_buddy(&e4b); 5401 put_bh(bitmap_bh); 5402 out_dbg: 5403 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 5404 free, group, grp->bb_free); 5405 return free; 5406 } 5407 5408 /* 5409 * releases all non-used preallocated blocks for given inode 5410 * 5411 * It's important to discard preallocations under i_data_sem 5412 * We don't want another block to be served from the prealloc 5413 * space when we are discarding the inode prealloc space. 5414 * 5415 * FIXME!! Make sure it is valid at all the call sites 5416 */ 5417 void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 5418 { 5419 struct ext4_inode_info *ei = EXT4_I(inode); 5420 struct super_block *sb = inode->i_sb; 5421 struct buffer_head *bitmap_bh = NULL; 5422 struct ext4_prealloc_space *pa, *tmp; 5423 ext4_group_t group = 0; 5424 struct list_head list; 5425 struct ext4_buddy e4b; 5426 struct rb_node *iter; 5427 int err; 5428 5429 if (!S_ISREG(inode->i_mode)) { 5430 return; 5431 } 5432 5433 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 5434 return; 5435 5436 mb_debug(sb, "discard preallocation for inode %lu\n", 5437 inode->i_ino); 5438 trace_ext4_discard_preallocations(inode, 5439 atomic_read(&ei->i_prealloc_active), needed); 5440 5441 INIT_LIST_HEAD(&list); 5442 5443 if (needed == 0) 5444 needed = UINT_MAX; 5445 5446 repeat: 5447 /* first, collect all pa's in the inode */ 5448 write_lock(&ei->i_prealloc_lock); 5449 for (iter = rb_first(&ei->i_prealloc_node); iter && needed; 5450 iter = rb_next(iter)) { 5451 pa = rb_entry(iter, struct ext4_prealloc_space, 5452 pa_node.inode_node); 5453 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); 5454 5455 spin_lock(&pa->pa_lock); 5456 if (atomic_read(&pa->pa_count)) { 5457 /* this shouldn't happen often - nobody should 5458 * use preallocation while we're discarding it */ 5459 spin_unlock(&pa->pa_lock); 5460 write_unlock(&ei->i_prealloc_lock); 5461 ext4_msg(sb, KERN_ERR, 5462 "uh-oh! 
used pa while discarding"); 5463 WARN_ON(1); 5464 schedule_timeout_uninterruptible(HZ); 5465 goto repeat; 5466 5467 } 5468 if (pa->pa_deleted == 0) { 5469 ext4_mb_mark_pa_deleted(sb, pa); 5470 spin_unlock(&pa->pa_lock); 5471 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5472 list_add(&pa->u.pa_tmp_list, &list); 5473 needed--; 5474 continue; 5475 } 5476 5477 /* someone is deleting pa right now */ 5478 spin_unlock(&pa->pa_lock); 5479 write_unlock(&ei->i_prealloc_lock); 5480 5481 /* we have to wait here because pa_deleted 5482 * doesn't mean pa is already unlinked from 5483 * the list. as we might be called from 5484 * ->clear_inode() the inode will get freed 5485 * and concurrent thread which is unlinking 5486 * pa from inode's list may access already 5487 * freed memory, bad-bad-bad */ 5488 5489 /* XXX: if this happens too often, we can 5490 * add a flag to force wait only in case 5491 * of ->clear_inode(), but not in case of 5492 * regular truncate */ 5493 schedule_timeout_uninterruptible(HZ); 5494 goto repeat; 5495 } 5496 write_unlock(&ei->i_prealloc_lock); 5497 5498 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5499 BUG_ON(pa->pa_type != MB_INODE_PA); 5500 group = ext4_get_group_number(sb, pa->pa_pstart); 5501 5502 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5503 GFP_NOFS|__GFP_NOFAIL); 5504 if (err) { 5505 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 5506 err, group); 5507 continue; 5508 } 5509 5510 bitmap_bh = ext4_read_block_bitmap(sb, group); 5511 if (IS_ERR(bitmap_bh)) { 5512 err = PTR_ERR(bitmap_bh); 5513 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 5514 err, group); 5515 ext4_mb_unload_buddy(&e4b); 5516 continue; 5517 } 5518 5519 ext4_lock_group(sb, group); 5520 list_del(&pa->pa_group_list); 5521 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5522 ext4_unlock_group(sb, group); 5523 5524 ext4_mb_unload_buddy(&e4b); 5525 put_bh(bitmap_bh); 5526 5527 list_del(&pa->u.pa_tmp_list); 5528 ext4_mb_pa_free(pa); 5529 } 5530 } 5531 5532 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 5533 { 5534 struct ext4_prealloc_space *pa; 5535 5536 BUG_ON(ext4_pspace_cachep == NULL); 5537 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 5538 if (!pa) 5539 return -ENOMEM; 5540 atomic_set(&pa->pa_count, 1); 5541 ac->ac_pa = pa; 5542 return 0; 5543 } 5544 5545 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 5546 { 5547 struct ext4_prealloc_space *pa = ac->ac_pa; 5548 5549 BUG_ON(!pa); 5550 ac->ac_pa = NULL; 5551 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 5552 /* 5553 * current function is only called due to an error or due to 5554 * len of found blocks < len of requested blocks hence the PA has not 5555 * been added to grp->bb_prealloc_list. 
So we don't need to lock it 5556 */ 5557 pa->pa_deleted = 1; 5558 ext4_mb_pa_free(pa); 5559 } 5560 5561 #ifdef CONFIG_EXT4_DEBUG 5562 static inline void ext4_mb_show_pa(struct super_block *sb) 5563 { 5564 ext4_group_t i, ngroups; 5565 5566 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5567 return; 5568 5569 ngroups = ext4_get_groups_count(sb); 5570 mb_debug(sb, "groups: "); 5571 for (i = 0; i < ngroups; i++) { 5572 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5573 struct ext4_prealloc_space *pa; 5574 ext4_grpblk_t start; 5575 struct list_head *cur; 5576 5577 if (!grp) 5578 continue; 5579 ext4_lock_group(sb, i); 5580 list_for_each(cur, &grp->bb_prealloc_list) { 5581 pa = list_entry(cur, struct ext4_prealloc_space, 5582 pa_group_list); 5583 spin_lock(&pa->pa_lock); 5584 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5585 NULL, &start); 5586 spin_unlock(&pa->pa_lock); 5587 mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5588 pa->pa_len); 5589 } 5590 ext4_unlock_group(sb, i); 5591 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5592 grp->bb_fragments); 5593 } 5594 } 5595 5596 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5597 { 5598 struct super_block *sb = ac->ac_sb; 5599 5600 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5601 return; 5602 5603 mb_debug(sb, "Can't allocate:" 5604 " Allocation context details:"); 5605 mb_debug(sb, "status %u flags 0x%x", 5606 ac->ac_status, ac->ac_flags); 5607 mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5608 "goal %lu/%lu/%lu@%lu, " 5609 "best %lu/%lu/%lu@%lu cr %d", 5610 (unsigned long)ac->ac_o_ex.fe_group, 5611 (unsigned long)ac->ac_o_ex.fe_start, 5612 (unsigned long)ac->ac_o_ex.fe_len, 5613 (unsigned long)ac->ac_o_ex.fe_logical, 5614 (unsigned long)ac->ac_g_ex.fe_group, 5615 (unsigned long)ac->ac_g_ex.fe_start, 5616 (unsigned long)ac->ac_g_ex.fe_len, 5617 (unsigned long)ac->ac_g_ex.fe_logical, 5618 (unsigned long)ac->ac_b_ex.fe_group, 5619 (unsigned long)ac->ac_b_ex.fe_start, 5620 (unsigned long)ac->ac_b_ex.fe_len, 5621 (unsigned long)ac->ac_b_ex.fe_logical, 5622 (int)ac->ac_criteria); 5623 mb_debug(sb, "%u found", ac->ac_found); 5624 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no"); 5625 if (ac->ac_pa) 5626 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ? 5627 "group pa" : "inode pa"); 5628 ext4_mb_show_pa(sb); 5629 } 5630 #else 5631 static inline void ext4_mb_show_pa(struct super_block *sb) 5632 { 5633 return; 5634 } 5635 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5636 { 5637 ext4_mb_show_pa(ac->ac_sb); 5638 return; 5639 } 5640 #endif 5641 5642 /* 5643 * We use locality group preallocation for small size file. 
The size of the 5644 * file is determined by the current size or the resulting size after 5645 * allocation which ever is larger 5646 * 5647 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5648 */ 5649 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5650 { 5651 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5652 int bsbits = ac->ac_sb->s_blocksize_bits; 5653 loff_t size, isize; 5654 bool inode_pa_eligible, group_pa_eligible; 5655 5656 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5657 return; 5658 5659 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 5660 return; 5661 5662 group_pa_eligible = sbi->s_mb_group_prealloc > 0; 5663 inode_pa_eligible = true; 5664 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 5665 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 5666 >> bsbits; 5667 5668 /* No point in using inode preallocation for closed files */ 5669 if ((size == isize) && !ext4_fs_is_busy(sbi) && 5670 !inode_is_open_for_write(ac->ac_inode)) 5671 inode_pa_eligible = false; 5672 5673 size = max(size, isize); 5674 /* Don't use group allocation for large files */ 5675 if (size > sbi->s_mb_stream_request) 5676 group_pa_eligible = false; 5677 5678 if (!group_pa_eligible) { 5679 if (inode_pa_eligible) 5680 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5681 else 5682 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 5683 return; 5684 } 5685 5686 BUG_ON(ac->ac_lg != NULL); 5687 /* 5688 * locality group prealloc space are per cpu. The reason for having 5689 * per cpu locality group is to reduce the contention between block 5690 * request from multiple CPUs. 5691 */ 5692 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5693 5694 /* we're going to use group allocation */ 5695 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5696 5697 /* serialize all allocations in the group */ 5698 mutex_lock(&ac->ac_lg->lg_mutex); 5699 } 5700 5701 static noinline_for_stack void 5702 ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5703 struct ext4_allocation_request *ar) 5704 { 5705 struct super_block *sb = ar->inode->i_sb; 5706 struct ext4_sb_info *sbi = EXT4_SB(sb); 5707 struct ext4_super_block *es = sbi->s_es; 5708 ext4_group_t group; 5709 unsigned int len; 5710 ext4_fsblk_t goal; 5711 ext4_grpblk_t block; 5712 5713 /* we can't allocate > group size */ 5714 len = ar->len; 5715 5716 /* just a dirty hack to filter too big requests */ 5717 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 5718 len = EXT4_CLUSTERS_PER_GROUP(sb); 5719 5720 /* start searching from the goal */ 5721 goal = ar->goal; 5722 if (goal < le32_to_cpu(es->s_first_data_block) || 5723 goal >= ext4_blocks_count(es)) 5724 goal = le32_to_cpu(es->s_first_data_block); 5725 ext4_get_group_no_and_offset(sb, goal, &group, &block); 5726 5727 /* set up allocation goals */ 5728 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5729 ac->ac_status = AC_STATUS_CONTINUE; 5730 ac->ac_sb = sb; 5731 ac->ac_inode = ar->inode; 5732 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5733 ac->ac_o_ex.fe_group = group; 5734 ac->ac_o_ex.fe_start = block; 5735 ac->ac_o_ex.fe_len = len; 5736 ac->ac_g_ex = ac->ac_o_ex; 5737 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; 5738 ac->ac_flags = ar->flags; 5739 5740 /* we have to define context: we'll work with a file or 5741 * locality group. 
this is a policy, actually */
5742 ext4_mb_group_or_file(ac);
5743
5744 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5745 "left: %u/%u, right %u/%u to %swritable\n",
5746 (unsigned) ar->len, (unsigned) ar->logical,
5747 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5748 (unsigned) ar->lleft, (unsigned) ar->pleft,
5749 (unsigned) ar->lright, (unsigned) ar->pright,
5750 inode_is_open_for_write(ar->inode) ? "" : "non-");
5751 }
5752
5753 static noinline_for_stack void
5754 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5755 struct ext4_locality_group *lg,
5756 int order, int total_entries)
5757 {
5758 ext4_group_t group = 0;
5759 struct ext4_buddy e4b;
5760 struct list_head discard_list;
5761 struct ext4_prealloc_space *pa, *tmp;
5762
5763 mb_debug(sb, "discard locality group preallocation\n");
5764
5765 INIT_LIST_HEAD(&discard_list);
5766
5767 spin_lock(&lg->lg_prealloc_lock);
5768 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5769 pa_node.lg_list,
5770 lockdep_is_held(&lg->lg_prealloc_lock)) {
5771 spin_lock(&pa->pa_lock);
5772 if (atomic_read(&pa->pa_count)) {
5773 /*
5774 * This is the pa that we just used
5775 * for block allocation. So don't
5776 * free that
5777 */
5778 spin_unlock(&pa->pa_lock);
5779 continue;
5780 }
5781 if (pa->pa_deleted) {
5782 spin_unlock(&pa->pa_lock);
5783 continue;
5784 }
5785 /* only lg prealloc space */
5786 BUG_ON(pa->pa_type != MB_GROUP_PA);
5787
5788 /* seems this one can be freed ... */
5789 ext4_mb_mark_pa_deleted(sb, pa);
5790 spin_unlock(&pa->pa_lock);
5791
5792 list_del_rcu(&pa->pa_node.lg_list);
5793 list_add(&pa->u.pa_tmp_list, &discard_list);
5794
5795 total_entries--;
5796 if (total_entries <= 5) {
5797 /*
5798 * we want to keep only 5 entries,
5799 * allowing it to grow to 8. This
5800 * makes sure we don't call discard
5801 * again too soon for this list.
5802 */
5803 break;
5804 }
5805 }
5806 spin_unlock(&lg->lg_prealloc_lock);
5807
5808 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5809 int err;
5810
5811 group = ext4_get_group_number(sb, pa->pa_pstart);
5812 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5813 GFP_NOFS|__GFP_NOFAIL);
5814 if (err) {
5815 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5816 err, group);
5817 continue;
5818 }
5819 ext4_lock_group(sb, group);
5820 list_del(&pa->pa_group_list);
5821 ext4_mb_release_group_pa(&e4b, pa);
5822 ext4_unlock_group(sb, group);
5823
5824 ext4_mb_unload_buddy(&e4b);
5825 list_del(&pa->u.pa_tmp_list);
5826 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5827 }
5828 }
5829
5830 /*
5831 * We have incremented pa_count. So it cannot be freed at this
5832 * point. Also we hold lg_mutex. So no parallel allocation is
5833 * possible from this lg. That means pa_free cannot be updated.
5834 *
5835 * A parallel ext4_mb_discard_group_preallocations is possible,
5836 * which can cause the lg_prealloc_list to be updated.
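 * That is why the list walk in ext4_mb_add_n_trim() below runs under
 * lg_prealloc_lock and simply skips entries that are already marked
 * pa_deleted.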
5837 */ 5838 5839 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 5840 { 5841 int order, added = 0, lg_prealloc_count = 1; 5842 struct super_block *sb = ac->ac_sb; 5843 struct ext4_locality_group *lg = ac->ac_lg; 5844 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 5845 5846 order = fls(pa->pa_free) - 1; 5847 if (order > PREALLOC_TB_SIZE - 1) 5848 /* The max size of hash table is PREALLOC_TB_SIZE */ 5849 order = PREALLOC_TB_SIZE - 1; 5850 /* Add the prealloc space to lg */ 5851 spin_lock(&lg->lg_prealloc_lock); 5852 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 5853 pa_node.lg_list, 5854 lockdep_is_held(&lg->lg_prealloc_lock)) { 5855 spin_lock(&tmp_pa->pa_lock); 5856 if (tmp_pa->pa_deleted) { 5857 spin_unlock(&tmp_pa->pa_lock); 5858 continue; 5859 } 5860 if (!added && pa->pa_free < tmp_pa->pa_free) { 5861 /* Add to the tail of the previous entry */ 5862 list_add_tail_rcu(&pa->pa_node.lg_list, 5863 &tmp_pa->pa_node.lg_list); 5864 added = 1; 5865 /* 5866 * we want to count the total 5867 * number of entries in the list 5868 */ 5869 } 5870 spin_unlock(&tmp_pa->pa_lock); 5871 lg_prealloc_count++; 5872 } 5873 if (!added) 5874 list_add_tail_rcu(&pa->pa_node.lg_list, 5875 &lg->lg_prealloc_list[order]); 5876 spin_unlock(&lg->lg_prealloc_lock); 5877 5878 /* Now trim the list to be not more than 8 elements */ 5879 if (lg_prealloc_count > 8) { 5880 ext4_mb_discard_lg_preallocations(sb, lg, 5881 order, lg_prealloc_count); 5882 return; 5883 } 5884 return ; 5885 } 5886 5887 /* 5888 * release all resource we used in allocation 5889 */ 5890 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5891 { 5892 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5893 struct ext4_prealloc_space *pa = ac->ac_pa; 5894 if (pa) { 5895 if (pa->pa_type == MB_GROUP_PA) { 5896 /* see comment in ext4_mb_use_group_pa() */ 5897 spin_lock(&pa->pa_lock); 5898 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5899 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 5900 pa->pa_free -= ac->ac_b_ex.fe_len; 5901 pa->pa_len -= ac->ac_b_ex.fe_len; 5902 spin_unlock(&pa->pa_lock); 5903 5904 /* 5905 * We want to add the pa to the right bucket. 5906 * Remove it from the list and while adding 5907 * make sure the list to which we are adding 5908 * doesn't grow big. 
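 * (ext4_mb_add_n_trim() re-inserts the pa into the lg_prealloc_list
 * bucket matching its remaining pa_free order and trims that bucket
 * back once it exceeds 8 entries.)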
5909 */ 5910 if (likely(pa->pa_free)) { 5911 spin_lock(pa->pa_node_lock.lg_lock); 5912 list_del_rcu(&pa->pa_node.lg_list); 5913 spin_unlock(pa->pa_node_lock.lg_lock); 5914 ext4_mb_add_n_trim(ac); 5915 } 5916 } 5917 5918 ext4_mb_put_pa(ac, ac->ac_sb, pa); 5919 } 5920 if (ac->ac_bitmap_page) 5921 put_page(ac->ac_bitmap_page); 5922 if (ac->ac_buddy_page) 5923 put_page(ac->ac_buddy_page); 5924 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5925 mutex_unlock(&ac->ac_lg->lg_mutex); 5926 ext4_mb_collect_stats(ac); 5927 return 0; 5928 } 5929 5930 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5931 { 5932 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5933 int ret; 5934 int freed = 0, busy = 0; 5935 int retry = 0; 5936 5937 trace_ext4_mb_discard_preallocations(sb, needed); 5938 5939 if (needed == 0) 5940 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 5941 repeat: 5942 for (i = 0; i < ngroups && needed > 0; i++) { 5943 ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5944 freed += ret; 5945 needed -= ret; 5946 cond_resched(); 5947 } 5948 5949 if (needed > 0 && busy && ++retry < 3) { 5950 busy = 0; 5951 goto repeat; 5952 } 5953 5954 return freed; 5955 } 5956 5957 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 5958 struct ext4_allocation_context *ac, u64 *seq) 5959 { 5960 int freed; 5961 u64 seq_retry = 0; 5962 bool ret = false; 5963 5964 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 5965 if (freed) { 5966 ret = true; 5967 goto out_dbg; 5968 } 5969 seq_retry = ext4_get_discard_pa_seq_sum(); 5970 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 5971 ac->ac_flags |= EXT4_MB_STRICT_CHECK; 5972 *seq = seq_retry; 5973 ret = true; 5974 } 5975 5976 out_dbg: 5977 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no"); 5978 return ret; 5979 } 5980 5981 /* 5982 * Simple allocator for Ext4 fast commit replay path. It searches for blocks 5983 * linearly starting at the goal block and also excludes the blocks which 5984 * are going to be in use after fast commit replay. 
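 * Only a single cluster is handed out per call: the first zero bit found
 * in a group's on-disk bitmap that is not excluded by
 * ext4_fc_replay_check_excluded() is marked in-use via ext4_mb_mark_bb()
 * and returned, with ar->len set to 1.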
5985 */ 5986 static ext4_fsblk_t 5987 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp) 5988 { 5989 struct buffer_head *bitmap_bh; 5990 struct super_block *sb = ar->inode->i_sb; 5991 struct ext4_sb_info *sbi = EXT4_SB(sb); 5992 ext4_group_t group, nr; 5993 ext4_grpblk_t blkoff; 5994 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 5995 ext4_grpblk_t i = 0; 5996 ext4_fsblk_t goal, block; 5997 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 5998 5999 goal = ar->goal; 6000 if (goal < le32_to_cpu(es->s_first_data_block) || 6001 goal >= ext4_blocks_count(es)) 6002 goal = le32_to_cpu(es->s_first_data_block); 6003 6004 ar->len = 0; 6005 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 6006 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) { 6007 bitmap_bh = ext4_read_block_bitmap(sb, group); 6008 if (IS_ERR(bitmap_bh)) { 6009 *errp = PTR_ERR(bitmap_bh); 6010 pr_warn("Failed to read block bitmap\n"); 6011 return 0; 6012 } 6013 6014 while (1) { 6015 i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 6016 blkoff); 6017 if (i >= max) 6018 break; 6019 if (ext4_fc_replay_check_excluded(sb, 6020 ext4_group_first_block_no(sb, group) + 6021 EXT4_C2B(sbi, i))) { 6022 blkoff = i + 1; 6023 } else 6024 break; 6025 } 6026 brelse(bitmap_bh); 6027 if (i < max) 6028 break; 6029 6030 if (++group >= ext4_get_groups_count(sb)) 6031 group = 0; 6032 6033 blkoff = 0; 6034 } 6035 6036 if (i >= max) { 6037 *errp = -ENOSPC; 6038 return 0; 6039 } 6040 6041 block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i); 6042 ext4_mb_mark_bb(sb, block, 1, 1); 6043 ar->len = 1; 6044 6045 return block; 6046 } 6047 6048 /* 6049 * Main entry point into mballoc to allocate blocks 6050 * it tries to use preallocation first, then falls back 6051 * to usual allocation 6052 */ 6053 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 6054 struct ext4_allocation_request *ar, int *errp) 6055 { 6056 struct ext4_allocation_context *ac = NULL; 6057 struct ext4_sb_info *sbi; 6058 struct super_block *sb; 6059 ext4_fsblk_t block = 0; 6060 unsigned int inquota = 0; 6061 unsigned int reserv_clstrs = 0; 6062 int retries = 0; 6063 u64 seq; 6064 6065 might_sleep(); 6066 sb = ar->inode->i_sb; 6067 sbi = EXT4_SB(sb); 6068 6069 trace_ext4_request_blocks(ar); 6070 if (sbi->s_mount_state & EXT4_FC_REPLAY) 6071 return ext4_mb_new_blocks_simple(ar, errp); 6072 6073 /* Allow to use superuser reservation for quota file */ 6074 if (ext4_is_quota_file(ar->inode)) 6075 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 6076 6077 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 6078 /* Without delayed allocation we need to verify 6079 * there is enough free blocks to do block allocation 6080 * and verify allocation doesn't exceed the quota limits. 
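 * If the claim fails, ar->len is halved and the claim retried, so the
 * request shrinks gracefully under ENOSPC pressure instead of failing
 * outright; the quota charge below degrades the same way, one block at
 * a time.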
6081 */ 6082 while (ar->len && 6083 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 6084 6085 /* let others to free the space */ 6086 cond_resched(); 6087 ar->len = ar->len >> 1; 6088 } 6089 if (!ar->len) { 6090 ext4_mb_show_pa(sb); 6091 *errp = -ENOSPC; 6092 return 0; 6093 } 6094 reserv_clstrs = ar->len; 6095 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 6096 dquot_alloc_block_nofail(ar->inode, 6097 EXT4_C2B(sbi, ar->len)); 6098 } else { 6099 while (ar->len && 6100 dquot_alloc_block(ar->inode, 6101 EXT4_C2B(sbi, ar->len))) { 6102 6103 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 6104 ar->len--; 6105 } 6106 } 6107 inquota = ar->len; 6108 if (ar->len == 0) { 6109 *errp = -EDQUOT; 6110 goto out; 6111 } 6112 } 6113 6114 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 6115 if (!ac) { 6116 ar->len = 0; 6117 *errp = -ENOMEM; 6118 goto out; 6119 } 6120 6121 ext4_mb_initialize_context(ac, ar); 6122 6123 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 6124 seq = this_cpu_read(discard_pa_seq); 6125 if (!ext4_mb_use_preallocated(ac)) { 6126 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 6127 ext4_mb_normalize_request(ac, ar); 6128 6129 *errp = ext4_mb_pa_alloc(ac); 6130 if (*errp) 6131 goto errout; 6132 repeat: 6133 /* allocate space in core */ 6134 *errp = ext4_mb_regular_allocator(ac); 6135 /* 6136 * pa allocated above is added to grp->bb_prealloc_list only 6137 * when we were able to allocate some block i.e. when 6138 * ac->ac_status == AC_STATUS_FOUND. 6139 * And error from above mean ac->ac_status != AC_STATUS_FOUND 6140 * So we have to free this pa here itself. 6141 */ 6142 if (*errp) { 6143 ext4_mb_pa_put_free(ac); 6144 ext4_discard_allocated_blocks(ac); 6145 goto errout; 6146 } 6147 if (ac->ac_status == AC_STATUS_FOUND && 6148 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 6149 ext4_mb_pa_put_free(ac); 6150 } 6151 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 6152 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 6153 if (*errp) { 6154 ext4_discard_allocated_blocks(ac); 6155 goto errout; 6156 } else { 6157 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 6158 ar->len = ac->ac_b_ex.fe_len; 6159 } 6160 } else { 6161 if (++retries < 3 && 6162 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 6163 goto repeat; 6164 /* 6165 * If block allocation fails then the pa allocated above 6166 * needs to be freed here itself. 6167 */ 6168 ext4_mb_pa_put_free(ac); 6169 *errp = -ENOSPC; 6170 } 6171 6172 if (*errp) { 6173 errout: 6174 ac->ac_b_ex.fe_len = 0; 6175 ar->len = 0; 6176 ext4_mb_show_ac(ac); 6177 } 6178 ext4_mb_release_context(ac); 6179 kmem_cache_free(ext4_ac_cachep, ac); 6180 out: 6181 if (inquota && ar->len < inquota) 6182 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 6183 if (!ar->len) { 6184 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 6185 /* release all the reserved blocks if non delalloc */ 6186 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 6187 reserv_clstrs); 6188 } 6189 6190 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 6191 6192 return block; 6193 } 6194 6195 /* 6196 * We can merge two free data extents only if the physical blocks 6197 * are contiguous, AND the extents were freed by the same transaction, 6198 * AND the blocks are associated with the same group. 
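 * For example, an entry covering clusters 100..149 and a new entry
 * covering 150..199, freed in the same transaction and group, are merged
 * into a single entry covering 100..199.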
6199 */
6200 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6201 struct ext4_free_data *entry,
6202 struct ext4_free_data *new_entry,
6203 struct rb_root *entry_rb_root)
6204 {
6205 if ((entry->efd_tid != new_entry->efd_tid) ||
6206 (entry->efd_group != new_entry->efd_group))
6207 return;
6208 if (entry->efd_start_cluster + entry->efd_count ==
6209 new_entry->efd_start_cluster) {
6210 new_entry->efd_start_cluster = entry->efd_start_cluster;
6211 new_entry->efd_count += entry->efd_count;
6212 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6213 entry->efd_start_cluster) {
6214 new_entry->efd_count += entry->efd_count;
6215 } else
6216 return;
6217 spin_lock(&sbi->s_md_lock);
6218 list_del(&entry->efd_list);
6219 spin_unlock(&sbi->s_md_lock);
6220 rb_erase(&entry->efd_node, entry_rb_root);
6221 kmem_cache_free(ext4_free_data_cachep, entry);
6222 }
6223
6224 static noinline_for_stack void
6225 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6226 struct ext4_free_data *new_entry)
6227 {
6228 ext4_group_t group = e4b->bd_group;
6229 ext4_grpblk_t cluster;
6230 ext4_grpblk_t clusters = new_entry->efd_count;
6231 struct ext4_free_data *entry;
6232 struct ext4_group_info *db = e4b->bd_info;
6233 struct super_block *sb = e4b->bd_sb;
6234 struct ext4_sb_info *sbi = EXT4_SB(sb);
6235 struct rb_node **n = &db->bb_free_root.rb_node, *node;
6236 struct rb_node *parent = NULL, *new_node;
6237
6238 BUG_ON(!ext4_handle_valid(handle));
6239 BUG_ON(e4b->bd_bitmap_page == NULL);
6240 BUG_ON(e4b->bd_buddy_page == NULL);
6241
6242 new_node = &new_entry->efd_node;
6243 cluster = new_entry->efd_start_cluster;
6244
6245 if (!*n) {
6246 /* first free block extent. We need to
6247 * protect the buddy cache from being freed,
6248 * otherwise we'll refresh it from
6249 * the on-disk bitmap and lose not-yet-available
6250 * blocks */
6251 get_page(e4b->bd_buddy_page);
6252 get_page(e4b->bd_bitmap_page);
6253 }
6254 while (*n) {
6255 parent = *n;
6256 entry = rb_entry(parent, struct ext4_free_data, efd_node);
6257 if (cluster < entry->efd_start_cluster)
6258 n = &(*n)->rb_left;
6259 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6260 n = &(*n)->rb_right;
6261 else {
6262 ext4_grp_locked_error(sb, group, 0,
6263 ext4_group_first_block_no(sb, group) +
6264 EXT4_C2B(sbi, cluster),
6265 "Block already on to-be-freed list");
6266 kmem_cache_free(ext4_free_data_cachep, new_entry);
6267 return;
6268 }
6269 }
6270
6271 rb_link_node(new_node, parent, n);
6272 rb_insert_color(new_node, &db->bb_free_root);
6273
6274 /* Now see if the extent can be merged to the left and right */
6275 node = rb_prev(new_node);
6276 if (node) {
6277 entry = rb_entry(node, struct ext4_free_data, efd_node);
6278 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6279 &(db->bb_free_root));
6280 }
6281
6282 node = rb_next(new_node);
6283 if (node) {
6284 entry = rb_entry(node, struct ext4_free_data, efd_node);
6285 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6286 &(db->bb_free_root));
6287 }
6288
6289 spin_lock(&sbi->s_md_lock);
6290 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
6291 sbi->s_mb_free_pending += clusters;
6292 spin_unlock(&sbi->s_md_lock);
6293 }
6294
6295 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6296 unsigned long count)
6297 {
6298 struct buffer_head *bitmap_bh;
6299 struct super_block *sb = inode->i_sb;
6300 struct ext4_group_desc *gdp;
6301 struct buffer_head *gdp_bh;
6302 ext4_group_t group;
6303 ext4_grpblk_t
blkoff; 6304 int already_freed = 0, err, i; 6305 6306 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 6307 bitmap_bh = ext4_read_block_bitmap(sb, group); 6308 if (IS_ERR(bitmap_bh)) { 6309 pr_warn("Failed to read block bitmap\n"); 6310 return; 6311 } 6312 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 6313 if (!gdp) 6314 goto err_out; 6315 6316 for (i = 0; i < count; i++) { 6317 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 6318 already_freed++; 6319 } 6320 mb_clear_bits(bitmap_bh->b_data, blkoff, count); 6321 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 6322 if (err) 6323 goto err_out; 6324 ext4_free_group_clusters_set( 6325 sb, gdp, ext4_free_group_clusters(sb, gdp) + 6326 count - already_freed); 6327 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6328 ext4_group_desc_csum_set(sb, group, gdp); 6329 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 6330 sync_dirty_buffer(bitmap_bh); 6331 sync_dirty_buffer(gdp_bh); 6332 6333 err_out: 6334 brelse(bitmap_bh); 6335 } 6336 6337 /** 6338 * ext4_mb_clear_bb() -- helper function for freeing blocks. 6339 * Used by ext4_free_blocks() 6340 * @handle: handle for this transaction 6341 * @inode: inode 6342 * @block: starting physical block to be freed 6343 * @count: number of blocks to be freed 6344 * @flags: flags used by ext4_free_blocks 6345 */ 6346 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 6347 ext4_fsblk_t block, unsigned long count, 6348 int flags) 6349 { 6350 struct buffer_head *bitmap_bh = NULL; 6351 struct super_block *sb = inode->i_sb; 6352 struct ext4_group_desc *gdp; 6353 struct ext4_group_info *grp; 6354 unsigned int overflow; 6355 ext4_grpblk_t bit; 6356 struct buffer_head *gd_bh; 6357 ext4_group_t block_group; 6358 struct ext4_sb_info *sbi; 6359 struct ext4_buddy e4b; 6360 unsigned int count_clusters; 6361 int err = 0; 6362 int ret; 6363 6364 sbi = EXT4_SB(sb); 6365 6366 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6367 !ext4_inode_block_valid(inode, block, count)) { 6368 ext4_error(sb, "Freeing blocks in system zone - " 6369 "Block = %llu, count = %lu", block, count); 6370 /* err = 0. ext4_std_error should be a no op */ 6371 goto error_return; 6372 } 6373 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6374 6375 do_more: 6376 overflow = 0; 6377 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6378 6379 grp = ext4_get_group_info(sb, block_group); 6380 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 6381 return; 6382 6383 /* 6384 * Check to see if we are freeing blocks across a group 6385 * boundary. 6386 */ 6387 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 6388 overflow = EXT4_C2B(sbi, bit) + count - 6389 EXT4_BLOCKS_PER_GROUP(sb); 6390 count -= overflow; 6391 /* The range changed so it's no longer validated */ 6392 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6393 } 6394 count_clusters = EXT4_NUM_B2C(sbi, count); 6395 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 6396 if (IS_ERR(bitmap_bh)) { 6397 err = PTR_ERR(bitmap_bh); 6398 bitmap_bh = NULL; 6399 goto error_return; 6400 } 6401 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 6402 if (!gdp) { 6403 err = -EIO; 6404 goto error_return; 6405 } 6406 6407 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6408 !ext4_inode_block_valid(inode, block, count)) { 6409 ext4_error(sb, "Freeing blocks in system zone - " 6410 "Block = %llu, count = %lu", block, count); 6411 /* err = 0. 
ext4_std_error should be a no op */ 6412 goto error_return; 6413 } 6414 6415 BUFFER_TRACE(bitmap_bh, "getting write access"); 6416 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6417 EXT4_JTR_NONE); 6418 if (err) 6419 goto error_return; 6420 6421 /* 6422 * We are about to modify some metadata. Call the journal APIs 6423 * to unshare ->b_data if a currently-committing transaction is 6424 * using it 6425 */ 6426 BUFFER_TRACE(gd_bh, "get_write_access"); 6427 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6428 if (err) 6429 goto error_return; 6430 #ifdef AGGRESSIVE_CHECK 6431 { 6432 int i; 6433 for (i = 0; i < count_clusters; i++) 6434 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 6435 } 6436 #endif 6437 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6438 6439 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6440 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6441 GFP_NOFS|__GFP_NOFAIL); 6442 if (err) 6443 goto error_return; 6444 6445 /* 6446 * We need to make sure we don't reuse the freed block until after the 6447 * transaction is committed. We make an exception if the inode is to be 6448 * written in writeback mode since writeback mode has weak data 6449 * consistency guarantees. 6450 */ 6451 if (ext4_handle_valid(handle) && 6452 ((flags & EXT4_FREE_BLOCKS_METADATA) || 6453 !ext4_should_writeback_data(inode))) { 6454 struct ext4_free_data *new_entry; 6455 /* 6456 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 6457 * to fail. 6458 */ 6459 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 6460 GFP_NOFS|__GFP_NOFAIL); 6461 new_entry->efd_start_cluster = bit; 6462 new_entry->efd_group = block_group; 6463 new_entry->efd_count = count_clusters; 6464 new_entry->efd_tid = handle->h_transaction->t_tid; 6465 6466 ext4_lock_group(sb, block_group); 6467 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 6468 ext4_mb_free_metadata(handle, &e4b, new_entry); 6469 } else { 6470 /* need to update group_info->bb_free and bitmap 6471 * with group lock held. 
generate_buddy look at 6472 * them with group lock_held 6473 */ 6474 if (test_opt(sb, DISCARD)) { 6475 err = ext4_issue_discard(sb, block_group, bit, 6476 count_clusters, NULL); 6477 if (err && err != -EOPNOTSUPP) 6478 ext4_msg(sb, KERN_WARNING, "discard request in" 6479 " group:%u block:%d count:%lu failed" 6480 " with %d", block_group, bit, count, 6481 err); 6482 } else 6483 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6484 6485 ext4_lock_group(sb, block_group); 6486 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 6487 mb_free_blocks(inode, &e4b, bit, count_clusters); 6488 } 6489 6490 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6491 ext4_free_group_clusters_set(sb, gdp, ret); 6492 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6493 ext4_group_desc_csum_set(sb, block_group, gdp); 6494 ext4_unlock_group(sb, block_group); 6495 6496 if (sbi->s_log_groups_per_flex) { 6497 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6498 atomic64_add(count_clusters, 6499 &sbi_array_rcu_deref(sbi, s_flex_groups, 6500 flex_group)->free_clusters); 6501 } 6502 6503 /* 6504 * on a bigalloc file system, defer the s_freeclusters_counter 6505 * update to the caller (ext4_remove_space and friends) so they 6506 * can determine if a cluster freed here should be rereserved 6507 */ 6508 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 6509 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 6510 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 6511 percpu_counter_add(&sbi->s_freeclusters_counter, 6512 count_clusters); 6513 } 6514 6515 ext4_mb_unload_buddy(&e4b); 6516 6517 /* We dirtied the bitmap block */ 6518 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 6519 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 6520 6521 /* And the group descriptor block */ 6522 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 6523 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6524 if (!err) 6525 err = ret; 6526 6527 if (overflow && !err) { 6528 block += count; 6529 count = overflow; 6530 put_bh(bitmap_bh); 6531 /* The range changed so it's no longer validated */ 6532 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6533 goto do_more; 6534 } 6535 error_return: 6536 brelse(bitmap_bh); 6537 ext4_std_error(sb, err); 6538 return; 6539 } 6540 6541 /** 6542 * ext4_free_blocks() -- Free given blocks and update quota 6543 * @handle: handle for this transaction 6544 * @inode: inode 6545 * @bh: optional buffer of the block to be freed 6546 * @block: starting physical block to be freed 6547 * @count: number of blocks to be freed 6548 * @flags: flags used by ext4_free_blocks 6549 */ 6550 void ext4_free_blocks(handle_t *handle, struct inode *inode, 6551 struct buffer_head *bh, ext4_fsblk_t block, 6552 unsigned long count, int flags) 6553 { 6554 struct super_block *sb = inode->i_sb; 6555 unsigned int overflow; 6556 struct ext4_sb_info *sbi; 6557 6558 sbi = EXT4_SB(sb); 6559 6560 if (bh) { 6561 if (block) 6562 BUG_ON(block != bh->b_blocknr); 6563 else 6564 block = bh->b_blocknr; 6565 } 6566 6567 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 6568 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count)); 6569 return; 6570 } 6571 6572 might_sleep(); 6573 6574 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6575 !ext4_inode_block_valid(inode, block, count)) { 6576 ext4_error(sb, "Freeing blocks not in datazone - " 6577 "block = %llu, count = %lu", block, count); 6578 return; 6579 } 6580 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6581 6582 ext4_debug("freeing block %llu\n", block); 6583 
trace_ext4_free_blocks(inode, block, count, flags); 6584 6585 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6586 BUG_ON(count > 1); 6587 6588 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 6589 inode, bh, block); 6590 } 6591 6592 /* 6593 * If the extent to be freed does not begin on a cluster 6594 * boundary, we need to deal with partial clusters at the 6595 * beginning and end of the extent. Normally we will free 6596 * blocks at the beginning or the end unless we are explicitly 6597 * requested to avoid doing so. 6598 */ 6599 overflow = EXT4_PBLK_COFF(sbi, block); 6600 if (overflow) { 6601 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 6602 overflow = sbi->s_cluster_ratio - overflow; 6603 block += overflow; 6604 if (count > overflow) 6605 count -= overflow; 6606 else 6607 return; 6608 } else { 6609 block -= overflow; 6610 count += overflow; 6611 } 6612 /* The range changed so it's no longer validated */ 6613 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6614 } 6615 overflow = EXT4_LBLK_COFF(sbi, count); 6616 if (overflow) { 6617 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 6618 if (count > overflow) 6619 count -= overflow; 6620 else 6621 return; 6622 } else 6623 count += sbi->s_cluster_ratio - overflow; 6624 /* The range changed so it's no longer validated */ 6625 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6626 } 6627 6628 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6629 int i; 6630 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 6631 6632 for (i = 0; i < count; i++) { 6633 cond_resched(); 6634 if (is_metadata) 6635 bh = sb_find_get_block(inode->i_sb, block + i); 6636 ext4_forget(handle, is_metadata, inode, bh, block + i); 6637 } 6638 } 6639 6640 ext4_mb_clear_bb(handle, inode, block, count, flags); 6641 return; 6642 } 6643 6644 /** 6645 * ext4_group_add_blocks() -- Add given blocks to an existing group 6646 * @handle: handle to this transaction 6647 * @sb: super block 6648 * @block: start physical block to add to the block group 6649 * @count: number of blocks to free 6650 * 6651 * This marks the blocks as free in the bitmap and buddy. 6652 */ 6653 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 6654 ext4_fsblk_t block, unsigned long count) 6655 { 6656 struct buffer_head *bitmap_bh = NULL; 6657 struct buffer_head *gd_bh; 6658 ext4_group_t block_group; 6659 ext4_grpblk_t bit; 6660 unsigned int i; 6661 struct ext4_group_desc *desc; 6662 struct ext4_sb_info *sbi = EXT4_SB(sb); 6663 struct ext4_buddy e4b; 6664 int err = 0, ret, free_clusters_count; 6665 ext4_grpblk_t clusters_freed; 6666 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6667 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6668 unsigned long cluster_count = last_cluster - first_cluster + 1; 6669 6670 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 6671 6672 if (count == 0) 6673 return 0; 6674 6675 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6676 /* 6677 * Check to see if we are freeing blocks across a group 6678 * boundary. 
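 * Unlike ext4_mb_clear_bb(), which splits such a request and loops,
 * blocks added here must stay within one group, so the request is
 * rejected with -EINVAL below.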
6679 */ 6680 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6681 ext4_warning(sb, "too many blocks added to group %u", 6682 block_group); 6683 err = -EINVAL; 6684 goto error_return; 6685 } 6686 6687 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 6688 if (IS_ERR(bitmap_bh)) { 6689 err = PTR_ERR(bitmap_bh); 6690 bitmap_bh = NULL; 6691 goto error_return; 6692 } 6693 6694 desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6695 if (!desc) { 6696 err = -EIO; 6697 goto error_return; 6698 } 6699 6700 if (!ext4_sb_block_valid(sb, NULL, block, count)) { 6701 ext4_error(sb, "Adding blocks in system zones - " 6702 "Block = %llu, count = %lu", 6703 block, count); 6704 err = -EINVAL; 6705 goto error_return; 6706 } 6707 6708 BUFFER_TRACE(bitmap_bh, "getting write access"); 6709 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6710 EXT4_JTR_NONE); 6711 if (err) 6712 goto error_return; 6713 6714 /* 6715 * We are about to modify some metadata. Call the journal APIs 6716 * to unshare ->b_data if a currently-committing transaction is 6717 * using it 6718 */ 6719 BUFFER_TRACE(gd_bh, "get_write_access"); 6720 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 6721 if (err) 6722 goto error_return; 6723 6724 for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 6725 BUFFER_TRACE(bitmap_bh, "clear bit"); 6726 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 6727 ext4_error(sb, "bit already cleared for block %llu", 6728 (ext4_fsblk_t)(block + i)); 6729 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 6730 } else { 6731 clusters_freed++; 6732 } 6733 } 6734 6735 err = ext4_mb_load_buddy(sb, block_group, &e4b); 6736 if (err) 6737 goto error_return; 6738 6739 /* 6740 * need to update group_info->bb_free and bitmap 6741 * with group lock held. generate_buddy look at 6742 * them with group lock_held 6743 */ 6744 ext4_lock_group(sb, block_group); 6745 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6746 mb_free_blocks(NULL, &e4b, bit, cluster_count); 6747 free_clusters_count = clusters_freed + 6748 ext4_free_group_clusters(sb, desc); 6749 ext4_free_group_clusters_set(sb, desc, free_clusters_count); 6750 ext4_block_bitmap_csum_set(sb, desc, bitmap_bh); 6751 ext4_group_desc_csum_set(sb, block_group, desc); 6752 ext4_unlock_group(sb, block_group); 6753 percpu_counter_add(&sbi->s_freeclusters_counter, 6754 clusters_freed); 6755 6756 if (sbi->s_log_groups_per_flex) { 6757 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6758 atomic64_add(clusters_freed, 6759 &sbi_array_rcu_deref(sbi, s_flex_groups, 6760 flex_group)->free_clusters); 6761 } 6762 6763 ext4_mb_unload_buddy(&e4b); 6764 6765 /* We dirtied the bitmap block */ 6766 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 6767 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 6768 6769 /* And the group descriptor block */ 6770 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 6771 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6772 if (!err) 6773 err = ret; 6774 6775 error_return: 6776 brelse(bitmap_bh); 6777 ext4_std_error(sb, err); 6778 return err; 6779 } 6780 6781 /** 6782 * ext4_trim_extent -- function to TRIM one single free extent in the group 6783 * @sb: super block for the file system 6784 * @start: starting block of the free extent in the alloc. group 6785 * @count: number of blocks to TRIM 6786 * @e4b: ext4 buddy for the group 6787 * 6788 * Trim "count" blocks starting at "start" in the "group". 
To assure that no 6789 * one will allocate those blocks, mark it as used in buddy bitmap. This must 6790 * be called with under the group lock. 6791 */ 6792 static int ext4_trim_extent(struct super_block *sb, 6793 int start, int count, struct ext4_buddy *e4b) 6794 __releases(bitlock) 6795 __acquires(bitlock) 6796 { 6797 struct ext4_free_extent ex; 6798 ext4_group_t group = e4b->bd_group; 6799 int ret = 0; 6800 6801 trace_ext4_trim_extent(sb, group, start, count); 6802 6803 assert_spin_locked(ext4_group_lock_ptr(sb, group)); 6804 6805 ex.fe_start = start; 6806 ex.fe_group = group; 6807 ex.fe_len = count; 6808 6809 /* 6810 * Mark blocks used, so no one can reuse them while 6811 * being trimmed. 6812 */ 6813 mb_mark_used(e4b, &ex); 6814 ext4_unlock_group(sb, group); 6815 ret = ext4_issue_discard(sb, group, start, count, NULL); 6816 ext4_lock_group(sb, group); 6817 mb_free_blocks(NULL, e4b, start, ex.fe_len); 6818 return ret; 6819 } 6820 6821 static int ext4_try_to_trim_range(struct super_block *sb, 6822 struct ext4_buddy *e4b, ext4_grpblk_t start, 6823 ext4_grpblk_t max, ext4_grpblk_t minblocks) 6824 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6825 __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 6826 { 6827 ext4_grpblk_t next, count, free_count; 6828 void *bitmap; 6829 6830 bitmap = e4b->bd_bitmap; 6831 start = (e4b->bd_info->bb_first_free > start) ? 6832 e4b->bd_info->bb_first_free : start; 6833 count = 0; 6834 free_count = 0; 6835 6836 while (start <= max) { 6837 start = mb_find_next_zero_bit(bitmap, max + 1, start); 6838 if (start > max) 6839 break; 6840 next = mb_find_next_bit(bitmap, max + 1, start); 6841 6842 if ((next - start) >= minblocks) { 6843 int ret = ext4_trim_extent(sb, start, next - start, e4b); 6844 6845 if (ret && ret != -EOPNOTSUPP) 6846 break; 6847 count += next - start; 6848 } 6849 free_count += next - start; 6850 start = next + 1; 6851 6852 if (fatal_signal_pending(current)) { 6853 count = -ERESTARTSYS; 6854 break; 6855 } 6856 6857 if (need_resched()) { 6858 ext4_unlock_group(sb, e4b->bd_group); 6859 cond_resched(); 6860 ext4_lock_group(sb, e4b->bd_group); 6861 } 6862 6863 if ((e4b->bd_info->bb_free - free_count) < minblocks) 6864 break; 6865 } 6866 6867 return count; 6868 } 6869 6870 /** 6871 * ext4_trim_all_free -- function to trim all free space in alloc. group 6872 * @sb: super block for file system 6873 * @group: group to be trimmed 6874 * @start: first group block to examine 6875 * @max: last group block to examine 6876 * @minblocks: minimum extent block count 6877 * @set_trimmed: set the trimmed flag if at least one block is trimmed 6878 * 6879 * ext4_trim_all_free walks through group's block bitmap searching for free 6880 * extents. When the free extent is found, mark it as used in group buddy 6881 * bitmap. Then issue a TRIM command on this extent and free the extent in 6882 * the group buddy bitmap. 
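 * Returns the number of clusters trimmed, or a negative error code.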
6883 */
6884 static ext4_grpblk_t
6885 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6886 ext4_grpblk_t start, ext4_grpblk_t max,
6887 ext4_grpblk_t minblocks, bool set_trimmed)
6888 {
6889 struct ext4_buddy e4b;
6890 int ret;
6891
6892 trace_ext4_trim_all_free(sb, group, start, max);
6893
6894 ret = ext4_mb_load_buddy(sb, group, &e4b);
6895 if (ret) {
6896 ext4_warning(sb, "Error %d loading buddy information for %u",
6897 ret, group);
6898 return ret;
6899 }
6900
6901 ext4_lock_group(sb, group);
6902
6903 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6904 minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
6905 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6906 if (ret >= 0 && set_trimmed)
6907 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
6908 } else {
6909 ret = 0;
6910 }
6911
6912 ext4_unlock_group(sb, group);
6913 ext4_mb_unload_buddy(&e4b);
6914
6915 ext4_debug("trimmed %d blocks in the group %d\n",
6916 ret, group);
6917
6918 return ret;
6919 }
6920
6921 /**
6922 * ext4_trim_fs() -- trim ioctl handler function
6923 * @sb: superblock for filesystem
6924 * @range: fstrim_range structure
6925 *
6926 * start: first byte to trim
6927 * len: number of bytes to trim from start
6928 * minlen: minimum extent length in bytes
6929 * ext4_trim_fs goes through all allocation groups containing bytes from
6930 * start to start+len. For each such group, ext4_trim_all_free() is invoked
6931 * to trim all free space.
6932 */
6933 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6934 {
6935 unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6936 struct ext4_group_info *grp;
6937 ext4_group_t group, first_group, last_group;
6938 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6939 uint64_t start, end, minlen, trimmed = 0;
6940 ext4_fsblk_t first_data_blk =
6941 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6942 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6943 bool whole_group, eof = false;
6944 int ret = 0;
6945
6946 start = range->start >> sb->s_blocksize_bits;
6947 end = start + (range->len >> sb->s_blocksize_bits) - 1;
6948 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6949 range->minlen >> sb->s_blocksize_bits);
6950
6951 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6952 start >= max_blks ||
6953 range->len < sb->s_blocksize)
6954 return -EINVAL;
6955 /* No point in trying to trim less than the discard granularity */
6956 if (range->minlen < discard_granularity) {
6957 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6958 discard_granularity >> sb->s_blocksize_bits);
6959 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6960 goto out;
6961 }
6962 if (end >= max_blks - 1) {
6963 end = max_blks - 1;
6964 eof = true;
6965 }
6966 if (end <= first_data_blk)
6967 goto out;
6968 if (start < first_data_blk)
6969 start = first_data_blk;
6970
6971 /* Determine first and last group to examine based on start and end */
6972 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6973 &first_group, &first_cluster);
6974 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6975 &last_group, &last_cluster);
6976
6977 /* end now represents the last cluster to discard in this group */
6978 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6979 whole_group = true;
6980
6981 for (group = first_group; group <= last_group; group++) {
6982 grp = ext4_get_group_info(sb, group);
6983 if (!grp)
6984 continue;
6985 /* We only do this if the grp has never been initialized */
6986 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6987 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6988
if (ret) 6989 break; 6990 } 6991 6992 /* 6993 * For all the groups except the last one, last cluster will 6994 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 6995 * change it for the last group, note that last_cluster is 6996 * already computed earlier by ext4_get_group_no_and_offset() 6997 */ 6998 if (group == last_group) { 6999 end = last_cluster; 7000 whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7001 } 7002 if (grp->bb_free >= minlen) { 7003 cnt = ext4_trim_all_free(sb, group, first_cluster, 7004 end, minlen, whole_group); 7005 if (cnt < 0) { 7006 ret = cnt; 7007 break; 7008 } 7009 trimmed += cnt; 7010 } 7011 7012 /* 7013 * For every group except the first one, we are sure 7014 * that the first cluster to discard will be cluster #0. 7015 */ 7016 first_cluster = 0; 7017 } 7018 7019 if (!ret) 7020 EXT4_SB(sb)->s_last_trim_minblks = minlen; 7021 7022 out: 7023 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 7024 return ret; 7025 } 7026 7027 /* Iterate all the free extents in the group. */ 7028 int 7029 ext4_mballoc_query_range( 7030 struct super_block *sb, 7031 ext4_group_t group, 7032 ext4_grpblk_t start, 7033 ext4_grpblk_t end, 7034 ext4_mballoc_query_range_fn formatter, 7035 void *priv) 7036 { 7037 void *bitmap; 7038 ext4_grpblk_t next; 7039 struct ext4_buddy e4b; 7040 int error; 7041 7042 error = ext4_mb_load_buddy(sb, group, &e4b); 7043 if (error) 7044 return error; 7045 bitmap = e4b.bd_bitmap; 7046 7047 ext4_lock_group(sb, group); 7048 7049 start = (e4b.bd_info->bb_first_free > start) ? 7050 e4b.bd_info->bb_first_free : start; 7051 if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 7052 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7053 7054 while (start <= end) { 7055 start = mb_find_next_zero_bit(bitmap, end + 1, start); 7056 if (start > end) 7057 break; 7058 next = mb_find_next_bit(bitmap, end + 1, start); 7059 7060 ext4_unlock_group(sb, group); 7061 error = formatter(sb, group, start, next - start, priv); 7062 if (error) 7063 goto out_unload; 7064 ext4_lock_group(sb, group); 7065 7066 start = next + 1; 7067 } 7068 7069 ext4_unlock_group(sb, group); 7070 out_unload: 7071 ext4_mb_unload_buddy(&e4b); 7072 7073 return error; 7074 } 7075
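/*
 * Illustrative sketch (not part of mballoc.c): how a caller could consume
 * ext4_mballoc_query_range() above. The callback name dump_free_extent and
 * the ext4_msg() reporting are hypothetical; only the callback signature,
 * which mirrors the formatter(sb, group, start, next - start, priv) call
 * made above, is taken from this file.
 *
 * static int dump_free_extent(struct super_block *sb, ext4_group_t group,
 *                             ext4_grpblk_t start, ext4_grpblk_t len,
 *                             void *priv)
 * {
 *         unsigned long *total = priv;
 *
 *         *total += len;
 *         ext4_msg(sb, KERN_INFO, "group %u: %d free clusters at %d",
 *                  group, len, start);
 *         return 0;
 * }
 *
 * Walking all free extents of one group then looks like:
 *
 *         unsigned long total = 0;
 *         int err = ext4_mballoc_query_range(sb, group, 0,
 *                                 EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *                                 dump_free_extent, &total);
 *
 * The callback is invoked with the group unlocked, so it may sleep.
 */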