// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near the
 * specified goal block.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This
 * makes sure that we have contiguous physical blocks representing the
 * file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access them through the page cache. The information
 * regarding each group is loaded via ext4_mb_load_buddy. The information
 * consists of the block bitmap and the buddy information, stored in the
 * inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can hold information for groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we actually need. The
 * extra blocks that we get after allocation are added to the respective
 * prealloc list. In case of inode preallocation we follow a list of
 * heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value of
 * s_mb_group_prealloc is dependent on the cluster size; for non-bigalloc
 * file systems, it is 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -o
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 * structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty groups
 *    so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 * structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have the largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
 *
 * At CR = 1, we only consider groups where the average fragment size is >= the
 * request size. So, we lookup a group which has average fragment size just
 * above or equal to the request size using our average fragment size group
 * lists (data structure 2) in O(1) time.
 *
 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 * linear order, which requires O(N) search time for each CR 0 and CR 1 phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups mballoc should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both the prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for the
 * subsequent requests.
 */
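/*
 * Illustrative sketch (editorial, not part of the allocator) of the
 * inode-PA rule described above: a prealloc space may be consumed only
 * when the requested _logical_ block falls inside
 * [pa_lstart, pa_lstart + pa_len). The helper name is hypothetical.
 */
static inline bool mb_sketch_pa_covers(u64 pa_lstart, unsigned int pa_len,
				       u64 lblk)
{
	/* consume this PA only if lblk lies inside its logical range */
	return lblk >= pa_lstart && lblk < pa_lstart + pa_len;
}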
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - PA is referenced, and while it is, no discard is possible
 *  - PA is referenced until its blocks are marked in the on-disk bitmap
 *  - PA changes only after on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 */
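/*
 * Editorial sketch of the accounting identity above (in-core buddy =
 * on-disk bitmap + PAs), counting blocks rather than tracking block
 * numbers. All names are hypothetical, for illustration only.
 */
struct mb_sketch_counts {
	unsigned int ondisk_used;	/* blocks set in on-disk bitmap */
	unsigned int pa_reserved;	/* blocks still held by PAs */
};

/* blocks the in-core buddy must report as used */
static inline unsigned int
mb_sketch_buddy_used(const struct mb_sketch_counts *c)
{
	return c->ondisk_used + c->pa_reserved;
}

/* 'use PA': N blocks move from PA to on-disk; the buddy total is unchanged */
static inline void
mb_sketch_use_pa(struct mb_sketch_counts *c, unsigned int n)
{
	c->ondisk_used += n;
	c->pa_reserved -= n;
}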
/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr0 lists lock		(cr0)
 *  - cr1 tree lock		(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter is as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and the count of freed
 * blocks found is 0 do we sample the percpu seq counter for all cpus, using
 * the function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}
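/*
 * Editorial sketch of the retry decision documented above (hypothetical
 * helper; the real logic lives in ext4_mb_new_blocks()): the cheap
 * per-cpu counter is sampled first, and only after a failed allocation
 * that found no freed blocks do we pay for the all-cpu sum.
 */
static inline bool mb_sketch_seq_retry(u64 seq_before)
{
	/*
	 * A bumped sequence means blocks were freed or PAs discarded
	 * since we sampled, so retrying the allocation may now succeed.
	 */
	return ext4_get_discard_pa_seq_sum() != seq_before;
}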
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
}
#endif
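/*
 * Editorial note on mb_correct_addr_and_bit() above: the (addr, bit)
 * pair is renormalized so that addr becomes unsigned-long aligned while
 * still naming the same bit of memory; e.g. on a 64-bit machine
 * (base + 5, bit 2) becomes (base, bit 5 * 8 + 2 = 42). A hypothetical
 * self-check expressing that invariant:
 */
static inline bool mb_sketch_same_bit(void *addr, int bit)
{
	void *a = addr;
	int b = bit;

	a = mb_correct_addr_and_bit(&b, a);
	/* the bit index grew by exactly 8 bits per byte of address delta */
	return (char *)addr - (char *)a == (b - bit) / 8;
}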
#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	if (!grp)
		return 0;
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
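/*
 * Editorial worked example for the loop above, with hypothetical
 * numbers: freeing first = 6, len = 10 proceeds as ffs(6) - 1 = 1
 * (alignment caps the chunk at order 1, i.e. 2 blocks at 6), then
 * ffs(8) - 1 = 3 and fls(8) - 1 = 3 (8 blocks at 8), consuming all
 * 10 blocks as power-of-two, naturally aligned chunks.
 */
static inline int mb_sketch_next_chunk_order(int first, int len)
{
	/* largest order allowed by the alignment of 'first'... */
	int max = ffs(first | (1 << 30)) - 1;	/* (1 << 30): stand-in border */
	/* ...and by the remaining length */
	int min = fls(len) - 1;

	return min < max ? min : max;
}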
static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups whose free extents
	 * are all single blocks or for completely empty groups.
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new_order;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
		return;

	new_order = mb_avg_fragment_size_order(sb,
					grp->bb_free / grp->bb_fragments);
	if (new_order == grp->bb_avg_fragment_size_order)
		return;

	if (grp->bb_avg_fragment_size_order != -1) {
		write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
		list_del(&grp->bb_avg_fragment_size_node);
		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	}
	grp->bb_avg_fragment_size_order = new_order;
	write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	list_add_tail(&grp->bb_avg_fragment_size_node,
		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter, *grp;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

	grp = NULL;
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		grp = NULL;
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (grp)
			break;
	}

	if (!grp) {
		/* Increment cr and search again */
		*new_cr = 1;
	} else {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
	}
}
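/*
 * Editorial note on the cr0 scan above: a request of order k only needs
 * lists k .. MB_NUM_ORDERS(sb) - 1, since any group on list i has at
 * least one free extent of order i >= k. For example, a 64-cluster
 * request (ac_2order = 6) first tries s_mb_largest_free_orders[6], then
 * [7], and so on, which is what makes the lookup O(1) in practice.
 */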
/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL, *iter;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
	}

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
			continue;
		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
			continue;
		}
		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
				    bb_avg_fragment_size_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (grp)
			break;
	}

	if (grp) {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
	} else {
		*new_cr = 2;
	}
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= 2)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
	if (!should_optimize_scan(ac))
		goto inc_and_return;

	if (ac->ac_groups_linear_remaining) {
		ac->ac_groups_linear_remaining--;
		goto inc_and_return;
	}

	return group;
inc_and_return:
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at current CR level, this field is updated to indicate
 *            the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used
 *            as determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
		*group = next_linear_group(ac, *group, ngroups);
		return;
	}

	if (*new_cr == 0) {
		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
	} else if (*new_cr == 1) {
		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
	} else {
		/*
		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
		 * bb_free. But until that happens, we should never come here.
		 */
		WARN_ON(1);
	}
}
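/*
 * Editorial sketch of how a scan loop can drive the helper above,
 * escalating the criteria when a level is exhausted. This is a
 * hypothetical stand-in for the loop in ext4_mb_regular_allocator(),
 * not its actual code.
 */
static inline void mb_sketch_scan(struct ext4_allocation_context *ac,
				  ext4_group_t ngroups)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	ext4_group_t scanned;
	int new_cr;

	for (scanned = 0; scanned < ngroups; scanned++) {
		ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
		if (new_cr != ac->ac_criteria) {
			/* current CR level exhausted; escalate */
			ac->ac_criteria = new_cr;
			continue;
		}
		/* ...attempt allocation from 'group' here... */
		if (ac->ac_status == AC_STATUS_FOUND)
			break;
	}
}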
/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int i;

	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
		if (grp->bb_counters[i] > 0)
			break;
	/* No need to move between order lists? */
	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
	    i == grp->bb_largest_free_order) {
		grp->bb_largest_free_order = i;
		return;
	}

	if (grp->bb_largest_free_order >= 0) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_del_init(&grp->bb_largest_free_order_node);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
	grp->bb_largest_free_order = i;
	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_add_tail(&grp->bb_largest_free_order_node,
		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
			    void *buddy, void *bitmap, ext4_group_t group,
			    struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider group descriptor
		 * corrupt and update bb_free using bitmap value
		 */
		grp->bb_free = free;
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
	}
	mb_set_largest_free_order(sb, grp);
	mb_update_avg_fragment_size(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	atomic_inc(&sbi->s_mb_buddies_generated);
	atomic64_add(period, &sbi->s_mb_generation_time);
}
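/*
 * Editorial walk-through of the scan loop above, with hypothetical
 * numbers: for a bitmap whose free runs are [0, 3) and [5, 6), the loop
 * sees first = 0 / len = 3 (ext4_mb_mark_free_simple records one
 * order-1 chunk and one order-0 block) and then first = 5 / len = 1
 * (bb_counters[0]++ directly), ending with free = 4 and
 * bb_fragments = 2.
 */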
/* The buddy information is attached to the buddy cache inode for
 * convenience. The information regarding each group is loaded via
 * ext4_mb_load_buddy. The information consists of the block bitmap
 * and the buddy information, stored in the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can hold information for groups_per_page groups,
 * which is blocks_per_page/2.
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_page = PAGE_SIZE / blocksize;

	mb_debug(sb, "init page %lu\n", page->index);

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, gfp);
		if (bh == NULL)
			return -ENOMEM;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo)
			continue;
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(sb, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			if (!grinfo) {
				err = -EFSCORRUPTED;
				goto out;
			}
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
			       (MB_NUM_ORDERS(sb)));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}
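/*
 * Editorial note on the even/odd hand-off in ext4_mb_init_cache() above:
 * within a page, even blocks hold a group's bitmap and odd blocks its
 * buddy, so the loop first materializes the in-core bitmap
 * ('incore = data') and the very next iteration consumes it to generate
 * the buddy ('incore = NULL'). A hypothetical walk for the first page:
 *
 *	i = 0: block 0 -> bitmap of group 0, incore = data
 *	i = 1: block 1 -> buddy of group 0, built from incore
 *	i = 2: block 2 -> bitmap of group 1, and so on
 */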
/*
 * Lock the buddy and bitmap pages. This makes sure that parallel init_group
 * on the same buddy page cannot happen while we hold the buddy page lock.
 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		put_page(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		put_page(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(sb, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	if (!this_grp)
		return -EFSCORRUPTED;

	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(sb, "load group %u\n", group);

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);
	if (!grp)
		return -EFSCORRUPTED;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize it. So
			 * wait for it to initialize.
			 */
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
	"ext4: bitmap's page->mapping != inode->i_mapping\n")) {
				/* should never happen */
				unlock_page(page);
				ret = -EINVAL;
				goto err;
			}
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL, gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
	"ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
				/* should never happen */
				unlock_page(page);
				ret = -EINVAL;
				goto err;
			}
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
							 gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);

	return 0;

err:
	if (page)
		put_page(page);
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);

	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
			      struct ext4_buddy *e4b)
{
	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		put_page(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1, max;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	while (order <= e4b->bd_blkbits + 1) {
		bb = mb_find_buddy(e4b, order, &max);
		if (!mb_test_bit(block >> order, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		order++;
	}
	return 0;
}

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}
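/*
 * Editorial note on the fast path in mb_clear_bits() above (mb_set_bits()
 * and mb_test_and_clear_bits() below use the same test): once the cursor
 * is 32-bit aligned with at least a word left, a whole __u32 is written
 * instead of looping bit by bit. Hypothetical helper naming the test:
 */
static inline bool mb_sketch_can_take_word(int cur, int end)
{
	/* aligned to a 32-bit boundary and a full word still to go */
	return (cur & 31) == 0 && (end - cur) >= 32;
}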
/* clear bits in given range
 * will return first found zero bit if any, -1 otherwise
 */
static int mb_test_and_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;
	int zero_bit = -1;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			if (*addr != (__u32)(-1) && zero_bit == -1)
				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
			*addr = 0;
			cur += 32;
			continue;
		}
		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
			zero_bit = cur;
		cur++;
	}

	return zero_bit;
}

void mb_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
{
	if (mb_test_bit(*bit + side, bitmap)) {
		mb_clear_bit(*bit, bitmap);
		(*bit) -= side;
		return 1;
	} else {
		(*bit) += side;
		mb_set_bit(*bit, bitmap);
		return -1;
	}
}

static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
{
	int max;
	int order = 1;
	void *buddy = mb_find_buddy(e4b, order, &max);

	while (buddy) {
		void *buddy2;

		/* Bits in range [first; last] are known to be set since
		 * corresponding blocks were allocated. Bits in range
		 * (first; last) will stay set because they form buddies on
		 * upper layer. We just deal with borders if they don't
		 * align with upper layer and then go up.
		 * Releasing entire group is all about clearing
		 * single bit of highest order buddy.
		 */

		/* Example:
		 * ---------------------------------
		 * |   1   |   1   |   1   |   1   |
		 * ---------------------------------
		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
		 * ---------------------------------
		 *   0   1   2   3   4   5   6   7
		 *      \_____________________/
		 *
		 * Neither [1] nor [6] is aligned to above layer.
		 * Left neighbour [0] is free, so mark it busy,
		 * decrease bb_counters and extend range to
		 * [0; 6]
		 * Right neighbour [7] is busy. It can't be coalesced with
		 * [6], so mark [6] free, increase bb_counters and shrink
		 * range to [0; 5].
		 * Then shift range to [0; 2], go up and do the same.
		 */

		if (first & 1)
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
		if (!(last & 1))
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
		if (first > last)
			break;
		order++;

		buddy2 = mb_find_buddy(e4b, order, &max);
		if (!buddy2) {
			mb_clear_bits(buddy, first, last - first + 1);
			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
			break;
		}
		first >>= 1;
		last >>= 1;
		buddy = buddy2;
	}
}
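/*
 * Editorial usage note for mb_test_and_clear_bits() above: the returned
 * position of the first already-zero bit is how mb_free_blocks() below
 * detects a double free. As a hypothetical example, clearing bits
 * [8, 16) of a bitmap in which bit 11 was already clear returns 11;
 * had all eight bits been set, it would return -1.
 */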
         */
        if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
                return;

        mb_check_buddy(e4b);
        mb_free_blocks_double(inode, e4b, first, count);

        this_cpu_inc(discard_pa_seq);
        e4b->bd_info->bb_free += count;
        if (first < e4b->bd_info->bb_first_free)
                e4b->bd_info->bb_first_free = first;

        /* access memory sequentially: check left neighbour,
         * clear range and then check right neighbour
         */
        if (first != 0)
                left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
        block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
        if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
                right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);

        if (unlikely(block != -1)) {
                struct ext4_sb_info *sbi = EXT4_SB(sb);
                ext4_fsblk_t blocknr;

                blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                blocknr += EXT4_C2B(sbi, block);
                if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing already freed block (bit %u); block bitmap corrupt.",
                                              block);
                        ext4_mark_group_bitmap_corrupted(
                                sb, e4b->bd_group,
                                EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                }
                goto done;
        }

        /* let's maintain the fragments counter */
        if (left_is_free && right_is_free)
                e4b->bd_info->bb_fragments--;
        else if (!left_is_free && !right_is_free)
                e4b->bd_info->bb_fragments++;

        /* buddy[0] == bd_bitmap is a special case, so handle
         * it right away and let mb_buddy_mark_free stay free of
         * zero order checks.
         * Check if neighbours are to be coalesced,
         * adjust bitmap bb_counters and borders appropriately.
         */
        if (first & 1) {
                first += !left_is_free;
                e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
        }
        if (!(last & 1)) {
                last -= !right_is_free;
                e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
        }

        if (first <= last)
                mb_buddy_mark_free(e4b, first >> 1, last >> 1);

done:
        mb_set_largest_free_order(sb, e4b->bd_info);
        mb_update_avg_fragment_size(sb, e4b->bd_info);
        mb_check_buddy(e4b);
}

static int mb_find_extent(struct ext4_buddy *e4b, int block,
                          int needed, struct ext4_free_extent *ex)
{
        int next = block;
        int max, order;
        void *buddy;

        assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
        BUG_ON(ex == NULL);

        buddy = mb_find_buddy(e4b, 0, &max);
        BUG_ON(buddy == NULL);
        BUG_ON(block >= max);
        if (mb_test_bit(block, buddy)) {
                ex->fe_len = 0;
                ex->fe_start = 0;
                ex->fe_group = 0;
                return 0;
        }

        /* find actual order */
        order = mb_find_order_for_block(e4b, block);
        block = block >> order;

        ex->fe_len = 1 << order;
        ex->fe_start = block << order;
        ex->fe_group = e4b->bd_group;

        /* calc difference from given start */
        next = next - ex->fe_start;
        ex->fe_len -= next;
        ex->fe_start += next;

        while (needed > ex->fe_len &&
               mb_find_buddy(e4b, order, &max)) {

                if (block + 1 >= max)
                        break;

                next = (block + 1) * (1 << order);
                if (mb_test_bit(next, e4b->bd_bitmap))
                        break;

                order = mb_find_order_for_block(e4b, next);

                block = next >> order;
                ex->fe_len += 1 << order;
        }

        if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
                /* Should never happen!
(but apparently sometimes does?!?) */ 1892 WARN_ON(1); 1893 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1894 "corruption or bug in mb_find_extent " 1895 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 1896 block, order, needed, ex->fe_group, ex->fe_start, 1897 ex->fe_len, ex->fe_logical); 1898 ex->fe_len = 0; 1899 ex->fe_start = 0; 1900 ex->fe_group = 0; 1901 } 1902 return ex->fe_len; 1903 } 1904 1905 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1906 { 1907 int ord; 1908 int mlen = 0; 1909 int max = 0; 1910 int cur; 1911 int start = ex->fe_start; 1912 int len = ex->fe_len; 1913 unsigned ret = 0; 1914 int len0 = len; 1915 void *buddy; 1916 bool split = false; 1917 1918 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1919 BUG_ON(e4b->bd_group != ex->fe_group); 1920 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1921 mb_check_buddy(e4b); 1922 mb_mark_used_double(e4b, start, len); 1923 1924 this_cpu_inc(discard_pa_seq); 1925 e4b->bd_info->bb_free -= len; 1926 if (e4b->bd_info->bb_first_free == start) 1927 e4b->bd_info->bb_first_free += len; 1928 1929 /* let's maintain fragments counter */ 1930 if (start != 0) 1931 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1932 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1933 max = !mb_test_bit(start + len, e4b->bd_bitmap); 1934 if (mlen && max) 1935 e4b->bd_info->bb_fragments++; 1936 else if (!mlen && !max) 1937 e4b->bd_info->bb_fragments--; 1938 1939 /* let's maintain buddy itself */ 1940 while (len) { 1941 if (!split) 1942 ord = mb_find_order_for_block(e4b, start); 1943 1944 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1945 /* the whole chunk may be allocated at once! */ 1946 mlen = 1 << ord; 1947 if (!split) 1948 buddy = mb_find_buddy(e4b, ord, &max); 1949 else 1950 split = false; 1951 BUG_ON((start >> ord) >= max); 1952 mb_set_bit(start >> ord, buddy); 1953 e4b->bd_info->bb_counters[ord]--; 1954 start += mlen; 1955 len -= mlen; 1956 BUG_ON(len < 0); 1957 continue; 1958 } 1959 1960 /* store for history */ 1961 if (ret == 0) 1962 ret = len | (ord << 16); 1963 1964 /* we have to split large buddy */ 1965 BUG_ON(ord <= 0); 1966 buddy = mb_find_buddy(e4b, ord, &max); 1967 mb_set_bit(start >> ord, buddy); 1968 e4b->bd_info->bb_counters[ord]--; 1969 1970 ord--; 1971 cur = (start >> ord) & ~1U; 1972 buddy = mb_find_buddy(e4b, ord, &max); 1973 mb_clear_bit(cur, buddy); 1974 mb_clear_bit(cur + 1, buddy); 1975 e4b->bd_info->bb_counters[ord]++; 1976 e4b->bd_info->bb_counters[ord]++; 1977 split = true; 1978 } 1979 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1980 1981 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 1982 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1983 mb_check_buddy(e4b); 1984 1985 return ret; 1986 } 1987 1988 /* 1989 * Must be called under group lock! 
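 * The caller holds ext4_lock_group() for e4b->bd_group, which
 * serializes the bitmap and buddy updates done here.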
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
                                   struct ext4_buddy *e4b)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        int ret;

        BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
        BUG_ON(ac->ac_status == AC_STATUS_FOUND);

        ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
        ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
        ret = mb_mark_used(e4b, &ac->ac_b_ex);

        /* preallocation can change ac_b_ex, thus we store actually
         * allocated blocks for history */
        ac->ac_f_ex = ac->ac_b_ex;

        ac->ac_status = AC_STATUS_FOUND;
        ac->ac_tail = ret & 0xffff;
        ac->ac_buddy = ret >> 16;

        /*
         * take the page reference. We want the page to be pinned
         * so that we don't get an ext4_mb_init_cache() call for this
         * group until we update the bitmap. That would mean we
         * double allocate blocks. The reference is dropped
         * in ext4_mb_release_context
         */
        ac->ac_bitmap_page = e4b->bd_bitmap_page;
        get_page(ac->ac_bitmap_page);
        ac->ac_buddy_page = e4b->bd_buddy_page;
        get_page(ac->ac_buddy_page);
        /* store last allocated for subsequent stream allocation */
        if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
                spin_lock(&sbi->s_md_lock);
                sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
                sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
                spin_unlock(&sbi->s_md_lock);
        }
        /*
         * As we've just preallocated more space than
         * user requested originally, we store allocated
         * space in a special descriptor.
         */
        if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
                ext4_mb_new_preallocation(ac);

}

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
                                 struct ext4_buddy *e4b,
                                 int finish_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_free_extent *bex = &ac->ac_b_ex;
        struct ext4_free_extent *gex = &ac->ac_g_ex;

        if (ac->ac_status == AC_STATUS_FOUND)
                return;
        /*
         * We don't want to scan for a whole year
         */
        if (ac->ac_found > sbi->s_mb_max_to_scan &&
            !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
                ac->ac_status = AC_STATUS_BREAK;
                return;
        }

        /*
         * Haven't found a good chunk so far, let's continue
         */
        if (bex->fe_len < gex->fe_len)
                return;

        if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
                ext4_mb_use_best_found(ac, e4b);
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * The algorithm used is roughly as follows:
 *
 * * If free extent found is exactly as big as goal, then
 *   stop the scan and use it immediately
 *
 * * If free extent found is smaller than goal, then keep retrying
 *   up to a max of sbi->s_mb_max_to_scan times (default 200). After
 *   that stop scanning and use whatever we have.
 *
 * * If free extent found is bigger than goal, then keep retrying
 *   up to a max of sbi->s_mb_min_to_scan times (default 10) before
 *   stopping the scan and using the extent.
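 *
 * For example (illustrative numbers only): with a goal of 8 clusters,
 * scanning free extents of 6, 16, 12 and then 8 clusters would first
 * remember the 6-cluster extent, replace it with the 16-cluster one
 * (goal not yet satisfied, so bigger is better), replace that with the
 * 12-cluster one (goal satisfied, so smaller-but-sufficient is better),
 * and finally take the 8-cluster extent immediately as an exact match.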
2089 * 2090 * 2091 * FIXME: real allocation policy is to be designed yet! 2092 */ 2093 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2094 struct ext4_free_extent *ex, 2095 struct ext4_buddy *e4b) 2096 { 2097 struct ext4_free_extent *bex = &ac->ac_b_ex; 2098 struct ext4_free_extent *gex = &ac->ac_g_ex; 2099 2100 BUG_ON(ex->fe_len <= 0); 2101 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2102 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2103 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2104 2105 ac->ac_found++; 2106 2107 /* 2108 * The special case - take what you catch first 2109 */ 2110 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2111 *bex = *ex; 2112 ext4_mb_use_best_found(ac, e4b); 2113 return; 2114 } 2115 2116 /* 2117 * Let's check whether the chuck is good enough 2118 */ 2119 if (ex->fe_len == gex->fe_len) { 2120 *bex = *ex; 2121 ext4_mb_use_best_found(ac, e4b); 2122 return; 2123 } 2124 2125 /* 2126 * If this is first found extent, just store it in the context 2127 */ 2128 if (bex->fe_len == 0) { 2129 *bex = *ex; 2130 return; 2131 } 2132 2133 /* 2134 * If new found extent is better, store it in the context 2135 */ 2136 if (bex->fe_len < gex->fe_len) { 2137 /* if the request isn't satisfied, any found extent 2138 * larger than previous best one is better */ 2139 if (ex->fe_len > bex->fe_len) 2140 *bex = *ex; 2141 } else if (ex->fe_len > gex->fe_len) { 2142 /* if the request is satisfied, then we try to find 2143 * an extent that still satisfy the request, but is 2144 * smaller than previous one */ 2145 if (ex->fe_len < bex->fe_len) 2146 *bex = *ex; 2147 } 2148 2149 ext4_mb_check_limits(ac, e4b, 0); 2150 } 2151 2152 static noinline_for_stack 2153 void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2154 struct ext4_buddy *e4b) 2155 { 2156 struct ext4_free_extent ex = ac->ac_b_ex; 2157 ext4_group_t group = ex.fe_group; 2158 int max; 2159 int err; 2160 2161 BUG_ON(ex.fe_len <= 0); 2162 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2163 if (err) 2164 return; 2165 2166 ext4_lock_group(ac->ac_sb, group); 2167 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2168 2169 if (max > 0) { 2170 ac->ac_b_ex = ex; 2171 ext4_mb_use_best_found(ac, e4b); 2172 } 2173 2174 ext4_unlock_group(ac->ac_sb, group); 2175 ext4_mb_unload_buddy(e4b); 2176 } 2177 2178 static noinline_for_stack 2179 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2180 struct ext4_buddy *e4b) 2181 { 2182 ext4_group_t group = ac->ac_g_ex.fe_group; 2183 int max; 2184 int err; 2185 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2186 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2187 struct ext4_free_extent ex; 2188 2189 if (!grp) 2190 return -EFSCORRUPTED; 2191 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2192 return 0; 2193 if (grp->bb_free == 0) 2194 return 0; 2195 2196 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2197 if (err) 2198 return err; 2199 2200 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2201 ext4_mb_unload_buddy(e4b); 2202 return 0; 2203 } 2204 2205 ext4_lock_group(ac->ac_sb, group); 2206 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2207 ac->ac_g_ex.fe_len, &ex); 2208 ex.fe_logical = 0xDEADFA11; /* debug value */ 2209 2210 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 2211 ext4_fsblk_t start; 2212 2213 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 2214 ex.fe_start; 2215 /* use do_div to get remainder (would be 64-bit modulo) 
*/ 2216 if (do_div(start, sbi->s_stripe) == 0) { 2217 ac->ac_found++; 2218 ac->ac_b_ex = ex; 2219 ext4_mb_use_best_found(ac, e4b); 2220 } 2221 } else if (max >= ac->ac_g_ex.fe_len) { 2222 BUG_ON(ex.fe_len <= 0); 2223 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2224 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2225 ac->ac_found++; 2226 ac->ac_b_ex = ex; 2227 ext4_mb_use_best_found(ac, e4b); 2228 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2229 /* Sometimes, caller may want to merge even small 2230 * number of blocks to an existing extent */ 2231 BUG_ON(ex.fe_len <= 0); 2232 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2233 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2234 ac->ac_found++; 2235 ac->ac_b_ex = ex; 2236 ext4_mb_use_best_found(ac, e4b); 2237 } 2238 ext4_unlock_group(ac->ac_sb, group); 2239 ext4_mb_unload_buddy(e4b); 2240 2241 return 0; 2242 } 2243 2244 /* 2245 * The routine scans buddy structures (not bitmap!) from given order 2246 * to max order and tries to find big enough chunk to satisfy the req 2247 */ 2248 static noinline_for_stack 2249 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2250 struct ext4_buddy *e4b) 2251 { 2252 struct super_block *sb = ac->ac_sb; 2253 struct ext4_group_info *grp = e4b->bd_info; 2254 void *buddy; 2255 int i; 2256 int k; 2257 int max; 2258 2259 BUG_ON(ac->ac_2order <= 0); 2260 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2261 if (grp->bb_counters[i] == 0) 2262 continue; 2263 2264 buddy = mb_find_buddy(e4b, i, &max); 2265 if (WARN_RATELIMIT(buddy == NULL, 2266 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 2267 continue; 2268 2269 k = mb_find_next_zero_bit(buddy, max, 0); 2270 if (k >= max) { 2271 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2272 "%d free clusters of order %d. But found 0", 2273 grp->bb_counters[i], i); 2274 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2275 e4b->bd_group, 2276 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2277 break; 2278 } 2279 ac->ac_found++; 2280 2281 ac->ac_b_ex.fe_len = 1 << i; 2282 ac->ac_b_ex.fe_start = k << i; 2283 ac->ac_b_ex.fe_group = e4b->bd_group; 2284 2285 ext4_mb_use_best_found(ac, e4b); 2286 2287 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2288 2289 if (EXT4_SB(sb)->s_mb_stats) 2290 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2291 2292 break; 2293 } 2294 } 2295 2296 /* 2297 * The routine scans the group and measures all found extents. 2298 * In order to optimize scanning, caller must pass number of 2299 * free blocks in the group, so the routine can know upper limit. 2300 */ 2301 static noinline_for_stack 2302 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2303 struct ext4_buddy *e4b) 2304 { 2305 struct super_block *sb = ac->ac_sb; 2306 void *bitmap = e4b->bd_bitmap; 2307 struct ext4_free_extent ex; 2308 int i; 2309 int free; 2310 2311 free = e4b->bd_info->bb_free; 2312 if (WARN_ON(free <= 0)) 2313 return; 2314 2315 i = e4b->bd_info->bb_first_free; 2316 2317 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2318 i = mb_find_next_zero_bit(bitmap, 2319 EXT4_CLUSTERS_PER_GROUP(sb), i); 2320 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 2321 /* 2322 * IF we have corrupt bitmap, we won't find any 2323 * free blocks even though group info says we 2324 * have free blocks 2325 */ 2326 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2327 "%d free clusters as per " 2328 "group info. 
But bitmap says 0",
                                              free);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        break;
                }

                mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
                if (WARN_ON(ex.fe_len <= 0))
                        break;
                if (free < ex.fe_len) {
                        ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
                                        "%d free clusters as per "
                                        "group info. But got %d blocks",
                                        free, ex.fe_len);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        /*
                         * The number of free blocks differs. This mostly
                         * indicates that the bitmap is corrupt. So exit
                         * without claiming the space.
                         */
                        break;
                }
                ex.fe_logical = 0xDEADC0DE; /* debug value */
                ext4_mb_measure_extent(ac, &ex, e4b);

                i += ex.fe_len;
                free -= ex.fe_len;
        }

        ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storage like RAID5:
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
                          struct ext4_buddy *e4b)
{
        struct super_block *sb = ac->ac_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        void *bitmap = e4b->bd_bitmap;
        struct ext4_free_extent ex;
        ext4_fsblk_t first_group_block;
        ext4_fsblk_t a;
        ext4_grpblk_t i;
        int max;

        BUG_ON(sbi->s_stripe == 0);

        /* find first stripe-aligned block in group */
        first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

        a = first_group_block + sbi->s_stripe - 1;
        do_div(a, sbi->s_stripe);
        i = (a * sbi->s_stripe) - first_group_block;

        while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
                if (!mb_test_bit(i, bitmap)) {
                        max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
                        if (max >= sbi->s_stripe) {
                                ac->ac_found++;
                                ex.fe_logical = 0xDEADF00D; /* debug value */
                                ac->ac_b_ex = ex;
                                ext4_mb_use_best_found(ac, e4b);
                                break;
                        }
                }
                i += sbi->s_stripe;
        }
}

/*
 * This is also called BEFORE we load the buddy bitmap.
 * Returns true if the group is suitable for the allocation,
 * false otherwise.
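 *
 * Roughly, the cr levels mean: cr=0 only accepts groups whose buddy
 * metadata already shows a free chunk of the requested order; cr=1
 * applies the average-fragment-size heuristic; cr=2 merely requires
 * enough free clusters; cr=3 accepts any group with free space.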
 */
static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                               ext4_group_t group, int cr)
{
        ext4_grpblk_t free, fragments;
        int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
        struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

        BUG_ON(cr < 0 || cr >= 4);

        if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
                return false;

        free = grp->bb_free;
        if (free == 0)
                return false;

        fragments = grp->bb_fragments;
        if (fragments == 0)
                return false;

        switch (cr) {
        case 0:
                BUG_ON(ac->ac_2order == 0);

                /* Avoid using the first bg of a flexgroup for data files */
                if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
                    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
                    ((group % flex_size) == 0))
                        return false;

                if (free < ac->ac_g_ex.fe_len)
                        return false;

                if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
                        return true;

                if (grp->bb_largest_free_order < ac->ac_2order)
                        return false;

                return true;
        case 1:
                if ((free / fragments) >= ac->ac_g_ex.fe_len)
                        return true;
                break;
        case 2:
                if (free >= ac->ac_g_ex.fe_len)
                        return true;
                break;
        case 3:
                return true;
        default:
                BUG();
        }

        return false;
}

/*
 * This could return a negative error code if something goes wrong
 * during ext4_mb_init_group(). This should not be called with
 * ext4_lock_group() held.
 *
 * Note: because we are conditionally operating with the group lock in
 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
 * function using __acquire and __release. This means we need to be
 * super careful before messing with the error path handling via "goto
 * out"!
 */
static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
                                     ext4_group_t group, int cr)
{
        struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
        struct super_block *sb = ac->ac_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
        ext4_grpblk_t free;
        int ret = 0;

        if (!grp)
                return -EFSCORRUPTED;
        if (sbi->s_mb_stats)
                atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
        if (should_lock) {
                ext4_lock_group(sb, group);
                __release(ext4_group_lock_ptr(sb, group));
        }
        free = grp->bb_free;
        if (free == 0)
                goto out;
        if (cr <= 2 && free < ac->ac_g_ex.fe_len)
                goto out;
        if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
                goto out;
        if (should_lock) {
                __acquire(ext4_group_lock_ptr(sb, group));
                ext4_unlock_group(sb, group);
        }

        /* We only do this if the grp has never been initialized */
        if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
                struct ext4_group_desc *gdp =
                        ext4_get_group_desc(sb, group, NULL);
                int ret;

                /* cr=0/1 is a very optimistic search to find large
                 * good chunks almost for free. If buddy data is not
                 * ready, then this optimization makes no sense. But
                 * we never skip the first block group in a flex_bg,
                 * since this gets used for metadata block allocation,
                 * and we want to make sure we locate metadata blocks
                 * in the first block group in the flex_bg if possible.
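                 * (This is why the test below lets an uninitialized group
                 * through at cr=0/1 only when it is the first group of a
                 * flex_bg or is marked BLOCK_UNINIT, which needs no I/O to
                 * initialize.)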
                 */
                if (cr < 2 &&
                    (!sbi->s_log_groups_per_flex ||
                     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
                    !(ext4_has_group_desc_csum(sb) &&
                      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
                        return 0;
                ret = ext4_mb_init_group(sb, group, GFP_NOFS);
                if (ret)
                        return ret;
        }

        if (should_lock) {
                ext4_lock_group(sb, group);
                __release(ext4_group_lock_ptr(sb, group));
        }
        ret = ext4_mb_good_group(ac, group, cr);
out:
        if (should_lock) {
                __acquire(ext4_group_lock_ptr(sb, group));
                ext4_unlock_group(sb, group);
        }
        return ret;
}

/*
 * Start prefetching @nr block bitmaps starting at @group.
 * Return the next group which needs to be prefetched.
 */
ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
                              unsigned int nr, int *cnt)
{
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        struct buffer_head *bh;
        struct blk_plug plug;

        blk_start_plug(&plug);
        while (nr-- > 0) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
                                                                  NULL);
                struct ext4_group_info *grp = ext4_get_group_info(sb, group);

                /*
                 * Prefetch block groups with free blocks; but don't
                 * bother if it is marked uninitialized on disk, since
                 * it won't require I/O to read. Also only try to
                 * prefetch once, so we avoid the getblk() call, which
                 * can be expensive.
                 */
                if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
                    EXT4_MB_GRP_NEED_INIT(grp) &&
                    ext4_free_group_clusters(sb, gdp) > 0 &&
                    !(ext4_has_group_desc_csum(sb) &&
                      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
                        bh = ext4_read_block_bitmap_nowait(sb, group, true);
                        if (bh && !IS_ERR(bh)) {
                                if (!buffer_uptodate(bh) && cnt)
                                        (*cnt)++;
                                brelse(bh);
                        }
                }
                if (++group >= ngroups)
                        group = 0;
        }
        blk_finish_plug(&plug);
        return group;
}

/*
 * Prefetching reads the block bitmap into the buffer cache; but we
 * need to make sure that the buddy bitmap in the page cache has been
 * initialized. Note that ext4_mb_init_group() will block if the I/O
 * is not yet completed, or indeed if the I/O was not initiated by
 * ext4_mb_prefetch.
 *
 * TODO: We should actually kick off the buddy bitmap setup in a work
 * queue when the buffer I/O is completed, so that we don't block
 * waiting for the block allocation bitmap read to finish when
 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
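 *
 * Note that ext4_mb_prefetch_fini() walks backwards over the @nr groups
 * that precede the prefetch cursor @group, i.e. the groups most recently
 * submitted by ext4_mb_prefetch().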
 */
void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
                           unsigned int nr)
{
        struct ext4_group_desc *gdp;
        struct ext4_group_info *grp;

        while (nr-- > 0) {
                if (!group)
                        group = ext4_get_groups_count(sb);
                group--;
                gdp = ext4_get_group_desc(sb, group, NULL);
                grp = ext4_get_group_info(sb, group);

                if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
                    ext4_free_group_clusters(sb, gdp) > 0 &&
                    !(ext4_has_group_desc_csum(sb) &&
                      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
                        if (ext4_mb_init_group(sb, group, GFP_NOFS))
                                break;
                }
        }
}

static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
        ext4_group_t prefetch_grp = 0, ngroups, group, i;
        int cr = -1, new_cr;
        int err = 0, first_err = 0;
        unsigned int nr = 0, prefetch_ios = 0;
        struct ext4_sb_info *sbi;
        struct super_block *sb;
        struct ext4_buddy e4b;
        int lost;

        sb = ac->ac_sb;
        sbi = EXT4_SB(sb);
        ngroups = ext4_get_groups_count(sb);
        /* non-extent files are limited to low blocks/groups */
        if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
                ngroups = sbi->s_blockfile_groups;

        BUG_ON(ac->ac_status == AC_STATUS_FOUND);

        /* first, try the goal */
        err = ext4_mb_find_by_goal(ac, &e4b);
        if (err || ac->ac_status == AC_STATUS_FOUND)
                goto out;

        if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
                goto out;

        /*
         * ac->ac_2order is set only if the fe_len is a power of 2;
         * if ac->ac_2order is set we also set the criteria to 0 so
         * that we try exact allocation using the buddy.
         */
        i = fls(ac->ac_g_ex.fe_len);
        ac->ac_2order = 0;
        /*
         * We search using buddy data only if the order of the request
         * is greater than or equal to sbi->s_mb_order2_reqs.
         * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
         * We also support searching for power-of-two requests only for
         * requests up to the maximum buddy size we have constructed.
         */
        if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
                /*
                 * This should tell if fe_len is exactly power of 2
                 */
                if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
                        ac->ac_2order = array_index_nospec(i - 1,
                                                           MB_NUM_ORDERS(sb));
        }

        /* if stream allocation is enabled, use global goal */
        if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
                /* TBD: may be a contention hot spot */
                spin_lock(&sbi->s_md_lock);
                ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
                ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
                spin_unlock(&sbi->s_md_lock);
        }

        /* Let's just scan groups to find more or less suitable blocks */
        cr = ac->ac_2order ?
0 : 1; 2685 /* 2686 * cr == 0 try to get exact allocation, 2687 * cr == 3 try to get anything 2688 */ 2689 repeat: 2690 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2691 ac->ac_criteria = cr; 2692 /* 2693 * searching for the right group start 2694 * from the goal value specified 2695 */ 2696 group = ac->ac_g_ex.fe_group; 2697 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2698 prefetch_grp = group; 2699 2700 for (i = 0, new_cr = cr; i < ngroups; i++, 2701 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 2702 int ret = 0; 2703 2704 cond_resched(); 2705 if (new_cr != cr) { 2706 cr = new_cr; 2707 goto repeat; 2708 } 2709 2710 /* 2711 * Batch reads of the block allocation bitmaps 2712 * to get multiple READs in flight; limit 2713 * prefetching at cr=0/1, otherwise mballoc can 2714 * spend a lot of time loading imperfect groups 2715 */ 2716 if ((prefetch_grp == group) && 2717 (cr > 1 || 2718 prefetch_ios < sbi->s_mb_prefetch_limit)) { 2719 unsigned int curr_ios = prefetch_ios; 2720 2721 nr = sbi->s_mb_prefetch; 2722 if (ext4_has_feature_flex_bg(sb)) { 2723 nr = 1 << sbi->s_log_groups_per_flex; 2724 nr -= group & (nr - 1); 2725 nr = min(nr, sbi->s_mb_prefetch); 2726 } 2727 prefetch_grp = ext4_mb_prefetch(sb, group, 2728 nr, &prefetch_ios); 2729 if (prefetch_ios == curr_ios) 2730 nr = 0; 2731 } 2732 2733 /* This now checks without needing the buddy page */ 2734 ret = ext4_mb_good_group_nolock(ac, group, cr); 2735 if (ret <= 0) { 2736 if (!first_err) 2737 first_err = ret; 2738 continue; 2739 } 2740 2741 err = ext4_mb_load_buddy(sb, group, &e4b); 2742 if (err) 2743 goto out; 2744 2745 ext4_lock_group(sb, group); 2746 2747 /* 2748 * We need to check again after locking the 2749 * block group 2750 */ 2751 ret = ext4_mb_good_group(ac, group, cr); 2752 if (ret == 0) { 2753 ext4_unlock_group(sb, group); 2754 ext4_mb_unload_buddy(&e4b); 2755 continue; 2756 } 2757 2758 ac->ac_groups_scanned++; 2759 if (cr == 0) 2760 ext4_mb_simple_scan_group(ac, &e4b); 2761 else if (cr == 1 && sbi->s_stripe && 2762 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2763 ext4_mb_scan_aligned(ac, &e4b); 2764 else 2765 ext4_mb_complex_scan_group(ac, &e4b); 2766 2767 ext4_unlock_group(sb, group); 2768 ext4_mb_unload_buddy(&e4b); 2769 2770 if (ac->ac_status != AC_STATUS_CONTINUE) 2771 break; 2772 } 2773 /* Processed all groups and haven't found blocks */ 2774 if (sbi->s_mb_stats && i == ngroups) 2775 atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2776 } 2777 2778 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2779 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2780 /* 2781 * We've been searching too long. Let's try to allocate 2782 * the best chunk we've found so far 2783 */ 2784 ext4_mb_try_best_found(ac, &e4b); 2785 if (ac->ac_status != AC_STATUS_FOUND) { 2786 /* 2787 * Someone more lucky has already allocated it. 
2788 * The only thing we can do is just take first 2789 * found block(s) 2790 */ 2791 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 2792 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2793 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2794 ac->ac_b_ex.fe_len, lost); 2795 2796 ac->ac_b_ex.fe_group = 0; 2797 ac->ac_b_ex.fe_start = 0; 2798 ac->ac_b_ex.fe_len = 0; 2799 ac->ac_status = AC_STATUS_CONTINUE; 2800 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2801 cr = 3; 2802 goto repeat; 2803 } 2804 } 2805 2806 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2807 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2808 out: 2809 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2810 err = first_err; 2811 2812 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2813 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2814 ac->ac_flags, cr, err); 2815 2816 if (nr) 2817 ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2818 2819 return err; 2820 } 2821 2822 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2823 { 2824 struct super_block *sb = pde_data(file_inode(seq->file)); 2825 ext4_group_t group; 2826 2827 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2828 return NULL; 2829 group = *pos + 1; 2830 return (void *) ((unsigned long) group); 2831 } 2832 2833 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2834 { 2835 struct super_block *sb = pde_data(file_inode(seq->file)); 2836 ext4_group_t group; 2837 2838 ++*pos; 2839 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2840 return NULL; 2841 group = *pos + 1; 2842 return (void *) ((unsigned long) group); 2843 } 2844 2845 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2846 { 2847 struct super_block *sb = pde_data(file_inode(seq->file)); 2848 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2849 int i; 2850 int err, buddy_loaded = 0; 2851 struct ext4_buddy e4b; 2852 struct ext4_group_info *grinfo; 2853 unsigned char blocksize_bits = min_t(unsigned char, 2854 sb->s_blocksize_bits, 2855 EXT4_MAX_BLOCK_LOG_SIZE); 2856 struct sg { 2857 struct ext4_group_info info; 2858 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2859 } sg; 2860 2861 group--; 2862 if (group == 0) 2863 seq_puts(seq, "#group: free frags first [" 2864 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2865 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2866 2867 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2868 sizeof(struct ext4_group_info); 2869 2870 grinfo = ext4_get_group_info(sb, group); 2871 if (!grinfo) 2872 return 0; 2873 /* Load the group info in memory only if not already loaded. */ 2874 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2875 err = ext4_mb_load_buddy(sb, group, &e4b); 2876 if (err) { 2877 seq_printf(seq, "#%-5u: I/O error\n", group); 2878 return 0; 2879 } 2880 buddy_loaded = 1; 2881 } 2882 2883 memcpy(&sg, grinfo, i); 2884 2885 if (buddy_loaded) 2886 ext4_mb_unload_buddy(&e4b); 2887 2888 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2889 sg.info.bb_fragments, sg.info.bb_first_free); 2890 for (i = 0; i <= 13; i++) 2891 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
2892 sg.info.bb_counters[i] : 0); 2893 seq_puts(seq, " ]\n"); 2894 2895 return 0; 2896 } 2897 2898 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2899 { 2900 } 2901 2902 const struct seq_operations ext4_mb_seq_groups_ops = { 2903 .start = ext4_mb_seq_groups_start, 2904 .next = ext4_mb_seq_groups_next, 2905 .stop = ext4_mb_seq_groups_stop, 2906 .show = ext4_mb_seq_groups_show, 2907 }; 2908 2909 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2910 { 2911 struct super_block *sb = seq->private; 2912 struct ext4_sb_info *sbi = EXT4_SB(sb); 2913 2914 seq_puts(seq, "mballoc:\n"); 2915 if (!sbi->s_mb_stats) { 2916 seq_puts(seq, "\tmb stats collection turned off.\n"); 2917 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2918 return 0; 2919 } 2920 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2921 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2922 2923 seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2924 2925 seq_puts(seq, "\tcr0_stats:\n"); 2926 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); 2927 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2928 atomic64_read(&sbi->s_bal_cX_groups_considered[0])); 2929 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2930 atomic64_read(&sbi->s_bal_cX_failed[0])); 2931 seq_printf(seq, "\t\tbad_suggestions: %u\n", 2932 atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2933 2934 seq_puts(seq, "\tcr1_stats:\n"); 2935 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); 2936 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2937 atomic64_read(&sbi->s_bal_cX_groups_considered[1])); 2938 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2939 atomic64_read(&sbi->s_bal_cX_failed[1])); 2940 seq_printf(seq, "\t\tbad_suggestions: %u\n", 2941 atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2942 2943 seq_puts(seq, "\tcr2_stats:\n"); 2944 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); 2945 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2946 atomic64_read(&sbi->s_bal_cX_groups_considered[2])); 2947 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2948 atomic64_read(&sbi->s_bal_cX_failed[2])); 2949 2950 seq_puts(seq, "\tcr3_stats:\n"); 2951 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); 2952 seq_printf(seq, "\t\tgroups_considered: %llu\n", 2953 atomic64_read(&sbi->s_bal_cX_groups_considered[3])); 2954 seq_printf(seq, "\t\tuseless_loops: %llu\n", 2955 atomic64_read(&sbi->s_bal_cX_failed[3])); 2956 seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2957 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 2958 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2959 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2960 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2961 2962 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2963 atomic_read(&sbi->s_mb_buddies_generated), 2964 ext4_get_groups_count(sb)); 2965 seq_printf(seq, "\tbuddies_time_used: %llu\n", 2966 atomic64_read(&sbi->s_mb_generation_time)); 2967 seq_printf(seq, "\tpreallocated: %u\n", 2968 atomic_read(&sbi->s_mb_preallocated)); 2969 seq_printf(seq, "\tdiscarded: %u\n", 2970 atomic_read(&sbi->s_mb_discarded)); 2971 return 0; 2972 } 2973 2974 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 2975 __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 2976 { 2977 
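        /* Positions 1..MB_NUM_ORDERS(sb) walk the largest-free-order lists;
         * positions MB_NUM_ORDERS(sb)+1..2*MB_NUM_ORDERS(sb) walk the
         * average-fragment-size lists (decoded in the show() handler). */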
struct super_block *sb = pde_data(file_inode(seq->file)); 2978 unsigned long position; 2979 2980 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2981 return NULL; 2982 position = *pos + 1; 2983 return (void *) ((unsigned long) position); 2984 } 2985 2986 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 2987 { 2988 struct super_block *sb = pde_data(file_inode(seq->file)); 2989 unsigned long position; 2990 2991 ++*pos; 2992 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2993 return NULL; 2994 position = *pos + 1; 2995 return (void *) ((unsigned long) position); 2996 } 2997 2998 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 2999 { 3000 struct super_block *sb = pde_data(file_inode(seq->file)); 3001 struct ext4_sb_info *sbi = EXT4_SB(sb); 3002 unsigned long position = ((unsigned long) v); 3003 struct ext4_group_info *grp; 3004 unsigned int count; 3005 3006 position--; 3007 if (position >= MB_NUM_ORDERS(sb)) { 3008 position -= MB_NUM_ORDERS(sb); 3009 if (position == 0) 3010 seq_puts(seq, "avg_fragment_size_lists:\n"); 3011 3012 count = 0; 3013 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 3014 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 3015 bb_avg_fragment_size_node) 3016 count++; 3017 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 3018 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3019 (unsigned int)position, count); 3020 return 0; 3021 } 3022 3023 if (position == 0) { 3024 seq_printf(seq, "optimize_scan: %d\n", 3025 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); 3026 seq_puts(seq, "max_free_order_lists:\n"); 3027 } 3028 count = 0; 3029 read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 3030 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3031 bb_largest_free_order_node) 3032 count++; 3033 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3034 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3035 (unsigned int)position, count); 3036 3037 return 0; 3038 } 3039 3040 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3041 { 3042 } 3043 3044 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3045 .start = ext4_mb_seq_structs_summary_start, 3046 .next = ext4_mb_seq_structs_summary_next, 3047 .stop = ext4_mb_seq_structs_summary_stop, 3048 .show = ext4_mb_seq_structs_summary_show, 3049 }; 3050 3051 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3052 { 3053 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3054 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3055 3056 BUG_ON(!cachep); 3057 return cachep; 3058 } 3059 3060 /* 3061 * Allocate the top-level s_group_info array for the specified number 3062 * of groups 3063 */ 3064 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3065 { 3066 struct ext4_sb_info *sbi = EXT4_SB(sb); 3067 unsigned size; 3068 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3069 3070 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3071 EXT4_DESC_PER_BLOCK_BITS(sb); 3072 if (size <= sbi->s_group_info_size) 3073 return 0; 3074 3075 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3076 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3077 if (!new_groupinfo) { 3078 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3079 return -ENOMEM; 3080 } 3081 rcu_read_lock(); 3082 old_groupinfo = rcu_dereference(sbi->s_group_info); 3083 if (old_groupinfo) 3084 memcpy(new_groupinfo, old_groupinfo, 3085 
sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3086 rcu_read_unlock(); 3087 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3088 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3089 if (old_groupinfo) 3090 ext4_kvfree_array_rcu(old_groupinfo); 3091 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3092 sbi->s_group_info_size); 3093 return 0; 3094 } 3095 3096 /* Create and initialize ext4_group_info data for the given group. */ 3097 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3098 struct ext4_group_desc *desc) 3099 { 3100 int i; 3101 int metalen = 0; 3102 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3103 struct ext4_sb_info *sbi = EXT4_SB(sb); 3104 struct ext4_group_info **meta_group_info; 3105 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3106 3107 /* 3108 * First check if this group is the first of a reserved block. 3109 * If it's true, we have to allocate a new table of pointers 3110 * to ext4_group_info structures 3111 */ 3112 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3113 metalen = sizeof(*meta_group_info) << 3114 EXT4_DESC_PER_BLOCK_BITS(sb); 3115 meta_group_info = kmalloc(metalen, GFP_NOFS); 3116 if (meta_group_info == NULL) { 3117 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3118 "for a buddy group"); 3119 return -ENOMEM; 3120 } 3121 rcu_read_lock(); 3122 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3123 rcu_read_unlock(); 3124 } 3125 3126 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3127 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3128 3129 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3130 if (meta_group_info[i] == NULL) { 3131 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3132 goto exit_group_info; 3133 } 3134 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3135 &(meta_group_info[i]->bb_state)); 3136 3137 /* 3138 * initialize bb_free to be able to skip 3139 * empty groups without initialization 3140 */ 3141 if (ext4_has_group_desc_csum(sb) && 3142 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3143 meta_group_info[i]->bb_free = 3144 ext4_free_clusters_after_init(sb, group, desc); 3145 } else { 3146 meta_group_info[i]->bb_free = 3147 ext4_free_group_clusters(sb, desc); 3148 } 3149 3150 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3151 init_rwsem(&meta_group_info[i]->alloc_sem); 3152 meta_group_info[i]->bb_free_root = RB_ROOT; 3153 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 3154 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 3155 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3156 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3157 meta_group_info[i]->bb_group = group; 3158 3159 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3160 return 0; 3161 3162 exit_group_info: 3163 /* If a meta_group_info table has been allocated, release it now */ 3164 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3165 struct ext4_group_info ***group_info; 3166 3167 rcu_read_lock(); 3168 group_info = rcu_dereference(sbi->s_group_info); 3169 kfree(group_info[idx]); 3170 group_info[idx] = NULL; 3171 rcu_read_unlock(); 3172 } 3173 return -ENOMEM; 3174 } /* ext4_mb_add_groupinfo */ 3175 3176 static int ext4_mb_init_backend(struct super_block *sb) 3177 { 3178 ext4_group_t ngroups = ext4_get_groups_count(sb); 3179 ext4_group_t i; 3180 struct ext4_sb_info *sbi = EXT4_SB(sb); 3181 int err; 3182 struct ext4_group_desc *desc; 3183 struct ext4_group_info ***group_info; 3184 struct kmem_cache 
*cachep;

        err = ext4_mb_alloc_groupinfo(sb, ngroups);
        if (err)
                return err;

        sbi->s_buddy_cache = new_inode(sb);
        if (sbi->s_buddy_cache == NULL) {
                ext4_msg(sb, KERN_ERR, "can't get new inode");
                goto err_freesgi;
        }
        /* To avoid potentially colliding with a valid on-disk inode number,
         * use EXT4_BAD_INO for the buddy cache inode number. This inode is
         * not in the inode hash, so it should never be found by iget(), but
         * this will avoid confusion if it ever shows up during debugging. */
        sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
        EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
        for (i = 0; i < ngroups; i++) {
                cond_resched();
                desc = ext4_get_group_desc(sb, i, NULL);
                if (desc == NULL) {
                        ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
                        goto err_freebuddy;
                }
                if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
                        goto err_freebuddy;
        }

        if (ext4_has_feature_flex_bg(sb)) {
                /* a single flex group is supposed to be read by a single IO.
                 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
                 * an unsigned integer, so the maximum shift is 32.
                 */
                if (sbi->s_es->s_log_groups_per_flex >= 32) {
                        ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
                        goto err_freebuddy;
                }
                sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
                                           BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
                sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
        } else {
                sbi->s_mb_prefetch = 32;
        }
        if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
                sbi->s_mb_prefetch = ext4_get_groups_count(sb);
        /* how many real IOs to prefetch within a single allocation at cr=0.
         * Given cr=0 is a CPU-related optimization we shouldn't try to
         * load too many groups; at some point we should start to use what
         * we've got in memory.
3233 * with an average random access time 5ms, it'd take a second to get 3234 * 200 groups (* N with flex_bg), so let's make this limit 4 3235 */ 3236 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3237 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3238 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3239 3240 return 0; 3241 3242 err_freebuddy: 3243 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3244 while (i-- > 0) { 3245 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 3246 3247 if (grp) 3248 kmem_cache_free(cachep, grp); 3249 } 3250 i = sbi->s_group_info_size; 3251 rcu_read_lock(); 3252 group_info = rcu_dereference(sbi->s_group_info); 3253 while (i-- > 0) 3254 kfree(group_info[i]); 3255 rcu_read_unlock(); 3256 iput(sbi->s_buddy_cache); 3257 err_freesgi: 3258 rcu_read_lock(); 3259 kvfree(rcu_dereference(sbi->s_group_info)); 3260 rcu_read_unlock(); 3261 return -ENOMEM; 3262 } 3263 3264 static void ext4_groupinfo_destroy_slabs(void) 3265 { 3266 int i; 3267 3268 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 3269 kmem_cache_destroy(ext4_groupinfo_caches[i]); 3270 ext4_groupinfo_caches[i] = NULL; 3271 } 3272 } 3273 3274 static int ext4_groupinfo_create_slab(size_t size) 3275 { 3276 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 3277 int slab_size; 3278 int blocksize_bits = order_base_2(size); 3279 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3280 struct kmem_cache *cachep; 3281 3282 if (cache_index >= NR_GRPINFO_CACHES) 3283 return -EINVAL; 3284 3285 if (unlikely(cache_index < 0)) 3286 cache_index = 0; 3287 3288 mutex_lock(&ext4_grpinfo_slab_create_mutex); 3289 if (ext4_groupinfo_caches[cache_index]) { 3290 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3291 return 0; /* Already created */ 3292 } 3293 3294 slab_size = offsetof(struct ext4_group_info, 3295 bb_counters[blocksize_bits + 2]); 3296 3297 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 3298 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 3299 NULL); 3300 3301 ext4_groupinfo_caches[cache_index] = cachep; 3302 3303 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 3304 if (!cachep) { 3305 printk(KERN_EMERG 3306 "EXT4-fs: no memory for groupinfo slab cache\n"); 3307 return -ENOMEM; 3308 } 3309 3310 return 0; 3311 } 3312 3313 static void ext4_discard_work(struct work_struct *work) 3314 { 3315 struct ext4_sb_info *sbi = container_of(work, 3316 struct ext4_sb_info, s_discard_work); 3317 struct super_block *sb = sbi->s_sb; 3318 struct ext4_free_data *fd, *nfd; 3319 struct ext4_buddy e4b; 3320 struct list_head discard_list; 3321 ext4_group_t grp, load_grp; 3322 int err = 0; 3323 3324 INIT_LIST_HEAD(&discard_list); 3325 spin_lock(&sbi->s_md_lock); 3326 list_splice_init(&sbi->s_discard_list, &discard_list); 3327 spin_unlock(&sbi->s_md_lock); 3328 3329 load_grp = UINT_MAX; 3330 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 3331 /* 3332 * If filesystem is umounting or no memory or suffering 3333 * from no space, give up the discard 3334 */ 3335 if ((sb->s_flags & SB_ACTIVE) && !err && 3336 !atomic_read(&sbi->s_retry_alloc_pending)) { 3337 grp = fd->efd_group; 3338 if (grp != load_grp) { 3339 if (load_grp != UINT_MAX) 3340 ext4_mb_unload_buddy(&e4b); 3341 3342 err = ext4_mb_load_buddy(sb, grp, &e4b); 3343 if (err) { 3344 kmem_cache_free(ext4_free_data_cachep, fd); 3345 load_grp = UINT_MAX; 3346 continue; 3347 } else { 3348 load_grp = grp; 3349 } 3350 } 3351 3352 ext4_lock_group(sb, grp); 3353 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 3354 fd->efd_start_cluster 
+ fd->efd_count - 1, 1); 3355 ext4_unlock_group(sb, grp); 3356 } 3357 kmem_cache_free(ext4_free_data_cachep, fd); 3358 } 3359 3360 if (load_grp != UINT_MAX) 3361 ext4_mb_unload_buddy(&e4b); 3362 } 3363 3364 int ext4_mb_init(struct super_block *sb) 3365 { 3366 struct ext4_sb_info *sbi = EXT4_SB(sb); 3367 unsigned i, j; 3368 unsigned offset, offset_incr; 3369 unsigned max; 3370 int ret; 3371 3372 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3373 3374 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3375 if (sbi->s_mb_offsets == NULL) { 3376 ret = -ENOMEM; 3377 goto out; 3378 } 3379 3380 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3381 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3382 if (sbi->s_mb_maxs == NULL) { 3383 ret = -ENOMEM; 3384 goto out; 3385 } 3386 3387 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 3388 if (ret < 0) 3389 goto out; 3390 3391 /* order 0 is regular bitmap */ 3392 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3393 sbi->s_mb_offsets[0] = 0; 3394 3395 i = 1; 3396 offset = 0; 3397 offset_incr = 1 << (sb->s_blocksize_bits - 1); 3398 max = sb->s_blocksize << 2; 3399 do { 3400 sbi->s_mb_offsets[i] = offset; 3401 sbi->s_mb_maxs[i] = max; 3402 offset += offset_incr; 3403 offset_incr = offset_incr >> 1; 3404 max = max >> 1; 3405 i++; 3406 } while (i < MB_NUM_ORDERS(sb)); 3407 3408 sbi->s_mb_avg_fragment_size = 3409 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3410 GFP_KERNEL); 3411 if (!sbi->s_mb_avg_fragment_size) { 3412 ret = -ENOMEM; 3413 goto out; 3414 } 3415 sbi->s_mb_avg_fragment_size_locks = 3416 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3417 GFP_KERNEL); 3418 if (!sbi->s_mb_avg_fragment_size_locks) { 3419 ret = -ENOMEM; 3420 goto out; 3421 } 3422 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3423 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 3424 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 3425 } 3426 sbi->s_mb_largest_free_orders = 3427 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3428 GFP_KERNEL); 3429 if (!sbi->s_mb_largest_free_orders) { 3430 ret = -ENOMEM; 3431 goto out; 3432 } 3433 sbi->s_mb_largest_free_orders_locks = 3434 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3435 GFP_KERNEL); 3436 if (!sbi->s_mb_largest_free_orders_locks) { 3437 ret = -ENOMEM; 3438 goto out; 3439 } 3440 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3441 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); 3442 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); 3443 } 3444 3445 spin_lock_init(&sbi->s_md_lock); 3446 sbi->s_mb_free_pending = 0; 3447 INIT_LIST_HEAD(&sbi->s_freed_data_list); 3448 INIT_LIST_HEAD(&sbi->s_discard_list); 3449 INIT_WORK(&sbi->s_discard_work, ext4_discard_work); 3450 atomic_set(&sbi->s_retry_alloc_pending, 0); 3451 3452 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 3453 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 3454 sbi->s_mb_stats = MB_DEFAULT_STATS; 3455 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 3456 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 3457 /* 3458 * The default group preallocation is 512, which for 4k block 3459 * sizes translates to 2 megabytes. However for bigalloc file 3460 * systems, this is probably too big (i.e, if the cluster size 3461 * is 1 megabyte, then group preallocation size becomes half a 3462 * gigabyte!). As a default, we will keep a two megabyte 3463 * group pralloc size for cluster sizes up to 64k, and after 3464 * that, we will force a minimum group preallocation size of 3465 * 32 clusters. 
This translates to 8 megs when the cluster 3466 * size is 256k, and 32 megs when the cluster size is 1 meg, 3467 * which seems reasonable as a default. 3468 */ 3469 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 3470 sbi->s_cluster_bits, 32); 3471 /* 3472 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 3473 * to the lowest multiple of s_stripe which is bigger than 3474 * the s_mb_group_prealloc as determined above. We want 3475 * the preallocation size to be an exact multiple of the 3476 * RAID stripe size so that preallocations don't fragment 3477 * the stripes. 3478 */ 3479 if (sbi->s_stripe > 1) { 3480 sbi->s_mb_group_prealloc = roundup( 3481 sbi->s_mb_group_prealloc, sbi->s_stripe); 3482 } 3483 3484 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3485 if (sbi->s_locality_groups == NULL) { 3486 ret = -ENOMEM; 3487 goto out; 3488 } 3489 for_each_possible_cpu(i) { 3490 struct ext4_locality_group *lg; 3491 lg = per_cpu_ptr(sbi->s_locality_groups, i); 3492 mutex_init(&lg->lg_mutex); 3493 for (j = 0; j < PREALLOC_TB_SIZE; j++) 3494 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3495 spin_lock_init(&lg->lg_prealloc_lock); 3496 } 3497 3498 if (bdev_nonrot(sb->s_bdev)) 3499 sbi->s_mb_max_linear_groups = 0; 3500 else 3501 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 3502 /* init file for buddy data */ 3503 ret = ext4_mb_init_backend(sb); 3504 if (ret != 0) 3505 goto out_free_locality_groups; 3506 3507 return 0; 3508 3509 out_free_locality_groups: 3510 free_percpu(sbi->s_locality_groups); 3511 sbi->s_locality_groups = NULL; 3512 out: 3513 kfree(sbi->s_mb_avg_fragment_size); 3514 kfree(sbi->s_mb_avg_fragment_size_locks); 3515 kfree(sbi->s_mb_largest_free_orders); 3516 kfree(sbi->s_mb_largest_free_orders_locks); 3517 kfree(sbi->s_mb_offsets); 3518 sbi->s_mb_offsets = NULL; 3519 kfree(sbi->s_mb_maxs); 3520 sbi->s_mb_maxs = NULL; 3521 return ret; 3522 } 3523 3524 /* need to called with the ext4 group lock held */ 3525 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3526 { 3527 struct ext4_prealloc_space *pa; 3528 struct list_head *cur, *tmp; 3529 int count = 0; 3530 3531 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3532 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3533 list_del(&pa->pa_group_list); 3534 count++; 3535 kmem_cache_free(ext4_pspace_cachep, pa); 3536 } 3537 return count; 3538 } 3539 3540 int ext4_mb_release(struct super_block *sb) 3541 { 3542 ext4_group_t ngroups = ext4_get_groups_count(sb); 3543 ext4_group_t i; 3544 int num_meta_group_infos; 3545 struct ext4_group_info *grinfo, ***group_info; 3546 struct ext4_sb_info *sbi = EXT4_SB(sb); 3547 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3548 int count; 3549 3550 if (test_opt(sb, DISCARD)) { 3551 /* 3552 * wait the discard work to drain all of ext4_free_data 3553 */ 3554 flush_work(&sbi->s_discard_work); 3555 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 3556 } 3557 3558 if (sbi->s_group_info) { 3559 for (i = 0; i < ngroups; i++) { 3560 cond_resched(); 3561 grinfo = ext4_get_group_info(sb, i); 3562 if (!grinfo) 3563 continue; 3564 mb_group_bb_bitmap_free(grinfo); 3565 ext4_lock_group(sb, i); 3566 count = ext4_mb_cleanup_pa(grinfo); 3567 if (count) 3568 mb_debug(sb, "mballoc: %d PAs left\n", 3569 count); 3570 ext4_unlock_group(sb, i); 3571 kmem_cache_free(cachep, grinfo); 3572 } 3573 num_meta_group_infos = (ngroups + 3574 EXT4_DESC_PER_BLOCK(sb) - 1) >> 3575 EXT4_DESC_PER_BLOCK_BITS(sb); 3576 rcu_read_lock(); 3577 
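                /* the per-group ext4_group_info structs were freed above;
                 * now release the pointer blocks and the top-level array */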
group_info = rcu_dereference(sbi->s_group_info); 3578 for (i = 0; i < num_meta_group_infos; i++) 3579 kfree(group_info[i]); 3580 kvfree(group_info); 3581 rcu_read_unlock(); 3582 } 3583 kfree(sbi->s_mb_avg_fragment_size); 3584 kfree(sbi->s_mb_avg_fragment_size_locks); 3585 kfree(sbi->s_mb_largest_free_orders); 3586 kfree(sbi->s_mb_largest_free_orders_locks); 3587 kfree(sbi->s_mb_offsets); 3588 kfree(sbi->s_mb_maxs); 3589 iput(sbi->s_buddy_cache); 3590 if (sbi->s_mb_stats) { 3591 ext4_msg(sb, KERN_INFO, 3592 "mballoc: %u blocks %u reqs (%u success)", 3593 atomic_read(&sbi->s_bal_allocated), 3594 atomic_read(&sbi->s_bal_reqs), 3595 atomic_read(&sbi->s_bal_success)); 3596 ext4_msg(sb, KERN_INFO, 3597 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3598 "%u 2^N hits, %u breaks, %u lost", 3599 atomic_read(&sbi->s_bal_ex_scanned), 3600 atomic_read(&sbi->s_bal_groups_scanned), 3601 atomic_read(&sbi->s_bal_goals), 3602 atomic_read(&sbi->s_bal_2orders), 3603 atomic_read(&sbi->s_bal_breaks), 3604 atomic_read(&sbi->s_mb_lost_chunks)); 3605 ext4_msg(sb, KERN_INFO, 3606 "mballoc: %u generated and it took %llu", 3607 atomic_read(&sbi->s_mb_buddies_generated), 3608 atomic64_read(&sbi->s_mb_generation_time)); 3609 ext4_msg(sb, KERN_INFO, 3610 "mballoc: %u preallocated, %u discarded", 3611 atomic_read(&sbi->s_mb_preallocated), 3612 atomic_read(&sbi->s_mb_discarded)); 3613 } 3614 3615 free_percpu(sbi->s_locality_groups); 3616 3617 return 0; 3618 } 3619 3620 static inline int ext4_issue_discard(struct super_block *sb, 3621 ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3622 struct bio **biop) 3623 { 3624 ext4_fsblk_t discard_block; 3625 3626 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3627 ext4_group_first_block_no(sb, block_group)); 3628 count = EXT4_C2B(EXT4_SB(sb), count); 3629 trace_ext4_discard_blocks(sb, 3630 (unsigned long long) discard_block, count); 3631 if (biop) { 3632 return __blkdev_issue_discard(sb->s_bdev, 3633 (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3634 (sector_t)count << (sb->s_blocksize_bits - 9), 3635 GFP_NOFS, biop); 3636 } else 3637 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3638 } 3639 3640 static void ext4_free_data_in_buddy(struct super_block *sb, 3641 struct ext4_free_data *entry) 3642 { 3643 struct ext4_buddy e4b; 3644 struct ext4_group_info *db; 3645 int err, count = 0; 3646 3647 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3648 entry->efd_count, entry->efd_group, entry); 3649 3650 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3651 /* we expect to find existing buddy because it's pinned */ 3652 BUG_ON(err != 0); 3653 3654 spin_lock(&EXT4_SB(sb)->s_md_lock); 3655 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3656 spin_unlock(&EXT4_SB(sb)->s_md_lock); 3657 3658 db = e4b.bd_info; 3659 /* there are blocks to put in buddy to make them really free */ 3660 count += entry->efd_count; 3661 ext4_lock_group(sb, entry->efd_group); 3662 /* Take it out of per group rb tree */ 3663 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3664 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3665 3666 /* 3667 * Clear the trimmed flag for the group so that the next 3668 * ext4_trim_fs can trim it. 3669 * If the volume is mounted with -o discard, online discard 3670 * is supported and the free blocks will be trimmed online. 
3671 */ 3672 if (!test_opt(sb, DISCARD)) 3673 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3674 3675 if (!db->bb_free_root.rb_node) { 3676 /* No more items in the per group rb tree 3677 * balance refcounts from ext4_mb_free_metadata() 3678 */ 3679 put_page(e4b.bd_buddy_page); 3680 put_page(e4b.bd_bitmap_page); 3681 } 3682 ext4_unlock_group(sb, entry->efd_group); 3683 ext4_mb_unload_buddy(&e4b); 3684 3685 mb_debug(sb, "freed %d blocks in 1 structures\n", count); 3686 } 3687 3688 /* 3689 * This function is called by the jbd2 layer once the commit has finished, 3690 * so we know we can free the blocks that were released with that commit. 3691 */ 3692 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3693 { 3694 struct ext4_sb_info *sbi = EXT4_SB(sb); 3695 struct ext4_free_data *entry, *tmp; 3696 struct list_head freed_data_list; 3697 struct list_head *cut_pos = NULL; 3698 bool wake; 3699 3700 INIT_LIST_HEAD(&freed_data_list); 3701 3702 spin_lock(&sbi->s_md_lock); 3703 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3704 if (entry->efd_tid != commit_tid) 3705 break; 3706 cut_pos = &entry->efd_list; 3707 } 3708 if (cut_pos) 3709 list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3710 cut_pos); 3711 spin_unlock(&sbi->s_md_lock); 3712 3713 list_for_each_entry(entry, &freed_data_list, efd_list) 3714 ext4_free_data_in_buddy(sb, entry); 3715 3716 if (test_opt(sb, DISCARD)) { 3717 spin_lock(&sbi->s_md_lock); 3718 wake = list_empty(&sbi->s_discard_list); 3719 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 3720 spin_unlock(&sbi->s_md_lock); 3721 if (wake) 3722 queue_work(system_unbound_wq, &sbi->s_discard_work); 3723 } else { 3724 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 3725 kmem_cache_free(ext4_free_data_cachep, entry); 3726 } 3727 } 3728 3729 int __init ext4_init_mballoc(void) 3730 { 3731 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 3732 SLAB_RECLAIM_ACCOUNT); 3733 if (ext4_pspace_cachep == NULL) 3734 goto out; 3735 3736 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 3737 SLAB_RECLAIM_ACCOUNT); 3738 if (ext4_ac_cachep == NULL) 3739 goto out_pa_free; 3740 3741 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 3742 SLAB_RECLAIM_ACCOUNT); 3743 if (ext4_free_data_cachep == NULL) 3744 goto out_ac_free; 3745 3746 return 0; 3747 3748 out_ac_free: 3749 kmem_cache_destroy(ext4_ac_cachep); 3750 out_pa_free: 3751 kmem_cache_destroy(ext4_pspace_cachep); 3752 out: 3753 return -ENOMEM; 3754 } 3755 3756 void ext4_exit_mballoc(void) 3757 { 3758 /* 3759 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 3760 * before destroying the slab cache. 
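	 * (locality-group PAs are freed with
	 * call_rcu(&pa->u.pa_rcu, ext4_mb_pa_callback), and that callback
	 * returns the ext4_prealloc_space to ext4_pspace_cachep, hence
	 * the barrier before kmem_cache_destroy() below.)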
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_data_cachep);
	ext4_groupinfo_destroy_slabs();
}


/*
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps.
 * Returns 0 on success or an error code.
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
				handle_t *handle, unsigned int reserv_clstrs)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	if (IS_ERR(bitmap_bh)) {
		return PTR_ERR(bitmap_bh);
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_err;

	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_group_clusters(sb, gdp));

	BUFFER_TRACE(gdp_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
	if (err)
		goto out_err;

	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata", block, block+len);
		/*
		 * The file system was mounted not to panic on error.
		 * Fix the bitmap and return EFSCORRUPTED.
		 * We leak some of the blocks here.
		 */
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			      ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!err)
			err = -EFSCORRUPTED;
		goto out_err;
	}

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
						bitmap_bh->b_data));
		}
	}
#endif
	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
		      ac->ac_b_ex.fe_len);
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
					     ext4_free_clusters_after_init(sb,
						ac->ac_b_ex.fe_group, gdp));
	}
	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_free_group_clusters_set(sb, gdp, len);
	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);

	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty block count also. Should not go negative.
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   reserv_clstrs);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi,
							  ac->ac_b_ex.fe_group);
		atomic64_sub(ac->ac_b_ex.fe_len,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);

out_err:
	brelse(bitmap_bh);
	return err;
}

/*
 * Idempotent helper for Ext4 fast commit replay path to set the state of
 * blocks in bitmaps and update counters.
 */
void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
			int len, int state)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	int i, err = 0;
	int already;
	unsigned int clen, clen_changed, thisgrp_len;

	while (len > 0) {
		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary.
		 * With flex_bg it can happen that (block, len) spans more
		 * than one group. In that case we need to get the
		 * corresponding group metadata to work with, which is why
		 * the surrounding while loop handles one group at a time.
		 */
		thisgrp_len = min_t(unsigned int, (unsigned int)len,
			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
		clen = EXT4_NUM_B2C(sbi, thisgrp_len);

		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
			ext4_error(sb, "Marking blocks in system zone - "
				   "Block = %llu, len = %u",
				   block, thisgrp_len);
			bitmap_bh = NULL;
			break;
		}

		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(bitmap_bh)) {
			err = PTR_ERR(bitmap_bh);
			bitmap_bh = NULL;
			break;
		}

		err = -EIO;
		gdp = ext4_get_group_desc(sb, group, &gdp_bh);
		if (!gdp)
			break;

		ext4_lock_group(sb, group);
		already = 0;
		for (i = 0; i < clen; i++)
			if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
			    !state)
				already++;

		clen_changed = clen - already;
		if (state)
			mb_set_bits(bitmap_bh->b_data, blkoff, clen);
		else
			mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
		}
		if (state)
			clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
		else
			clen = ext4_free_group_clusters(sb, gdp) + clen_changed;

		ext4_free_group_clusters_set(sb, gdp, clen);
		ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
		ext4_group_desc_csum_set(sb, group, gdp);

		ext4_unlock_group(sb, group);

		if (sbi->s_log_groups_per_flex) {
			ext4_group_t flex_group = ext4_flex_group(sbi, group);
			struct flex_groups *fg = sbi_array_rcu_deref(sbi,
						 s_flex_groups, flex_group);

			if (state)
				atomic64_sub(clen_changed, &fg->free_clusters);
			else
				atomic64_add(clen_changed, &fg->free_clusters);

		}

		err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
		if (err)
			break;
		sync_dirty_buffer(bitmap_bh);
		err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
		sync_dirty_buffer(gdp_bh);
		if (err)
			break;

		block += thisgrp_len;
		len -= thisgrp_len;
		brelse(bitmap_bh);
		BUG_ON(len < 0);
	}

	if (err)
		brelse(bitmap_bh);
}

/*
 * here we normalize the request for a locality group.
 * Group requests are normalized to s_mb_group_prealloc, which goes to
 * s_stripe if we set the same via the mount option.
 * s_mb_group_prealloc can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
}

/*
 * This function returns the next element to look at during inode
 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
 * (ei->i_prealloc_lock)
 *
 * new_start	The start of the range we want to compare
 * cur_start	The existing start that we are comparing against
 * node		The node of the rb_tree
 */
static inline struct rb_node*
ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
{
	if (new_start < cur_start)
		return node->rb_left;
	else
		return node->rb_right;
}

static inline void
ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
			  ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_prealloc_space *tmp_pa;
	ext4_lblk_t tmp_pa_start, tmp_pa_end;
	struct rb_node *iter;

	read_lock(&ei->i_prealloc_lock);
	for (iter = ei->i_prealloc_node.rb_node; iter;
	     iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
				  pa_node.inode_node);
		tmp_pa_start = tmp_pa->pa_lstart;
		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);

		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0)
			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
		spin_unlock(&tmp_pa->pa_lock);
	}
	read_unlock(&ei->i_prealloc_lock);
}

/*
 * Given an allocation context "ac" and a range "start", "end", check
 * and adjust boundaries if the range overlaps with any of the existing
 * preallocations stored in the corresponding inode of the allocation context.
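 *
 * For example (illustrative numbers): if the normalized range is
 * [100, 200) and the inode already has a live PA at [180, 220) to the
 * right of the original request, the end is pulled back so the range
 * becomes [100, 180); a left neighbor ending at 120 would instead pull
 * the start up to 120.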
4067 * 4068 * Parameters: 4069 * ac allocation context 4070 * start start of the new range 4071 * end end of the new range 4072 */ 4073 static inline void 4074 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, 4075 ext4_lblk_t *start, ext4_lblk_t *end) 4076 { 4077 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4078 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4079 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL; 4080 struct rb_node *iter; 4081 ext4_lblk_t new_start, new_end; 4082 ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1; 4083 4084 new_start = *start; 4085 new_end = *end; 4086 4087 /* 4088 * Adjust the normalized range so that it doesn't overlap with any 4089 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock 4090 * so it doesn't change underneath us. 4091 */ 4092 read_lock(&ei->i_prealloc_lock); 4093 4094 /* Step 1: find any one immediate neighboring PA of the normalized range */ 4095 for (iter = ei->i_prealloc_node.rb_node; iter; 4096 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4097 tmp_pa_start, iter)) { 4098 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4099 pa_node.inode_node); 4100 tmp_pa_start = tmp_pa->pa_lstart; 4101 tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len); 4102 4103 /* PA must not overlap original request */ 4104 spin_lock(&tmp_pa->pa_lock); 4105 if (tmp_pa->pa_deleted == 0) 4106 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 4107 ac->ac_o_ex.fe_logical < tmp_pa_start)); 4108 spin_unlock(&tmp_pa->pa_lock); 4109 } 4110 4111 /* 4112 * Step 2: check if the found PA is left or right neighbor and 4113 * get the other neighbor 4114 */ 4115 if (tmp_pa) { 4116 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 4117 struct rb_node *tmp; 4118 4119 left_pa = tmp_pa; 4120 tmp = rb_next(&left_pa->pa_node.inode_node); 4121 if (tmp) { 4122 right_pa = rb_entry(tmp, 4123 struct ext4_prealloc_space, 4124 pa_node.inode_node); 4125 } 4126 } else { 4127 struct rb_node *tmp; 4128 4129 right_pa = tmp_pa; 4130 tmp = rb_prev(&right_pa->pa_node.inode_node); 4131 if (tmp) { 4132 left_pa = rb_entry(tmp, 4133 struct ext4_prealloc_space, 4134 pa_node.inode_node); 4135 } 4136 } 4137 } 4138 4139 /* Step 3: get the non deleted neighbors */ 4140 if (left_pa) { 4141 for (iter = &left_pa->pa_node.inode_node;; 4142 iter = rb_prev(iter)) { 4143 if (!iter) { 4144 left_pa = NULL; 4145 break; 4146 } 4147 4148 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4149 pa_node.inode_node); 4150 left_pa = tmp_pa; 4151 spin_lock(&tmp_pa->pa_lock); 4152 if (tmp_pa->pa_deleted == 0) { 4153 spin_unlock(&tmp_pa->pa_lock); 4154 break; 4155 } 4156 spin_unlock(&tmp_pa->pa_lock); 4157 } 4158 } 4159 4160 if (right_pa) { 4161 for (iter = &right_pa->pa_node.inode_node;; 4162 iter = rb_next(iter)) { 4163 if (!iter) { 4164 right_pa = NULL; 4165 break; 4166 } 4167 4168 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4169 pa_node.inode_node); 4170 right_pa = tmp_pa; 4171 spin_lock(&tmp_pa->pa_lock); 4172 if (tmp_pa->pa_deleted == 0) { 4173 spin_unlock(&tmp_pa->pa_lock); 4174 break; 4175 } 4176 spin_unlock(&tmp_pa->pa_lock); 4177 } 4178 } 4179 4180 if (left_pa) { 4181 left_pa_end = 4182 left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len); 4183 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 4184 } 4185 4186 if (right_pa) { 4187 right_pa_start = right_pa->pa_lstart; 4188 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 4189 } 4190 4191 /* Step 4: trim our normalized range to not overlap with the 
neighbors */
	if (left_pa) {
		if (left_pa_end > new_start)
			new_start = left_pa_end;
	}

	if (right_pa) {
		if (right_pa_start < new_end)
			new_end = right_pa_start;
	}
	read_unlock(&ei->i_prealloc_lock);

	/* XXX: extra loop to check we really don't overlap preallocations */
	ext4_mb_pa_assert_overlap(ac, new_start, new_end);

	*start = new_start;
	*end = new_end;
}

/*
 * Normalization means making the request better in terms of
 * size and alignment
 */
static noinline_for_stack void
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_super_block *es = sbi->s_es;
	int bsbits, max;
	ext4_lblk_t end;
	loff_t size, start_off;
	loff_t orig_size __maybe_unused;
	ext4_lblk_t start;

	/* only normalize data requests; metadata requests
	   do not need preallocation */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	/* sometimes the caller may want exact blocks */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	/* caller may indicate that preallocation isn't
	 * required (it's a tail, for example) */
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
		return;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
		ext4_mb_normalize_group_request(ac);
		return;
	}

	bsbits = ac->ac_sb->s_blocksize_bits;

	/* first, let's learn the actual file size
	 * assuming the current request is allocated */
	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
	orig_size = size;

	/* max size of free chunks */
	max = 2 << bsbits;

#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable? */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
		size	  = (loff_t) EXT4_C2B(sbi,
					      ac->ac_o_ex.fe_len) << bsbits;
	}
	size = size >> bsbits;
	start = start_off >> bsbits;
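	/*
	 * A worked example of the table above (illustrative numbers):
	 * with 4k blocks (bsbits == 12), a request whose predicted file
	 * size is 200k falls into the 256k bucket, so size becomes
	 * 256k >> 12 = 64 blocks with start_off 0, i.e. we aim to
	 * preallocate 64 blocks from the start of the file. In the
	 * 2M/4M/8M cases the shift pair on fe_logical additionally
	 * aligns the goal start down to a 2M/4M/8M boundary.
	 */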
	/*
	 * For tiny groups (smaller than 8MB) the chosen allocation
	 * alignment may be larger than group size. Make sure the
	 * alignment does not move allocation to a different group which
	 * makes mballoc fail assertions later.
	 */
	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	/*
	 * Trim allocation request for filesystems with artificially small
	 * groups.
	 */
	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);

	end = start + size;

	ext4_mb_pa_adjust_overlap(ac, &start, &end);

	size = end - start;

	/*
	 * In this function "start" and "size" are normalized for better
	 * alignment and length such that we could preallocate more blocks.
	 * This normalization is done such that the original request of
	 * ac->ac_o_ex.fe_logical & fe_len should always lie within the
	 * "start" and "size" boundaries.
	 * (Note fe_len can be relaxed since the FS block allocation API
	 * does not provide any guarantee on the number of contiguous
	 * blocks allocated, since that depends upon free space left, etc.)
	 * In case of inode pa, later we use the allocated blocks
	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
	 * range of goal/best blocks [start, size] to put it at the
	 * ac_o_ex.fe_logical extent of this inode.
	 * (See ext4_mb_use_inode_pa() for more details)
	 */
	if (start + size <= ac->ac_o_ex.fe_logical ||
			start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 "start %lu, size %lu, fe_logical %lu",
			 (unsigned long) start, (unsigned long) size,
			 (unsigned long) ac->ac_o_ex.fe_logical);
		BUG();
	}
	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));

	/* now prepare goal request */

	/* XXX: is it better to align blocks with respect to logical
	 * placement or satisfy big request as is */
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);

	/* define goal start in order to merge */
	if (ar->pright && (ar->lright == (start + size)) &&
	    ar->pright >= size &&
	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
		/* merge to the right */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
						&ac->ac_g_ex.fe_group,
						&ac->ac_g_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start) &&
	    ar->pleft + 1 < ext4_blocks_count(es)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_g_ex.fe_group,
						&ac->ac_g_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
		 orig_size, start);
}

static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		atomic_add(ac->ac_groups_scanned,
&sbi->s_bal_groups_scanned); 4395 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4396 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4397 atomic_inc(&sbi->s_bal_goals); 4398 if (ac->ac_found > sbi->s_mb_max_to_scan) 4399 atomic_inc(&sbi->s_bal_breaks); 4400 } 4401 4402 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4403 trace_ext4_mballoc_alloc(ac); 4404 else 4405 trace_ext4_mballoc_prealloc(ac); 4406 } 4407 4408 /* 4409 * Called on failure; free up any blocks from the inode PA for this 4410 * context. We don't need this for MB_GROUP_PA because we only change 4411 * pa_free in ext4_mb_release_context(), but on failure, we've already 4412 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4413 */ 4414 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4415 { 4416 struct ext4_prealloc_space *pa = ac->ac_pa; 4417 struct ext4_buddy e4b; 4418 int err; 4419 4420 if (pa == NULL) { 4421 if (ac->ac_f_ex.fe_len == 0) 4422 return; 4423 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4424 if (WARN_RATELIMIT(err, 4425 "ext4: mb_load_buddy failed (%d)", err)) 4426 /* 4427 * This should never happen since we pin the 4428 * pages in the ext4_allocation_context so 4429 * ext4_mb_load_buddy() should never fail. 4430 */ 4431 return; 4432 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4433 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4434 ac->ac_f_ex.fe_len); 4435 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4436 ext4_mb_unload_buddy(&e4b); 4437 return; 4438 } 4439 if (pa->pa_type == MB_INODE_PA) { 4440 spin_lock(&pa->pa_lock); 4441 pa->pa_free += ac->ac_b_ex.fe_len; 4442 spin_unlock(&pa->pa_lock); 4443 } 4444 } 4445 4446 /* 4447 * use blocks preallocated to inode 4448 */ 4449 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4450 struct ext4_prealloc_space *pa) 4451 { 4452 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4453 ext4_fsblk_t start; 4454 ext4_fsblk_t end; 4455 int len; 4456 4457 /* found preallocated blocks, use them */ 4458 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4459 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4460 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4461 len = EXT4_NUM_B2C(sbi, end - start); 4462 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4463 &ac->ac_b_ex.fe_start); 4464 ac->ac_b_ex.fe_len = len; 4465 ac->ac_status = AC_STATUS_FOUND; 4466 ac->ac_pa = pa; 4467 4468 BUG_ON(start < pa->pa_pstart); 4469 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4470 BUG_ON(pa->pa_free < len); 4471 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4472 pa->pa_free -= len; 4473 4474 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4475 } 4476 4477 /* 4478 * use blocks preallocated to locality group 4479 */ 4480 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4481 struct ext4_prealloc_space *pa) 4482 { 4483 unsigned int len = ac->ac_o_ex.fe_len; 4484 4485 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4486 &ac->ac_b_ex.fe_group, 4487 &ac->ac_b_ex.fe_start); 4488 ac->ac_b_ex.fe_len = len; 4489 ac->ac_status = AC_STATUS_FOUND; 4490 ac->ac_pa = pa; 4491 4492 /* we don't correct pa_pstart or pa_len here to avoid 4493 * possible race when the group is being loaded concurrently 4494 * instead we correct pa later, after blocks are marked 4495 * in on-disk bitmap -- see ext4_mb_release_context() 4496 * Other CPUs are prevented from allocating from this pa by lg_mutex 4497 */ 4498 mb_debug(ac->ac_sb, "use 
%u/%u from group pa %p\n",
		 pa->pa_lstart, len, pa);
}

/*
 * Return the prealloc space with the minimal distance from the goal
 * block. @cpa is the prealloc space with the currently known minimal
 * distance from the goal block.
 */
static struct ext4_prealloc_space *
ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
			struct ext4_prealloc_space *pa,
			struct ext4_prealloc_space *cpa)
{
	ext4_fsblk_t cur_distance, new_distance;

	if (cpa == NULL) {
		atomic_inc(&pa->pa_count);
		return pa;
	}
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);

	if (cur_distance <= new_distance)
		return cpa;

	/* drop the previous reference */
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	return pa;
}

/*
 * search goal blocks in preallocated space
 */
static noinline_for_stack bool
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int order, i;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
	ext4_lblk_t tmp_pa_start, tmp_pa_end;
	struct rb_node *iter;
	ext4_fsblk_t goal_block;

	/* only data can be preallocated */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return false;

	/* first, try per-file preallocation */
	read_lock(&ei->i_prealloc_lock);
	for (iter = ei->i_prealloc_node.rb_node; iter;
	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
					    tmp_pa_start, iter)) {
		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
				  pa_node.inode_node);

		/* all fields in this condition don't change,
		 * so we can skip locking for them */
		tmp_pa_start = tmp_pa->pa_lstart;
		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);

		/* original request start doesn't lie in this PA */
		if (ac->ac_o_ex.fe_logical < tmp_pa_start ||
		    ac->ac_o_ex.fe_logical >= tmp_pa_end)
			continue;

		/* non-extent files can't have physical blocks past 2^32 */
		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
		    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
		     EXT4_MAX_BLOCK_FILE_PHYS)) {
			/*
			 * Since PAs don't overlap, we won't find any
			 * other PA to satisfy this.
			 */
			break;
		}

		/* found preallocated blocks, use them */
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) {
			atomic_inc(&tmp_pa->pa_count);
			ext4_mb_use_inode_pa(ac, tmp_pa);
			spin_unlock(&tmp_pa->pa_lock);
			ac->ac_criteria = 10;
			read_unlock(&ei->i_prealloc_lock);
			return true;
		}
		spin_unlock(&tmp_pa->pa_lock);
	}
	read_unlock(&ei->i_prealloc_lock);

	/* can we use group allocation? */
	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
		return false;

	/* inode may have no locality group for some reason */
	lg = ac->ac_lg;
	if (lg == NULL)
		return false;
	order = fls(ac->ac_o_ex.fe_len) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;

	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
	/*
	 * search for the prealloc space that has the minimal distance
	 * from the goal block.
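	 *
	 * For example (illustrative numbers): a request for 300 clusters
	 * has fls(300) - 1 = 8, so the scan below starts at bucket 8 and
	 * walks the higher buckets as well; only PAs with
	 * pa_free >= fe_len are considered, and ext4_mb_check_group_pa()
	 * keeps the candidate closest to the goal block.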
	 */
	for (i = order; i < PREALLOC_TB_SIZE; i++) {
		rcu_read_lock();
		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
					pa_node.lg_list) {
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted == 0 &&
			    tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {

				cpa = ext4_mb_check_group_pa(goal_block,
								tmp_pa, cpa);
			}
			spin_unlock(&tmp_pa->pa_lock);
		}
		rcu_read_unlock();
	}
	if (cpa) {
		ext4_mb_use_group_pa(ac, cpa);
		ac->ac_criteria = 20;
		return true;
	}
	return false;
}

/*
 * the function goes through all blocks freed in the group
 * but not yet committed and marks them used in the in-core bitmap.
 * the buddy must be generated from this bitmap.
 * Needs to be called with the ext4 group lock held.
 */
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group)
{
	struct rb_node *n;
	struct ext4_group_info *grp;
	struct ext4_free_data *entry;

	grp = ext4_get_group_info(sb, group);
	if (!grp)
		return;
	n = rb_first(&(grp->bb_free_root));

	while (n) {
		entry = rb_entry(n, struct ext4_free_data, efd_node);
		mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
		n = rb_next(n);
	}
}

/*
 * the function goes through all preallocations in this group and marks
 * them used in the in-core bitmap. the buddy must be generated from this
 * bitmap.
 * Needs to be called with the ext4 group lock held.
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int len;

	if (!grp)
		return;

	/* all forms of preallocation discard the group first when loading,
	 * so the only competing code is preallocation use.
	 *
4682 * we don't need any locking here 4683 * notice we do NOT ignore preallocations with pa_deleted 4684 * otherwise we could leave used blocks available for 4685 * allocation in buddy when concurrent ext4_mb_put_pa() 4686 * is dropping preallocation 4687 */ 4688 list_for_each(cur, &grp->bb_prealloc_list) { 4689 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4690 spin_lock(&pa->pa_lock); 4691 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4692 &groupnr, &start); 4693 len = pa->pa_len; 4694 spin_unlock(&pa->pa_lock); 4695 if (unlikely(len == 0)) 4696 continue; 4697 BUG_ON(groupnr != group); 4698 mb_set_bits(bitmap, start, len); 4699 preallocated += len; 4700 } 4701 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4702 } 4703 4704 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 4705 struct ext4_prealloc_space *pa) 4706 { 4707 struct ext4_inode_info *ei; 4708 4709 if (pa->pa_deleted) { 4710 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 4711 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 4712 pa->pa_len); 4713 return; 4714 } 4715 4716 pa->pa_deleted = 1; 4717 4718 if (pa->pa_type == MB_INODE_PA) { 4719 ei = EXT4_I(pa->pa_inode); 4720 atomic_dec(&ei->i_prealloc_active); 4721 } 4722 } 4723 4724 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 4725 { 4726 BUG_ON(!pa); 4727 BUG_ON(atomic_read(&pa->pa_count)); 4728 BUG_ON(pa->pa_deleted == 0); 4729 kmem_cache_free(ext4_pspace_cachep, pa); 4730 } 4731 4732 static void ext4_mb_pa_callback(struct rcu_head *head) 4733 { 4734 struct ext4_prealloc_space *pa; 4735 4736 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 4737 ext4_mb_pa_free(pa); 4738 } 4739 4740 /* 4741 * drops a reference to preallocated space descriptor 4742 * if this was the last reference and the space is consumed 4743 */ 4744 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4745 struct super_block *sb, struct ext4_prealloc_space *pa) 4746 { 4747 ext4_group_t grp; 4748 ext4_fsblk_t grp_blk; 4749 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4750 4751 /* in this short window concurrent discard can set pa_deleted */ 4752 spin_lock(&pa->pa_lock); 4753 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 4754 spin_unlock(&pa->pa_lock); 4755 return; 4756 } 4757 4758 if (pa->pa_deleted == 1) { 4759 spin_unlock(&pa->pa_lock); 4760 return; 4761 } 4762 4763 ext4_mb_mark_pa_deleted(sb, pa); 4764 spin_unlock(&pa->pa_lock); 4765 4766 grp_blk = pa->pa_pstart; 4767 /* 4768 * If doing group-based preallocation, pa_pstart may be in the 4769 * next group when pa is used up 4770 */ 4771 if (pa->pa_type == MB_GROUP_PA) 4772 grp_blk--; 4773 4774 grp = ext4_get_group_number(sb, grp_blk); 4775 4776 /* 4777 * possible race: 4778 * 4779 * P1 (buddy init) P2 (regular allocation) 4780 * find block B in PA 4781 * copy on-disk bitmap to buddy 4782 * mark B in on-disk bitmap 4783 * drop PA from group 4784 * mark all PAs in buddy 4785 * 4786 * thus, P1 initializes buddy with B available. 
to prevent this
	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
	 * against that pair
	 */
	ext4_lock_group(sb, grp);
	list_del(&pa->pa_group_list);
	ext4_unlock_group(sb, grp);

	if (pa->pa_type == MB_INODE_PA) {
		write_lock(pa->pa_node_lock.inode_lock);
		rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
		write_unlock(pa->pa_node_lock.inode_lock);
		ext4_mb_pa_free(pa);
	} else {
		spin_lock(pa->pa_node_lock.lg_lock);
		list_del_rcu(&pa->pa_node.lg_list);
		spin_unlock(pa->pa_node_lock.lg_lock);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}

static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
{
	struct rb_node **iter = &root->rb_node, *parent = NULL;
	struct ext4_prealloc_space *iter_pa, *new_pa;
	ext4_lblk_t iter_start, new_start;

	while (*iter) {
		iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
				   pa_node.inode_node);
		new_pa = rb_entry(new, struct ext4_prealloc_space,
				  pa_node.inode_node);
		iter_start = iter_pa->pa_lstart;
		new_start = new_pa->pa_lstart;

		parent = *iter;
		if (new_start < iter_start)
			iter = &((*iter)->rb_left);
		else
			iter = &((*iter)->rb_right);
	}

	rb_link_node(new, parent, iter);
	rb_insert_color(new, root);
}

/*
 * creates new preallocated space for given inode
 */
static noinline_for_stack void
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;
	struct ext4_inode_info *ei;

	/* preallocate only when found space is larger than requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);

	pa = ac->ac_pa;

	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
		int new_bex_start;
		int new_bex_end;

		/* we can't allocate as much as normalizer wants.
		 * so, found space must get proper lstart
		 * to cover original request */
		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

		/*
		 * Use the below logic for adjusting best extent as it keeps
		 * fragmentation in check while ensuring logical range of best
		 * extent doesn't overflow out of goal extent:
		 *
		 * 1. Check if best ex can be kept at end of goal and still
		 *    cover original start
		 * 2. Else, check if best ex can be kept at start of goal and
		 *    still cover original start
		 * 3. Else, keep the best ex at start of original request.
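	 *
	 * For example (illustrative numbers): suppose the goal extent is
	 * logical blocks [0, 16) around an original request at block 13,
	 * but only 4 blocks could be found. Case 1 places the best extent
	 * at [12, 16): it still covers block 13 while staying flush with
	 * the end of the goal. Had the original request started at block
	 * 2 instead, case 1 would fail and case 2 would place the best
	 * extent at [0, 4), which still covers block 2.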
	 */
		new_bex_end = ac->ac_g_ex.fe_logical +
			EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
		new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
		if (ac->ac_o_ex.fe_logical >= new_bex_start)
			goto adjust_bex;

		new_bex_start = ac->ac_g_ex.fe_logical;
		new_bex_end =
			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
		if (ac->ac_o_ex.fe_logical < new_bex_end)
			goto adjust_bex;

		new_bex_start = ac->ac_o_ex.fe_logical;
		new_bex_end =
			new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);

adjust_bex:
		ac->ac_b_ex.fe_logical = new_bex_start;

		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
		BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
				      EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
	}

	pa->pa_lstart = ac->ac_b_ex.fe_logical;
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_INODE_PA;

	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
		 pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_inode_pa(ac, pa);

	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
	ext4_mb_use_inode_pa(ac, pa);

	ei = EXT4_I(ac->ac_inode);
	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	if (!grp)
		return;

	pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
	pa->pa_inode = ac->ac_inode;

	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);

	write_lock(pa->pa_node_lock.inode_lock);
	ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
	write_unlock(pa->pa_node_lock.inode_lock);
	atomic_inc(&ei->i_prealloc_active);
}

/*
 * creates new preallocated space for inodes belonging to a locality group
 */
static noinline_for_stack void
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;

	/* preallocate only when found space is larger than requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);

	pa = ac->ac_pa;

	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_lstart = pa->pa_pstart;
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_node.lg_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_GROUP_PA;

	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
		 pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_group_pa(ac, pa);

	ext4_mb_use_group_pa(ac, pa);
	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);

	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	if (!grp)
		return;
	lg = ac->ac_lg;
	BUG_ON(lg == NULL);

	pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
	pa->pa_inode = NULL;

	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);

	/*
	 * We will later add the new pa to the right bucket
	 * after updating the pa_free in
ext4_mb_release_context 4980 */ 4981 } 4982 4983 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 4984 { 4985 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4986 ext4_mb_new_group_pa(ac); 4987 else 4988 ext4_mb_new_inode_pa(ac); 4989 } 4990 4991 /* 4992 * finds all unused blocks in on-disk bitmap, frees them in 4993 * in-core bitmap and buddy. 4994 * @pa must be unlinked from inode and group lists, so that 4995 * nobody else can find/use it. 4996 * the caller MUST hold group/inode locks. 4997 * TODO: optimize the case when there are no in-core structures yet 4998 */ 4999 static noinline_for_stack int 5000 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 5001 struct ext4_prealloc_space *pa) 5002 { 5003 struct super_block *sb = e4b->bd_sb; 5004 struct ext4_sb_info *sbi = EXT4_SB(sb); 5005 unsigned int end; 5006 unsigned int next; 5007 ext4_group_t group; 5008 ext4_grpblk_t bit; 5009 unsigned long long grp_blk_start; 5010 int free = 0; 5011 5012 BUG_ON(pa->pa_deleted == 0); 5013 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5014 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5015 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5016 end = bit + pa->pa_len; 5017 5018 while (bit < end) { 5019 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5020 if (bit >= end) 5021 break; 5022 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5023 mb_debug(sb, "free preallocated %u/%u in group %u\n", 5024 (unsigned) ext4_group_first_block_no(sb, group) + bit, 5025 (unsigned) next - bit, (unsigned) group); 5026 free += next - bit; 5027 5028 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 5029 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 5030 EXT4_C2B(sbi, bit)), 5031 next - bit); 5032 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5033 bit = next + 1; 5034 } 5035 if (free != pa->pa_free) { 5036 ext4_msg(e4b->bd_sb, KERN_CRIT, 5037 "pa %p: logic %lu, phys. %lu, len %d", 5038 pa, (unsigned long) pa->pa_lstart, 5039 (unsigned long) pa->pa_pstart, 5040 pa->pa_len); 5041 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 5042 free, pa->pa_free); 5043 /* 5044 * pa is already deleted so we use the value obtained 5045 * from the bitmap and continue. 
5046 */ 5047 } 5048 atomic_add(free, &sbi->s_mb_discarded); 5049 5050 return 0; 5051 } 5052 5053 static noinline_for_stack int 5054 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 5055 struct ext4_prealloc_space *pa) 5056 { 5057 struct super_block *sb = e4b->bd_sb; 5058 ext4_group_t group; 5059 ext4_grpblk_t bit; 5060 5061 trace_ext4_mb_release_group_pa(sb, pa); 5062 BUG_ON(pa->pa_deleted == 0); 5063 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5064 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5065 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5066 e4b->bd_group, group, pa->pa_pstart); 5067 return 0; 5068 } 5069 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5070 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 5071 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5072 5073 return 0; 5074 } 5075 5076 /* 5077 * releases all preallocations in given group 5078 * 5079 * first, we need to decide discard policy: 5080 * - when do we discard 5081 * 1) ENOSPC 5082 * - how many do we discard 5083 * 1) how many requested 5084 */ 5085 static noinline_for_stack int 5086 ext4_mb_discard_group_preallocations(struct super_block *sb, 5087 ext4_group_t group, int *busy) 5088 { 5089 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5090 struct buffer_head *bitmap_bh = NULL; 5091 struct ext4_prealloc_space *pa, *tmp; 5092 struct list_head list; 5093 struct ext4_buddy e4b; 5094 struct ext4_inode_info *ei; 5095 int err; 5096 int free = 0; 5097 5098 if (!grp) 5099 return 0; 5100 mb_debug(sb, "discard preallocation for group %u\n", group); 5101 if (list_empty(&grp->bb_prealloc_list)) 5102 goto out_dbg; 5103 5104 bitmap_bh = ext4_read_block_bitmap(sb, group); 5105 if (IS_ERR(bitmap_bh)) { 5106 err = PTR_ERR(bitmap_bh); 5107 ext4_error_err(sb, -err, 5108 "Error %d reading block bitmap for %u", 5109 err, group); 5110 goto out_dbg; 5111 } 5112 5113 err = ext4_mb_load_buddy(sb, group, &e4b); 5114 if (err) { 5115 ext4_warning(sb, "Error %d loading buddy information for %u", 5116 err, group); 5117 put_bh(bitmap_bh); 5118 goto out_dbg; 5119 } 5120 5121 INIT_LIST_HEAD(&list); 5122 ext4_lock_group(sb, group); 5123 list_for_each_entry_safe(pa, tmp, 5124 &grp->bb_prealloc_list, pa_group_list) { 5125 spin_lock(&pa->pa_lock); 5126 if (atomic_read(&pa->pa_count)) { 5127 spin_unlock(&pa->pa_lock); 5128 *busy = 1; 5129 continue; 5130 } 5131 if (pa->pa_deleted) { 5132 spin_unlock(&pa->pa_lock); 5133 continue; 5134 } 5135 5136 /* seems this one can be freed ... */ 5137 ext4_mb_mark_pa_deleted(sb, pa); 5138 5139 if (!free) 5140 this_cpu_inc(discard_pa_seq); 5141 5142 /* we can trust pa_free ... 
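		 * (at this point pa_count is zero and pa_deleted has been
		 * set under pa_lock, so ext4_mb_use_preallocated() can no
		 * longer take a reference or consume pa_free behind our
		 * back)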
*/ 5143 free += pa->pa_free; 5144 5145 spin_unlock(&pa->pa_lock); 5146 5147 list_del(&pa->pa_group_list); 5148 list_add(&pa->u.pa_tmp_list, &list); 5149 } 5150 5151 /* now free all selected PAs */ 5152 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5153 5154 /* remove from object (inode or locality group) */ 5155 if (pa->pa_type == MB_GROUP_PA) { 5156 spin_lock(pa->pa_node_lock.lg_lock); 5157 list_del_rcu(&pa->pa_node.lg_list); 5158 spin_unlock(pa->pa_node_lock.lg_lock); 5159 } else { 5160 write_lock(pa->pa_node_lock.inode_lock); 5161 ei = EXT4_I(pa->pa_inode); 5162 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5163 write_unlock(pa->pa_node_lock.inode_lock); 5164 } 5165 5166 list_del(&pa->u.pa_tmp_list); 5167 5168 if (pa->pa_type == MB_GROUP_PA) { 5169 ext4_mb_release_group_pa(&e4b, pa); 5170 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5171 } else { 5172 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5173 ext4_mb_pa_free(pa); 5174 } 5175 } 5176 5177 ext4_unlock_group(sb, group); 5178 ext4_mb_unload_buddy(&e4b); 5179 put_bh(bitmap_bh); 5180 out_dbg: 5181 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 5182 free, group, grp->bb_free); 5183 return free; 5184 } 5185 5186 /* 5187 * releases all non-used preallocated blocks for given inode 5188 * 5189 * It's important to discard preallocations under i_data_sem 5190 * We don't want another block to be served from the prealloc 5191 * space when we are discarding the inode prealloc space. 5192 * 5193 * FIXME!! Make sure it is valid at all the call sites 5194 */ 5195 void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 5196 { 5197 struct ext4_inode_info *ei = EXT4_I(inode); 5198 struct super_block *sb = inode->i_sb; 5199 struct buffer_head *bitmap_bh = NULL; 5200 struct ext4_prealloc_space *pa, *tmp; 5201 ext4_group_t group = 0; 5202 struct list_head list; 5203 struct ext4_buddy e4b; 5204 struct rb_node *iter; 5205 int err; 5206 5207 if (!S_ISREG(inode->i_mode)) { 5208 return; 5209 } 5210 5211 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 5212 return; 5213 5214 mb_debug(sb, "discard preallocation for inode %lu\n", 5215 inode->i_ino); 5216 trace_ext4_discard_preallocations(inode, 5217 atomic_read(&ei->i_prealloc_active), needed); 5218 5219 INIT_LIST_HEAD(&list); 5220 5221 if (needed == 0) 5222 needed = UINT_MAX; 5223 5224 repeat: 5225 /* first, collect all pa's in the inode */ 5226 write_lock(&ei->i_prealloc_lock); 5227 for (iter = rb_first(&ei->i_prealloc_node); iter && needed; 5228 iter = rb_next(iter)) { 5229 pa = rb_entry(iter, struct ext4_prealloc_space, 5230 pa_node.inode_node); 5231 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); 5232 5233 spin_lock(&pa->pa_lock); 5234 if (atomic_read(&pa->pa_count)) { 5235 /* this shouldn't happen often - nobody should 5236 * use preallocation while we're discarding it */ 5237 spin_unlock(&pa->pa_lock); 5238 write_unlock(&ei->i_prealloc_lock); 5239 ext4_msg(sb, KERN_ERR, 5240 "uh-oh! 
used pa while discarding"); 5241 WARN_ON(1); 5242 schedule_timeout_uninterruptible(HZ); 5243 goto repeat; 5244 5245 } 5246 if (pa->pa_deleted == 0) { 5247 ext4_mb_mark_pa_deleted(sb, pa); 5248 spin_unlock(&pa->pa_lock); 5249 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5250 list_add(&pa->u.pa_tmp_list, &list); 5251 needed--; 5252 continue; 5253 } 5254 5255 /* someone is deleting pa right now */ 5256 spin_unlock(&pa->pa_lock); 5257 write_unlock(&ei->i_prealloc_lock); 5258 5259 /* we have to wait here because pa_deleted 5260 * doesn't mean pa is already unlinked from 5261 * the list. as we might be called from 5262 * ->clear_inode() the inode will get freed 5263 * and concurrent thread which is unlinking 5264 * pa from inode's list may access already 5265 * freed memory, bad-bad-bad */ 5266 5267 /* XXX: if this happens too often, we can 5268 * add a flag to force wait only in case 5269 * of ->clear_inode(), but not in case of 5270 * regular truncate */ 5271 schedule_timeout_uninterruptible(HZ); 5272 goto repeat; 5273 } 5274 write_unlock(&ei->i_prealloc_lock); 5275 5276 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5277 BUG_ON(pa->pa_type != MB_INODE_PA); 5278 group = ext4_get_group_number(sb, pa->pa_pstart); 5279 5280 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 5281 GFP_NOFS|__GFP_NOFAIL); 5282 if (err) { 5283 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 5284 err, group); 5285 continue; 5286 } 5287 5288 bitmap_bh = ext4_read_block_bitmap(sb, group); 5289 if (IS_ERR(bitmap_bh)) { 5290 err = PTR_ERR(bitmap_bh); 5291 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 5292 err, group); 5293 ext4_mb_unload_buddy(&e4b); 5294 continue; 5295 } 5296 5297 ext4_lock_group(sb, group); 5298 list_del(&pa->pa_group_list); 5299 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5300 ext4_unlock_group(sb, group); 5301 5302 ext4_mb_unload_buddy(&e4b); 5303 put_bh(bitmap_bh); 5304 5305 list_del(&pa->u.pa_tmp_list); 5306 ext4_mb_pa_free(pa); 5307 } 5308 } 5309 5310 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 5311 { 5312 struct ext4_prealloc_space *pa; 5313 5314 BUG_ON(ext4_pspace_cachep == NULL); 5315 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 5316 if (!pa) 5317 return -ENOMEM; 5318 atomic_set(&pa->pa_count, 1); 5319 ac->ac_pa = pa; 5320 return 0; 5321 } 5322 5323 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) 5324 { 5325 struct ext4_prealloc_space *pa = ac->ac_pa; 5326 5327 BUG_ON(!pa); 5328 ac->ac_pa = NULL; 5329 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 5330 /* 5331 * current function is only called due to an error or due to 5332 * len of found blocks < len of requested blocks hence the PA has not 5333 * been added to grp->bb_prealloc_list. 
So we don't need to lock it 5334 */ 5335 pa->pa_deleted = 1; 5336 ext4_mb_pa_free(pa); 5337 } 5338 5339 #ifdef CONFIG_EXT4_DEBUG 5340 static inline void ext4_mb_show_pa(struct super_block *sb) 5341 { 5342 ext4_group_t i, ngroups; 5343 5344 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5345 return; 5346 5347 ngroups = ext4_get_groups_count(sb); 5348 mb_debug(sb, "groups: "); 5349 for (i = 0; i < ngroups; i++) { 5350 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5351 struct ext4_prealloc_space *pa; 5352 ext4_grpblk_t start; 5353 struct list_head *cur; 5354 5355 if (!grp) 5356 continue; 5357 ext4_lock_group(sb, i); 5358 list_for_each(cur, &grp->bb_prealloc_list) { 5359 pa = list_entry(cur, struct ext4_prealloc_space, 5360 pa_group_list); 5361 spin_lock(&pa->pa_lock); 5362 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5363 NULL, &start); 5364 spin_unlock(&pa->pa_lock); 5365 mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5366 pa->pa_len); 5367 } 5368 ext4_unlock_group(sb, i); 5369 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5370 grp->bb_fragments); 5371 } 5372 } 5373 5374 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5375 { 5376 struct super_block *sb = ac->ac_sb; 5377 5378 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5379 return; 5380 5381 mb_debug(sb, "Can't allocate:" 5382 " Allocation context details:"); 5383 mb_debug(sb, "status %u flags 0x%x", 5384 ac->ac_status, ac->ac_flags); 5385 mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5386 "goal %lu/%lu/%lu@%lu, " 5387 "best %lu/%lu/%lu@%lu cr %d", 5388 (unsigned long)ac->ac_o_ex.fe_group, 5389 (unsigned long)ac->ac_o_ex.fe_start, 5390 (unsigned long)ac->ac_o_ex.fe_len, 5391 (unsigned long)ac->ac_o_ex.fe_logical, 5392 (unsigned long)ac->ac_g_ex.fe_group, 5393 (unsigned long)ac->ac_g_ex.fe_start, 5394 (unsigned long)ac->ac_g_ex.fe_len, 5395 (unsigned long)ac->ac_g_ex.fe_logical, 5396 (unsigned long)ac->ac_b_ex.fe_group, 5397 (unsigned long)ac->ac_b_ex.fe_start, 5398 (unsigned long)ac->ac_b_ex.fe_len, 5399 (unsigned long)ac->ac_b_ex.fe_logical, 5400 (int)ac->ac_criteria); 5401 mb_debug(sb, "%u found", ac->ac_found); 5402 ext4_mb_show_pa(sb); 5403 } 5404 #else 5405 static inline void ext4_mb_show_pa(struct super_block *sb) 5406 { 5407 return; 5408 } 5409 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5410 { 5411 ext4_mb_show_pa(ac->ac_sb); 5412 return; 5413 } 5414 #endif 5415 5416 /* 5417 * We use locality group preallocation for small size file. 
 * The size of the file is determined by the current size or the resulting
 * size after allocation, whichever is larger.
 *
 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
 */
static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits = ac->ac_sb->s_blocksize_bits;
	loff_t size, isize;
	bool inode_pa_eligible, group_pa_eligible;

	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	group_pa_eligible = sbi->s_mb_group_prealloc > 0;
	inode_pa_eligible = true;
	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		>> bsbits;

	/* No point in using inode preallocation for closed files */
	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
	    !inode_is_open_for_write(ac->ac_inode))
		inode_pa_eligible = false;

	size = max(size, isize);
	/* Don't use group allocation for large files */
	if (size > sbi->s_mb_stream_request)
		group_pa_eligible = false;

	if (!group_pa_eligible) {
		if (inode_pa_eligible)
			ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		else
			ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
		return;
	}

	BUG_ON(ac->ac_lg != NULL);
	/*
	 * locality group prealloc space is per-CPU. The reason for having
	 * per-CPU locality groups is to reduce the contention between block
	 * requests from multiple CPUs.
	 */
	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);

	/* we're going to use group allocation */
	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;

	/* serialize all allocations in the group */
	mutex_lock(&ac->ac_lg->lg_mutex);
}

static noinline_for_stack void
ext4_mb_initialize_context(struct ext4_allocation_context *ac,
			   struct ext4_allocation_request *ar)
{
	struct super_block *sb = ar->inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t group;
	unsigned int len;
	ext4_fsblk_t goal;
	ext4_grpblk_t block;

	/* we can't allocate > group size */
	len = ar->len;

	/* just a dirty hack to filter too big requests */
	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
		len = EXT4_CLUSTERS_PER_GROUP(sb);

	/* start searching from the goal */
	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group, &block);

	/* set up allocation goals */
	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
	ac->ac_status = AC_STATUS_CONTINUE;
	ac->ac_sb = sb;
	ac->ac_inode = ar->inode;
	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
	ac->ac_o_ex.fe_group = group;
	ac->ac_o_ex.fe_start = block;
	ac->ac_o_ex.fe_len = len;
	ac->ac_g_ex = ac->ac_o_ex;
	ac->ac_flags = ar->flags;

	/* we have to define the context: we'll work with a file or a
	 * locality group;
	 * this is a policy, actually */
	ext4_mb_group_or_file(ac);

	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
			"left: %u/%u, right %u/%u to %swritable\n",
			(unsigned) ar->len, (unsigned) ar->logical,
			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
			(unsigned) ar->lleft, (unsigned) ar->pleft,
			(unsigned) ar->lright, (unsigned) ar->pright,
			inode_is_open_for_write(ar->inode) ? "" : "non-");
}

static noinline_for_stack void
ext4_mb_discard_lg_preallocations(struct super_block *sb,
				  struct ext4_locality_group *lg,
				  int order, int total_entries)
{
	ext4_group_t group = 0;
	struct ext4_buddy e4b;
	struct list_head discard_list;
	struct ext4_prealloc_space *pa, *tmp;

	mb_debug(sb, "discard locality group preallocation\n");

	INIT_LIST_HEAD(&discard_list);

	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
				pa_node.lg_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/*
			 * This is the pa that we just used
			 * for block allocation. So don't
			 * free it.
			 */
			spin_unlock(&pa->pa_lock);
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		/* only lg prealloc space */
		BUG_ON(pa->pa_type != MB_GROUP_PA);

		/* seems this one can be freed ... */
		ext4_mb_mark_pa_deleted(sb, pa);
		spin_unlock(&pa->pa_lock);

		list_del_rcu(&pa->pa_node.lg_list);
		list_add(&pa->u.pa_tmp_list, &discard_list);

		total_entries--;
		if (total_entries <= 5) {
			/*
			 * We want to keep only 5 entries,
			 * allowing the list to grow to 8. This
			 * makes sure we don't call discard
			 * soon for this list.
			 */
			break;
		}
	}
	spin_unlock(&lg->lg_prealloc_lock);

	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
		int err;

		group = ext4_get_group_number(sb, pa->pa_pstart);
		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
					     GFP_NOFS|__GFP_NOFAIL);
		if (err) {
			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
				       err, group);
			continue;
		}
		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_group_pa(&e4b, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}

/*
 * We have incremented pa_count. So it cannot be freed at this
 * point. Also we hold lg_mutex. So no parallel allocation is
 * possible from this lg. That means pa_free cannot be updated.
 *
 * A parallel ext4_mb_discard_group_preallocations is possible,
 * which can cause the lg_prealloc_list to be updated.
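 *
 * This is why the list walk below takes lg_prealloc_lock and re-checks
 * pa_deleted under each entry's pa_lock before using it.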
 */
static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
{
	int order, added = 0, lg_prealloc_count = 1;
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;

	order = fls(pa->pa_free) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;
	/* Add the prealloc space to lg */
	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
				pa_node.lg_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted) {
			spin_unlock(&tmp_pa->pa_lock);
			continue;
		}
		if (!added && pa->pa_free < tmp_pa->pa_free) {
			/* Add to the tail of the previous entry */
			list_add_tail_rcu(&pa->pa_node.lg_list,
					  &tmp_pa->pa_node.lg_list);
			added = 1;
			/*
			 * we want to count the total
			 * number of entries in the list
			 */
		}
		spin_unlock(&tmp_pa->pa_lock);
		lg_prealloc_count++;
	}
	if (!added)
		list_add_tail_rcu(&pa->pa_node.lg_list,
				  &lg->lg_prealloc_list[order]);
	spin_unlock(&lg->lg_prealloc_lock);

	/* Now trim the list to be not more than 8 elements */
	if (lg_prealloc_count > 8)
		ext4_mb_discard_lg_preallocations(sb, lg,
						  order, lg_prealloc_count);
}

/*
 * Release all resources used in the allocation.
 */
static int ext4_mb_release_context(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_prealloc_space *pa = ac->ac_pa;
	if (pa) {
		if (pa->pa_type == MB_GROUP_PA) {
			/* see comment in ext4_mb_use_group_pa() */
			spin_lock(&pa->pa_lock);
			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_free -= ac->ac_b_ex.fe_len;
			pa->pa_len -= ac->ac_b_ex.fe_len;
			spin_unlock(&pa->pa_lock);

			/*
			 * We want to add the pa to the right bucket.
			 * Remove it from the list and while adding
			 * make sure the list to which we are adding
			 * doesn't grow big.
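			 *
			 * For example (illustrative numbers): a group pa
			 * left with pa_free = 48 clusters is re-filed under
			 * order fls(48) - 1 = 5, and if that list has grown
			 * beyond 8 entries, ext4_mb_add_n_trim() asks
			 * ext4_mb_discard_lg_preallocations() to trim it
			 * back towards 5 entries.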
			 */
			if (likely(pa->pa_free)) {
				spin_lock(pa->pa_node_lock.lg_lock);
				list_del_rcu(&pa->pa_node.lg_list);
				spin_unlock(pa->pa_node_lock.lg_lock);
				ext4_mb_add_n_trim(ac);
			}
		}

		ext4_mb_put_pa(ac, ac->ac_sb, pa);
	}
	if (ac->ac_bitmap_page)
		put_page(ac->ac_bitmap_page);
	if (ac->ac_buddy_page)
		put_page(ac->ac_buddy_page);
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		mutex_unlock(&ac->ac_lg->lg_mutex);
	ext4_mb_collect_stats(ac);
	return 0;
}

static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
{
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int ret;
	int freed = 0, busy = 0;
	int retry = 0;

	trace_ext4_mb_discard_preallocations(sb, needed);

	if (needed == 0)
		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
repeat:
	for (i = 0; i < ngroups && needed > 0; i++) {
		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
		freed += ret;
		needed -= ret;
		cond_resched();
	}

	if (needed > 0 && busy && ++retry < 3) {
		busy = 0;
		goto repeat;
	}

	return freed;
}

static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
			struct ext4_allocation_context *ac, u64 *seq)
{
	int freed;
	u64 seq_retry = 0;
	bool ret = false;

	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
	if (freed) {
		ret = true;
		goto out_dbg;
	}
	seq_retry = ext4_get_discard_pa_seq_sum();
	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
		*seq = seq_retry;
		ret = true;
	}

out_dbg:
	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
	return ret;
}

static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp);

/*
 * Main entry point into mballoc to allocate blocks:
 * it tries to use preallocation first, then falls back
 * to the usual allocation path.
 */
ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp)
{
	struct ext4_allocation_context *ac = NULL;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block = 0;
	unsigned int inquota = 0;
	unsigned int reserv_clstrs = 0;
	int retries = 0;
	u64 seq;

	might_sleep();
	sb = ar->inode->i_sb;
	sbi = EXT4_SB(sb);

	trace_ext4_request_blocks(ar);
	if (sbi->s_mount_state & EXT4_FC_REPLAY)
		return ext4_mb_new_blocks_simple(handle, ar, errp);

	/* Allow to use superuser reservation for quota file */
	if (ext4_is_quota_file(ar->inode))
		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;

	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
		/* Without delayed allocation we need to verify
		 * there are enough free blocks to do the block allocation
		 * and verify that the allocation doesn't exceed the quota
		 * limits.
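		 *
		 * For example (illustrative numbers): a request for 1024
		 * clusters on a nearly full filesystem is halved repeatedly
		 * (1024 -> 512 -> 256 -> ...) by the loop below until
		 * ext4_claim_free_clusters() succeeds or ar->len reaches 0,
		 * in which case we return -ENOSPC.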
		 */
		while (ar->len &&
			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {

			/* let others free the space */
			cond_resched();
			ar->len = ar->len >> 1;
		}
		if (!ar->len) {
			ext4_mb_show_pa(sb);
			*errp = -ENOSPC;
			return 0;
		}
		reserv_clstrs = ar->len;
		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
			dquot_alloc_block_nofail(ar->inode,
						 EXT4_C2B(sbi, ar->len));
		} else {
			while (ar->len &&
				dquot_alloc_block(ar->inode,
						  EXT4_C2B(sbi, ar->len))) {

				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
				ar->len--;
			}
		}
		inquota = ar->len;
		if (ar->len == 0) {
			*errp = -EDQUOT;
			goto out;
		}
	}

	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
	if (!ac) {
		ar->len = 0;
		*errp = -ENOMEM;
		goto out;
	}

	ext4_mb_initialize_context(ac, ar);

	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
	seq = this_cpu_read(discard_pa_seq);
	if (!ext4_mb_use_preallocated(ac)) {
		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
		ext4_mb_normalize_request(ac, ar);

		*errp = ext4_mb_pa_alloc(ac);
		if (*errp)
			goto errout;
repeat:
		/* allocate space in core */
		*errp = ext4_mb_regular_allocator(ac);
		/*
		 * The pa allocated above is added to grp->bb_prealloc_list
		 * only when we were able to allocate some blocks, i.e. when
		 * ac->ac_status == AC_STATUS_FOUND. An error from the call
		 * above means ac->ac_status != AC_STATUS_FOUND, so we have
		 * to free this pa here.
		 */
		if (*errp) {
			ext4_mb_pa_put_free(ac);
			ext4_discard_allocated_blocks(ac);
			goto errout;
		}
		if (ac->ac_status == AC_STATUS_FOUND &&
		    ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
			ext4_mb_pa_put_free(ac);
	}
	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
		if (*errp) {
			ext4_discard_allocated_blocks(ac);
			goto errout;
		} else {
			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
			ar->len = ac->ac_b_ex.fe_len;
		}
	} else {
		if (++retries < 3 &&
		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
			goto repeat;
		/*
		 * If block allocation fails then the pa allocated above
		 * needs to be freed here as well.
		 */
		ext4_mb_pa_put_free(ac);
		*errp = -ENOSPC;
	}

	if (*errp) {
errout:
		ac->ac_b_ex.fe_len = 0;
		ar->len = 0;
		ext4_mb_show_ac(ac);
	}
	ext4_mb_release_context(ac);
	kmem_cache_free(ext4_ac_cachep, ac);
out:
	if (inquota && ar->len < inquota)
		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
	if (!ar->len) {
		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
			/* release all the reserved blocks if non delalloc */
			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
						reserv_clstrs);
	}

	trace_ext4_allocate_blocks(ar, (unsigned long long)block);

	return block;
}

/*
 * We can merge two free data extents only if the physical blocks
 * are contiguous, AND the extents were freed by the same transaction,
 * AND the blocks are associated with the same group.
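 *
 * For example (illustrative numbers): an entry covering clusters 100-119
 * and a new entry covering clusters 120-149, freed by the same transaction
 * in the same group, are merged into a single entry covering 100-149.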
 */
static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
					struct ext4_free_data *entry,
					struct ext4_free_data *new_entry,
					struct rb_root *entry_rb_root)
{
	if ((entry->efd_tid != new_entry->efd_tid) ||
	    (entry->efd_group != new_entry->efd_group))
		return;
	if (entry->efd_start_cluster + entry->efd_count ==
	    new_entry->efd_start_cluster) {
		new_entry->efd_start_cluster = entry->efd_start_cluster;
		new_entry->efd_count += entry->efd_count;
	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
		   entry->efd_start_cluster) {
		new_entry->efd_count += entry->efd_count;
	} else
		return;
	spin_lock(&sbi->s_md_lock);
	list_del(&entry->efd_list);
	spin_unlock(&sbi->s_md_lock);
	rb_erase(&entry->efd_node, entry_rb_root);
	kmem_cache_free(ext4_free_data_cachep, entry);
}

static noinline_for_stack void
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
		      struct ext4_free_data *new_entry)
{
	ext4_group_t group = e4b->bd_group;
	ext4_grpblk_t cluster;
	ext4_grpblk_t clusters = new_entry->efd_count;
	struct ext4_free_data *entry;
	struct ext4_group_info *db = e4b->bd_info;
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct rb_node **n = &db->bb_free_root.rb_node, *node;
	struct rb_node *parent = NULL, *new_node;

	BUG_ON(!ext4_handle_valid(handle));
	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	new_node = &new_entry->efd_node;
	cluster = new_entry->efd_start_cluster;

	if (!*n) {
		/* This is the first free block extent. We need to
		 * protect the buddy cache from being freed,
		 * otherwise we'll refresh it from the
		 * on-disk bitmap and lose not-yet-available
		 * blocks */
		get_page(e4b->bd_buddy_page);
		get_page(e4b->bd_bitmap_page);
	}
	while (*n) {
		parent = *n;
		entry = rb_entry(parent, struct ext4_free_data, efd_node);
		if (cluster < entry->efd_start_cluster)
			n = &(*n)->rb_left;
		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
			n = &(*n)->rb_right;
		else {
			ext4_grp_locked_error(sb, group, 0,
				ext4_group_first_block_no(sb, group) +
				EXT4_C2B(sbi, cluster),
				"Block already on to-be-freed list");
			kmem_cache_free(ext4_free_data_cachep, new_entry);
			return;
		}
	}

	rb_link_node(new_node, parent, n);
	rb_insert_color(new_node, &db->bb_free_root);

	/* Now try to see if the extent can be merged to the left and right */
	node = rb_prev(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_free_data, efd_node);
		ext4_try_merge_freed_extent(sbi, entry, new_entry,
					    &(db->bb_free_root));
	}

	node = rb_next(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_free_data, efd_node);
		ext4_try_merge_freed_extent(sbi, entry, new_entry,
					    &(db->bb_free_root));
	}

	spin_lock(&sbi->s_md_lock);
	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
	sbi->s_mb_free_pending += clusters;
	spin_unlock(&sbi->s_md_lock);
}

/*
 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
 * linearly, starting at the goal block, and also excludes the blocks which
 * are going to be in use after fast commit replay.
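 *
 * For example (illustrative): with a goal block in group 3 at offset 42,
 * the scan starts at bit 42 of group 3's bitmap and walks forward group by
 * group, skipping blocks excluded by fast commit replay, until a single
 * free block is found; ar->len is always set to 1 on success.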
 */
static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp)
{
	struct buffer_head *bitmap_bh;
	struct super_block *sb = ar->inode->i_sb;
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_fsblk_t goal, block;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	goal = ar->goal;
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);

	ar->len = 0;
	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
	for (; group < ext4_get_groups_count(sb); group++) {
		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(bitmap_bh)) {
			*errp = PTR_ERR(bitmap_bh);
			pr_warn("Failed to read block bitmap\n");
			return 0;
		}

		while (1) {
			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
						blkoff);
			if (i >= max)
				break;
			if (ext4_fc_replay_check_excluded(sb,
				ext4_group_first_block_no(sb, group) + i)) {
				blkoff = i + 1;
			} else
				break;
		}
		brelse(bitmap_bh);
		if (i < max)
			break;

		blkoff = 0;
	}

	if (group >= ext4_get_groups_count(sb) || i >= max) {
		*errp = -ENOSPC;
		return 0;
	}

	block = ext4_group_first_block_no(sb, group) + i;
	ext4_mb_mark_bb(sb, block, 1, 1);
	ar->len = 1;

	return block;
}

static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
					unsigned long count)
{
	struct buffer_head *bitmap_bh;
	struct super_block *sb = inode->i_sb;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	int already_freed = 0, err, i;

	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR(bitmap_bh)) {
		pr_warn("Failed to read block bitmap\n");
		return;
	}
	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
	if (!gdp)
		goto err_out;

	for (i = 0; i < count; i++) {
		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
			already_freed++;
	}
	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
	if (err)
		goto err_out;
	ext4_free_group_clusters_set(
		sb, gdp, ext4_free_group_clusters(sb, gdp) +
		count - already_freed);
	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
	sync_dirty_buffer(bitmap_bh);
	sync_dirty_buffer(gdp_bh);

err_out:
	brelse(bitmap_bh);
}

/**
 * ext4_mb_clear_bb() -- helper function for freeing blocks.
 *			Used by ext4_free_blocks()
 * @handle:	handle for this transaction
 * @inode:	inode
 * @block:	starting physical block to be freed
 * @count:	number of blocks to be freed
 * @flags:	flags used by ext4_free_blocks
 */
static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
			     ext4_fsblk_t block, unsigned long count,
			     int flags)
{
	struct buffer_head *bitmap_bh = NULL;
	struct super_block *sb = inode->i_sb;
	struct ext4_group_desc *gdp;
	struct ext4_group_info *grp;
	unsigned int overflow;
	ext4_grpblk_t bit;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	struct ext4_sb_info *sbi;
	struct ext4_buddy e4b;
	unsigned int count_clusters;
	int err = 0;
	int ret;

	sbi = EXT4_SB(sb);

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks in system zone - "
			   "Block = %llu, count = %lu", block, count);
		/* err = 0. ext4_std_error should be a no-op */
		goto error_return;
	}
	flags |= EXT4_FREE_BLOCKS_VALIDATED;

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);

	grp = ext4_get_group_info(sb, block_group);
	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
		return;

	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = EXT4_C2B(sbi, bit) + count -
			EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
	}
	count_clusters = EXT4_NUM_B2C(sbi, count);
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!gdp) {
		err = -EIO;
		goto error_return;
	}

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks in system zone - "
			   "Block = %llu, count = %lu", block, count);
		/* err = 0. ext4_std_error should be a no-op */
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
	if (err)
		goto error_return;
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < count_clusters; i++)
			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
	}
#endif
	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);

	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
				     GFP_NOFS|__GFP_NOFAIL);
	if (err)
		goto error_return;

	/*
	 * We need to make sure we don't reuse the freed block until after the
	 * transaction is committed.
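	 * (The freed range is queued on sbi->s_freed_data_list via
	 * ext4_mb_free_metadata() below and is only returned to the buddy
	 * once the transaction commits.)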
	 * We make an exception if the inode is to be
	 * written in writeback mode, since writeback mode has weak data
	 * consistency guarantees.
	 */
	if (ext4_handle_valid(handle) &&
	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
	     !ext4_should_writeback_data(inode))) {
		struct ext4_free_data *new_entry;
		/*
		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
		 * to fail.
		 */
		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
				GFP_NOFS|__GFP_NOFAIL);
		new_entry->efd_start_cluster = bit;
		new_entry->efd_group = block_group;
		new_entry->efd_count = count_clusters;
		new_entry->efd_tid = handle->h_transaction->t_tid;

		ext4_lock_group(sb, block_group);
		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
		ext4_mb_free_metadata(handle, &e4b, new_entry);
	} else {
		/* We need to update group_info->bb_free and the bitmap
		 * with the group lock held; ext4_mb_generate_buddy()
		 * looks at them with the group lock held.
		 */
		if (test_opt(sb, DISCARD)) {
			err = ext4_issue_discard(sb, block_group, bit, count,
						 NULL);
			if (err && err != -EOPNOTSUPP)
				ext4_msg(sb, KERN_WARNING, "discard request in"
					 " group:%u block:%d count:%lu failed"
					 " with %d", block_group, bit, count,
					 err);
		} else
			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);

		ext4_lock_group(sb, block_group);
		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
		mb_free_blocks(inode, &e4b, bit, count_clusters);
	}

	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
	ext4_free_group_clusters_set(sb, gdp, ret);
	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic64_add(count_clusters,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	/*
	 * on a bigalloc file system, defer the s_freeclusters_counter
	 * update to the caller (ext4_remove_space and friends) so they
	 * can determine if a cluster freed here should be rereserved
	 */
	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
		percpu_counter_add(&sbi->s_freeclusters_counter,
				   count_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

	if (overflow && !err) {
		block += count;
		count = overflow;
		put_bh(bitmap_bh);
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
		goto do_more;
	}
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:	handle for this transaction
 * @inode:	inode
 * @bh:		optional buffer of the block to be freed
 * @block:	starting physical block to be freed
 * @count:	number of blocks to be freed
 * @flags:	flags used by ext4_free_blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
		      struct buffer_head *bh, ext4_fsblk_t block,
		      unsigned long count, int flags)
{
	struct super_block *sb = inode->i_sb;
	unsigned int overflow;
	struct ext4_sb_info *sbi;

	sbi = EXT4_SB(sb);

	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
		ext4_free_blocks_simple(inode, block, count);
		return;
	}

	might_sleep();
	if (bh) {
		if (block)
			BUG_ON(block != bh->b_blocknr);
		else
			block = bh->b_blocknr;
	}

	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		return;
	}
	flags |= EXT4_FREE_BLOCKS_VALIDATED;

	ext4_debug("freeing block %llu\n", block);
	trace_ext4_free_blocks(inode, block, count, flags);

	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
		BUG_ON(count > 1);

		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
			    inode, bh, block);
	}

	/*
	 * If the extent to be freed does not begin on a cluster
	 * boundary, we need to deal with partial clusters at the
	 * beginning and end of the extent.  Normally we will free
	 * blocks at the beginning or the end unless we are explicitly
	 * requested to avoid doing so.
	 */
	overflow = EXT4_PBLK_COFF(sbi, block);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
			overflow = sbi->s_cluster_ratio - overflow;
			block += overflow;
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else {
			block -= overflow;
			count += overflow;
		}
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
	}
	overflow = EXT4_LBLK_COFF(sbi, count);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else
			count += sbi->s_cluster_ratio - overflow;
		/* The range changed so it's no longer validated */
		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
	}

	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
		int i;
		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;

		for (i = 0; i < count; i++) {
			cond_resched();
			if (is_metadata)
				bh = sb_find_get_block(inode->i_sb, block + i);
			ext4_forget(handle, is_metadata, inode, bh, block + i);
		}
	}

	ext4_mb_clear_bb(handle, inode, block, count, flags);
}

/**
 * ext4_group_add_blocks() -- Add given blocks to an existing group
 * @handle:	handle to this transaction
 * @sb:		super block
 * @block:	start physical block to add to the block group
 * @count:	number of blocks to add
 *
 * This marks the blocks as free in the bitmap and buddy.
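 *
 * For example (illustrative): when the filesystem is grown, the newly
 * added blocks are cleared in their group's block bitmap, released into
 * the buddy, and the group, flex-group and global free-cluster counters
 * are increased by the number of clusters actually freed.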
 */
int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
			  ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned int i;
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_buddy e4b;
	int err = 0, ret, free_clusters_count;
	ext4_grpblk_t clusters_freed;
	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
	unsigned long cluster_count = last_cluster - first_cluster + 1;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	if (count == 0)
		return 0;

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are adding blocks across a group
	 * boundary.
	 */
	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
		ext4_warning(sb, "too many blocks added to group %u",
			     block_group);
		err = -EINVAL;
		goto error_return;
	}

	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		err = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}

	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc) {
		err = -EIO;
		goto error_return;
	}

	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		err = -EINVAL;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
					    EXT4_JTR_NONE);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
	if (err)
		goto error_return;

	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			clusters_freed++;
		}
	}

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_return;

	/*
	 * We need to update group_info->bb_free and the bitmap
	 * with the group lock held;
	 * ext4_mb_generate_buddy() looks at
	 * them with the group lock held.
	 */
	ext4_lock_group(sb, block_group);
	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
	mb_free_blocks(NULL, &e4b, bit, cluster_count);
	free_clusters_count = clusters_freed +
		ext4_free_group_clusters(sb, desc);
	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
	ext4_block_bitmap_csum_set(sb, desc, bitmap_bh);
	ext4_group_desc_csum_set(sb, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   clusters_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic64_add(clusters_freed,
			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return err;
}

/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb:		super block for the file system
 * @start:	starting block of the free extent in the alloc. group
 * @count:	number of blocks to TRIM
 * @e4b:	ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static int ext4_trim_extent(struct super_block *sb,
		int start, int count, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	ext4_group_t group = e4b->bd_group;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count;
	void *bitmap;

	bitmap = e4b->bd_bitmap;
	start = (e4b->bd_info->bb_first_free > start) ?
		e4b->bd_info->bb_first_free : start;
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				break;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	return count;
}

/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:			super block for file system
 * @group:		group to be trimmed
 * @start:		first group block to examine
 * @max:		last group block to examine
 * @minblocks:		minimum extent block count
 * @set_trimmed:	set the trimmed flag if at least one block is trimmed
 *
 * ext4_trim_all_free walks through group's block bitmap searching for free
 * extents. When a free extent is found, it is marked as used in the group
 * buddy bitmap. Then a TRIM command is issued on this extent and the extent
 * is freed in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks, bool set_trimmed)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
		if (ret >= 0 && set_trimmed)
			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
	} else {
		ret = 0;
	}

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}

/**
 * ext4_trim_fs() -- trim ioctl handler function
 * @sb:			superblock for filesystem
 * @range:		fstrim_range structure
 *
 * start:	First Byte to trim
 * len:		number of Bytes to trim from start
 * minlen:	minimum extent length in Bytes
 * ext4_trim_fs goes through all allocation groups containing Bytes from
 * start to start+len. For each such group, the ext4_trim_all_free function
 * is invoked to trim all free space.
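 *
 * For example (illustrative numbers): with 4 KiB blocks and 32768 blocks
 * per group (128 MiB), a range starting at byte 1 GiB with len 192 MiB
 * covers groups 8 and 9: all of group 8 and the first 16384 clusters of
 * group 9 (assuming a cluster ratio of 1).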
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	bool whole_group, eof = false;
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point in trying to trim less than the discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
	if (end >= max_blks - 1) {
		end = max_blks - 1;
		eof = true;
	}
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	whole_group = true;

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		if (!grp)
			continue;
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all the groups except the last one, the last cluster
		 * will always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only
		 * need to change it for the last group; note that
		 * last_cluster was already computed earlier by
		 * ext4_get_group_no_and_offset()
		 */
		if (group == last_group) {
			end = last_cluster;
			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
		}
		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen, whole_group);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}

/* Iterate over all the free extents in the group.
 */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
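
/*
 * Example usage of ext4_mballoc_query_range(), kept out of the build: a
 * minimal sketch of an ext4_mballoc_query_range_fn callback that just
 * counts free extents. The struct and function names below are made up
 * for illustration; the callback may sleep, since the group lock is
 * dropped around the formatter call above.
 */
#if 0
struct mb_free_stats {
	unsigned int	nr_extents;	/* free extents seen so far */
	ext4_grpblk_t	nr_clusters;	/* free clusters seen so far */
};

static int mb_count_free_extents(struct super_block *sb, ext4_group_t group,
				 ext4_grpblk_t start, ext4_grpblk_t len,
				 void *priv)
{
	struct mb_free_stats *stats = priv;

	stats->nr_extents++;
	stats->nr_clusters += len;
	return 0;	/* returning non-zero stops the iteration */
}
#endif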