// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal (block) value.
 *
 * During the initialization phase of the allocator we decide whether to
 * use group preallocation or inode preallocation, depending on the size
 * of the file. The size of the file is either the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * group preallocation. The default value of s_mb_stream_request is 16
 * blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is expressed as a
 * number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to keep small files close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is selected by looking at the _logical_
 * start block. Only if the logical file block falls within the range of
 * a prealloc space do we consume that prealloc space. This makes sure
 * that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we
 * don't modify any of its values except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space, and we
 * have the group allocation flag set, then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce contention
 * between CPUs. It is possible to get scheduled at this point.
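 *
 * Summarizing the choice described above as a rough sketch (illustrative
 * only, not the exact code path):
 *
 *	size = max(size after this allocation, current file size);
 *	if (size < sbi->s_mb_stream_request)	(default: 16 blocks)
 *		use the per-CPU locality group preallocation
 *	else
 *		use the per-inode preallocation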
 *
 * The locality group prealloc space is used by checking whether there is
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offsets get
 * mapped to the buddy and bitmap information of the different groups. The
 * buddy information is attached to the buddy cache inode so that we can
 * access it through the page cache. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information consists of the block
 * bitmap and the buddy information, stored in the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks, so it can hold information for groups_per_page
 * groups, where groups_per_page is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for the requested number of blocks in the buddy cache. If we
 * are able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we need. The extra blocks
 * that we get after allocation are added to the respective prealloc
 * list. In the case of inode preallocation we follow a set of heuristics
 * based on file size; these can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc
 * depends on the cluster size; for non-bigalloc file systems it is 512
 * blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is expressed as a
 * number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option, the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses a buddy scan only if the request length is a
 * power of 2 blocks and the order of the allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal
 * to the stripe size (sbi->s_stripe), we search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID
 * setups. Otherwise, we search the specific group using the bitmap for
 * the best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here. min_to_scan indicates how long mballoc __must__ look
 * for a best extent and max_to_scan indicates how long mballoc __can__
 * look for a best extent among the found extents.
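 *
 * For example, to make the scanner work harder on a given filesystem, the
 * tunables can be raised at run time (illustrative values only):
 *
 *	echo 32  > /sys/fs/ext4/<partition>/mb_min_to_scan
 *	echo 512 > /sys/fs/ext4/<partition>/mb_max_to_scan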
 *
 * Searching for blocks starts with the group specified as the goal value
 * in the allocation context via ac_g_ex. Each group is first checked
 * against the criteria that determine whether it can be used for the
 * allocation; ext4_mb_good_group explains how the groups are checked.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which results in the prealloc
 * space getting filled. The prealloc space is then used for subsequent
 * requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave the group.
 *    space from this type of preallocation can be used for any inode.
 *    thus it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is
 * either free or used in ALL structures. notice: "any time" should not be
 * read literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation, because we can't know the
 *        actually used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be
 * atomic. given some of them can block, we'd have to use something like
 * semaphores, killing performance on high-end SMP hardware. let's try to
 * relax it using the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the
 *     on-disk bitmap has a bit set and a PA claims the same block, that's
 *     OK. IOW, one can set a bit in the on-disk bitmap if the buddy has
 *     the same bit set and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to
 *      avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data. given (3) we care that the PA -= N operation
 *      doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *    - use locality group PA
 *      again, PA -= N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different
 *      blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA is changed only after the on-disk bitmap is changed
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA down to emptiness. there is no
 * need to modify the buddy in this case, but we should take care about
 * concurrent init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.
There will be one per mounted filesystem for 338 * each unique s_blocksize_bits */ 339 #define NR_GRPINFO_CACHES 8 340 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; 341 342 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { 343 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", 344 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", 345 "ext4_groupinfo_64k", "ext4_groupinfo_128k" 346 }; 347 348 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 349 ext4_group_t group); 350 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 351 ext4_group_t group); 352 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); 353 354 /* 355 * The algorithm using this percpu seq counter goes below: 356 * 1. We sample the percpu discard_pa_seq counter before trying for block 357 * allocation in ext4_mb_new_blocks(). 358 * 2. We increment this percpu discard_pa_seq counter when we either allocate 359 * or free these blocks i.e. while marking those blocks as used/free in 360 * mb_mark_used()/mb_free_blocks(). 361 * 3. We also increment this percpu seq counter when we successfully identify 362 * that the bb_prealloc_list is not empty and hence proceed for discarding 363 * of those PAs inside ext4_mb_discard_group_preallocations(). 364 * 365 * Now to make sure that the regular fast path of block allocation is not 366 * affected, as a small optimization we only sample the percpu seq counter 367 * on that cpu. Only when the block allocation fails and when freed blocks 368 * found were 0, that is when we sample percpu seq counter for all cpus using 369 * below function ext4_get_discard_pa_seq_sum(). This happens after making 370 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty. 371 */ 372 static DEFINE_PER_CPU(u64, discard_pa_seq); 373 static inline u64 ext4_get_discard_pa_seq_sum(void) 374 { 375 int __cpu; 376 u64 __seq = 0; 377 378 for_each_possible_cpu(__cpu) 379 __seq += per_cpu(discard_pa_seq, __cpu); 380 return __seq; 381 } 382 383 static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 384 { 385 #if BITS_PER_LONG == 64 386 *bit += ((unsigned long) addr & 7UL) << 3; 387 addr = (void *) ((unsigned long) addr & ~7UL); 388 #elif BITS_PER_LONG == 32 389 *bit += ((unsigned long) addr & 3UL) << 3; 390 addr = (void *) ((unsigned long) addr & ~3UL); 391 #else 392 #error "how many bits you are?!" 
393 #endif 394 return addr; 395 } 396 397 static inline int mb_test_bit(int bit, void *addr) 398 { 399 /* 400 * ext4_test_bit on architecture like powerpc 401 * needs unsigned long aligned address 402 */ 403 addr = mb_correct_addr_and_bit(&bit, addr); 404 return ext4_test_bit(bit, addr); 405 } 406 407 static inline void mb_set_bit(int bit, void *addr) 408 { 409 addr = mb_correct_addr_and_bit(&bit, addr); 410 ext4_set_bit(bit, addr); 411 } 412 413 static inline void mb_clear_bit(int bit, void *addr) 414 { 415 addr = mb_correct_addr_and_bit(&bit, addr); 416 ext4_clear_bit(bit, addr); 417 } 418 419 static inline int mb_test_and_clear_bit(int bit, void *addr) 420 { 421 addr = mb_correct_addr_and_bit(&bit, addr); 422 return ext4_test_and_clear_bit(bit, addr); 423 } 424 425 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 426 { 427 int fix = 0, ret, tmpmax; 428 addr = mb_correct_addr_and_bit(&fix, addr); 429 tmpmax = max + fix; 430 start += fix; 431 432 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 433 if (ret > max) 434 return max; 435 return ret; 436 } 437 438 static inline int mb_find_next_bit(void *addr, int max, int start) 439 { 440 int fix = 0, ret, tmpmax; 441 addr = mb_correct_addr_and_bit(&fix, addr); 442 tmpmax = max + fix; 443 start += fix; 444 445 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 446 if (ret > max) 447 return max; 448 return ret; 449 } 450 451 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 452 { 453 char *bb; 454 455 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 456 BUG_ON(max == NULL); 457 458 if (order > e4b->bd_blkbits + 1) { 459 *max = 0; 460 return NULL; 461 } 462 463 /* at order 0 we see each particular block */ 464 if (order == 0) { 465 *max = 1 << (e4b->bd_blkbits + 3); 466 return e4b->bd_bitmap; 467 } 468 469 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 470 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 471 472 return bb; 473 } 474 475 #ifdef DOUBLE_CHECK 476 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 477 int first, int count) 478 { 479 int i; 480 struct super_block *sb = e4b->bd_sb; 481 482 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 483 return; 484 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 485 for (i = 0; i < count; i++) { 486 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 487 ext4_fsblk_t blocknr; 488 489 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 490 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 491 ext4_grp_locked_error(sb, e4b->bd_group, 492 inode ? 
inode->i_ino : 0, 493 blocknr, 494 "freeing block already freed " 495 "(bit %u)", 496 first + i); 497 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 498 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 499 } 500 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 501 } 502 } 503 504 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 505 { 506 int i; 507 508 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 509 return; 510 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 511 for (i = 0; i < count; i++) { 512 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 513 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 514 } 515 } 516 517 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 518 { 519 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 520 return; 521 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 522 unsigned char *b1, *b2; 523 int i; 524 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 525 b2 = (unsigned char *) bitmap; 526 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 527 if (b1[i] != b2[i]) { 528 ext4_msg(e4b->bd_sb, KERN_ERR, 529 "corruption in group %u " 530 "at byte %u(%u): %x in copy != %x " 531 "on disk/prealloc", 532 e4b->bd_group, i, i * 8, b1[i], b2[i]); 533 BUG(); 534 } 535 } 536 } 537 } 538 539 static void mb_group_bb_bitmap_alloc(struct super_block *sb, 540 struct ext4_group_info *grp, ext4_group_t group) 541 { 542 struct buffer_head *bh; 543 544 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); 545 if (!grp->bb_bitmap) 546 return; 547 548 bh = ext4_read_block_bitmap(sb, group); 549 if (IS_ERR_OR_NULL(bh)) { 550 kfree(grp->bb_bitmap); 551 grp->bb_bitmap = NULL; 552 return; 553 } 554 555 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); 556 put_bh(bh); 557 } 558 559 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 560 { 561 kfree(grp->bb_bitmap); 562 } 563 564 #else 565 static inline void mb_free_blocks_double(struct inode *inode, 566 struct ext4_buddy *e4b, int first, int count) 567 { 568 return; 569 } 570 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 571 int first, int count) 572 { 573 return; 574 } 575 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 576 { 577 return; 578 } 579 580 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, 581 struct ext4_group_info *grp, ext4_group_t group) 582 { 583 return; 584 } 585 586 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) 587 { 588 return; 589 } 590 #endif 591 592 #ifdef AGGRESSIVE_CHECK 593 594 #define MB_CHECK_ASSERT(assert) \ 595 do { \ 596 if (!(assert)) { \ 597 printk(KERN_EMERG \ 598 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 599 function, file, line, # assert); \ 600 BUG(); \ 601 } \ 602 } while (0) 603 604 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 605 const char *function, int line) 606 { 607 struct super_block *sb = e4b->bd_sb; 608 int order = e4b->bd_blkbits + 1; 609 int max; 610 int max2; 611 int i; 612 int j; 613 int k; 614 int count; 615 struct ext4_group_info *grp; 616 int fragments = 0; 617 int fstart; 618 struct list_head *cur; 619 void *buddy; 620 void *buddy2; 621 622 if (e4b->bd_info->bb_check_counter++ % 10) 623 return 0; 624 625 while (order > 1) { 626 buddy = mb_find_buddy(e4b, order, &max); 627 MB_CHECK_ASSERT(buddy); 628 buddy2 = mb_find_buddy(e4b, order - 1, &max2); 629 MB_CHECK_ASSERT(buddy2); 630 MB_CHECK_ASSERT(buddy != buddy2); 631 MB_CHECK_ASSERT(max * 2 == max2); 632 633 count = 0; 634 for (i = 0; i 
< max; i++) { 635 636 if (mb_test_bit(i, buddy)) { 637 /* only single bit in buddy2 may be 1 */ 638 if (!mb_test_bit(i << 1, buddy2)) { 639 MB_CHECK_ASSERT( 640 mb_test_bit((i<<1)+1, buddy2)); 641 } else if (!mb_test_bit((i << 1) + 1, buddy2)) { 642 MB_CHECK_ASSERT( 643 mb_test_bit(i << 1, buddy2)); 644 } 645 continue; 646 } 647 648 /* both bits in buddy2 must be 1 */ 649 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 650 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 651 652 for (j = 0; j < (1 << order); j++) { 653 k = (i * (1 << order)) + j; 654 MB_CHECK_ASSERT( 655 !mb_test_bit(k, e4b->bd_bitmap)); 656 } 657 count++; 658 } 659 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 660 order--; 661 } 662 663 fstart = -1; 664 buddy = mb_find_buddy(e4b, 0, &max); 665 for (i = 0; i < max; i++) { 666 if (!mb_test_bit(i, buddy)) { 667 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 668 if (fstart == -1) { 669 fragments++; 670 fstart = i; 671 } 672 continue; 673 } 674 fstart = -1; 675 /* check used bits only */ 676 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 677 buddy2 = mb_find_buddy(e4b, j, &max2); 678 k = i >> j; 679 MB_CHECK_ASSERT(k < max2); 680 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 681 } 682 } 683 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 684 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 685 686 grp = ext4_get_group_info(sb, e4b->bd_group); 687 list_for_each(cur, &grp->bb_prealloc_list) { 688 ext4_group_t groupnr; 689 struct ext4_prealloc_space *pa; 690 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 691 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 692 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 693 for (i = 0; i < pa->pa_len; i++) 694 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 695 } 696 return 0; 697 } 698 #undef MB_CHECK_ASSERT 699 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 700 __FILE__, __func__, __LINE__) 701 #else 702 #define mb_check_buddy(e4b) 703 #endif 704 705 /* 706 * Divide blocks started from @first with length @len into 707 * smaller chunks with power of 2 blocks. 708 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 709 * then increase bb_counters[] for corresponded chunk size. 710 */ 711 static void ext4_mb_mark_free_simple(struct super_block *sb, 712 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 713 struct ext4_group_info *grp) 714 { 715 struct ext4_sb_info *sbi = EXT4_SB(sb); 716 ext4_grpblk_t min; 717 ext4_grpblk_t max; 718 ext4_grpblk_t chunk; 719 unsigned int border; 720 721 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 722 723 border = 2 << sb->s_blocksize_bits; 724 725 while (len > 0) { 726 /* find how many blocks can be covered since this position */ 727 max = ffs(first | border) - 1; 728 729 /* find how many blocks of power 2 we need to mark */ 730 min = fls(len) - 1; 731 732 if (max < min) 733 min = max; 734 chunk = 1 << min; 735 736 /* mark multiblock chunks only */ 737 grp->bb_counters[min]++; 738 if (min > 0) 739 mb_clear_bit(first >> min, 740 buddy + sbi->s_mb_offsets[min]); 741 742 len -= chunk; 743 first += chunk; 744 } 745 } 746 747 /* 748 * Cache the order of the largest free extent we have available in this block 749 * group. 
750 */ 751 static void 752 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 753 { 754 int i; 755 int bits; 756 757 grp->bb_largest_free_order = -1; /* uninit */ 758 759 bits = sb->s_blocksize_bits + 1; 760 for (i = bits; i >= 0; i--) { 761 if (grp->bb_counters[i] > 0) { 762 grp->bb_largest_free_order = i; 763 break; 764 } 765 } 766 } 767 768 static noinline_for_stack 769 void ext4_mb_generate_buddy(struct super_block *sb, 770 void *buddy, void *bitmap, ext4_group_t group) 771 { 772 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 773 struct ext4_sb_info *sbi = EXT4_SB(sb); 774 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 775 ext4_grpblk_t i = 0; 776 ext4_grpblk_t first; 777 ext4_grpblk_t len; 778 unsigned free = 0; 779 unsigned fragments = 0; 780 unsigned long long period = get_cycles(); 781 782 /* initialize buddy from bitmap which is aggregation 783 * of on-disk bitmap and preallocations */ 784 i = mb_find_next_zero_bit(bitmap, max, 0); 785 grp->bb_first_free = i; 786 while (i < max) { 787 fragments++; 788 first = i; 789 i = mb_find_next_bit(bitmap, max, i); 790 len = i - first; 791 free += len; 792 if (len > 1) 793 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 794 else 795 grp->bb_counters[0]++; 796 if (i < max) 797 i = mb_find_next_zero_bit(bitmap, max, i); 798 } 799 grp->bb_fragments = fragments; 800 801 if (free != grp->bb_free) { 802 ext4_grp_locked_error(sb, group, 0, 0, 803 "block bitmap and bg descriptor " 804 "inconsistent: %u vs %u free clusters", 805 free, grp->bb_free); 806 /* 807 * If we intend to continue, we consider group descriptor 808 * corrupt and update bb_free using bitmap value 809 */ 810 grp->bb_free = free; 811 ext4_mark_group_bitmap_corrupted(sb, group, 812 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 813 } 814 mb_set_largest_free_order(sb, grp); 815 816 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 817 818 period = get_cycles() - period; 819 spin_lock(&sbi->s_bal_lock); 820 sbi->s_mb_buddies_generated++; 821 sbi->s_mb_generation_time += period; 822 spin_unlock(&sbi->s_bal_lock); 823 } 824 825 /* The buddy information is attached the buddy cache inode 826 * for convenience. The information regarding each group 827 * is loaded via ext4_mb_load_buddy. The information involve 828 * block bitmap and buddy information. The information are 829 * stored in the inode as 830 * 831 * { page } 832 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 833 * 834 * 835 * one block each for bitmap and buddy information. 836 * So for each group we take up 2 blocks. A page can 837 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 838 * So it can have information regarding groups_per_page which 839 * is blocks_per_page/2 840 * 841 * Locking note: This routine takes the block group lock of all groups 842 * for this page; do not hold this lock when calling this routine! 
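 *
 * For example (figures for illustration only): with 1KiB blocks and 4KiB
 * pages, blocks_per_page = 4096 / 1024 = 4 and groups_per_page = 2, so
 * the first page holds
 * [ group 0 bitmap ][ group 0 buddy ][ group 1 bitmap ][ group 1 buddy ].
 * With 4KiB blocks and 4KiB pages, blocks_per_page = 1, so every bitmap
 * and buddy block gets a page of its own (two pages per group).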
843 */ 844 845 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 846 { 847 ext4_group_t ngroups; 848 int blocksize; 849 int blocks_per_page; 850 int groups_per_page; 851 int err = 0; 852 int i; 853 ext4_group_t first_group, group; 854 int first_block; 855 struct super_block *sb; 856 struct buffer_head *bhs; 857 struct buffer_head **bh = NULL; 858 struct inode *inode; 859 char *data; 860 char *bitmap; 861 struct ext4_group_info *grinfo; 862 863 inode = page->mapping->host; 864 sb = inode->i_sb; 865 ngroups = ext4_get_groups_count(sb); 866 blocksize = i_blocksize(inode); 867 blocks_per_page = PAGE_SIZE / blocksize; 868 869 mb_debug(sb, "init page %lu\n", page->index); 870 871 groups_per_page = blocks_per_page >> 1; 872 if (groups_per_page == 0) 873 groups_per_page = 1; 874 875 /* allocate buffer_heads to read bitmaps */ 876 if (groups_per_page > 1) { 877 i = sizeof(struct buffer_head *) * groups_per_page; 878 bh = kzalloc(i, gfp); 879 if (bh == NULL) { 880 err = -ENOMEM; 881 goto out; 882 } 883 } else 884 bh = &bhs; 885 886 first_group = page->index * blocks_per_page / 2; 887 888 /* read all groups the page covers into the cache */ 889 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 890 if (group >= ngroups) 891 break; 892 893 grinfo = ext4_get_group_info(sb, group); 894 /* 895 * If page is uptodate then we came here after online resize 896 * which added some new uninitialized group info structs, so 897 * we must skip all initialized uptodate buddies on the page, 898 * which may be currently in use by an allocating task. 899 */ 900 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 901 bh[i] = NULL; 902 continue; 903 } 904 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 905 if (IS_ERR(bh[i])) { 906 err = PTR_ERR(bh[i]); 907 bh[i] = NULL; 908 goto out; 909 } 910 mb_debug(sb, "read bitmap for group %u\n", group); 911 } 912 913 /* wait for I/O completion */ 914 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 915 int err2; 916 917 if (!bh[i]) 918 continue; 919 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 920 if (!err) 921 err = err2; 922 } 923 924 first_block = page->index * blocks_per_page; 925 for (i = 0; i < blocks_per_page; i++) { 926 group = (first_block + i) >> 1; 927 if (group >= ngroups) 928 break; 929 930 if (!bh[group - first_group]) 931 /* skip initialized uptodate buddy */ 932 continue; 933 934 if (!buffer_verified(bh[group - first_group])) 935 /* Skip faulty bitmaps */ 936 continue; 937 err = 0; 938 939 /* 940 * data carry information regarding this 941 * particular group in the format specified 942 * above 943 * 944 */ 945 data = page_address(page) + (i * blocksize); 946 bitmap = bh[group - first_group]->b_data; 947 948 /* 949 * We place the buddy block and bitmap block 950 * close together 951 */ 952 if ((first_block + i) & 1) { 953 /* this is block of buddy */ 954 BUG_ON(incore == NULL); 955 mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 956 group, page->index, i * blocksize); 957 trace_ext4_mb_buddy_bitmap_load(sb, group); 958 grinfo = ext4_get_group_info(sb, group); 959 grinfo->bb_fragments = 0; 960 memset(grinfo->bb_counters, 0, 961 sizeof(*grinfo->bb_counters) * 962 (sb->s_blocksize_bits+2)); 963 /* 964 * incore got set to the group block bitmap below 965 */ 966 ext4_lock_group(sb, group); 967 /* init the buddy */ 968 memset(data, 0xff, blocksize); 969 ext4_mb_generate_buddy(sb, data, incore, group); 970 ext4_unlock_group(sb, group); 971 incore = NULL; 972 } else { 973 /* this 
is block of bitmap */ 974 BUG_ON(incore != NULL); 975 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 976 group, page->index, i * blocksize); 977 trace_ext4_mb_bitmap_load(sb, group); 978 979 /* see comments in ext4_mb_put_pa() */ 980 ext4_lock_group(sb, group); 981 memcpy(data, bitmap, blocksize); 982 983 /* mark all preallocated blks used in in-core bitmap */ 984 ext4_mb_generate_from_pa(sb, data, group); 985 ext4_mb_generate_from_freelist(sb, data, group); 986 ext4_unlock_group(sb, group); 987 988 /* set incore so that the buddy information can be 989 * generated using this 990 */ 991 incore = data; 992 } 993 } 994 SetPageUptodate(page); 995 996 out: 997 if (bh) { 998 for (i = 0; i < groups_per_page; i++) 999 brelse(bh[i]); 1000 if (bh != &bhs) 1001 kfree(bh); 1002 } 1003 return err; 1004 } 1005 1006 /* 1007 * Lock the buddy and bitmap pages. This make sure other parallel init_group 1008 * on the same buddy page doesn't happen whild holding the buddy page lock. 1009 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 1010 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 1011 */ 1012 static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1013 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1014 { 1015 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 1016 int block, pnum, poff; 1017 int blocks_per_page; 1018 struct page *page; 1019 1020 e4b->bd_buddy_page = NULL; 1021 e4b->bd_bitmap_page = NULL; 1022 1023 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1024 /* 1025 * the buddy cache inode stores the block bitmap 1026 * and buddy information in consecutive blocks. 1027 * So for each group we need two blocks. 1028 */ 1029 block = group * 2; 1030 pnum = block / blocks_per_page; 1031 poff = block % blocks_per_page; 1032 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1033 if (!page) 1034 return -ENOMEM; 1035 BUG_ON(page->mapping != inode->i_mapping); 1036 e4b->bd_bitmap_page = page; 1037 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1038 1039 if (blocks_per_page >= 2) { 1040 /* buddy and bitmap are on the same page */ 1041 return 0; 1042 } 1043 1044 block++; 1045 pnum = block / blocks_per_page; 1046 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1047 if (!page) 1048 return -ENOMEM; 1049 BUG_ON(page->mapping != inode->i_mapping); 1050 e4b->bd_buddy_page = page; 1051 return 0; 1052 } 1053 1054 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1055 { 1056 if (e4b->bd_bitmap_page) { 1057 unlock_page(e4b->bd_bitmap_page); 1058 put_page(e4b->bd_bitmap_page); 1059 } 1060 if (e4b->bd_buddy_page) { 1061 unlock_page(e4b->bd_buddy_page); 1062 put_page(e4b->bd_buddy_page); 1063 } 1064 } 1065 1066 /* 1067 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1068 * block group lock of all groups for this page; do not hold the BG lock when 1069 * calling this routine! 1070 */ 1071 static noinline_for_stack 1072 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1073 { 1074 1075 struct ext4_group_info *this_grp; 1076 struct ext4_buddy e4b; 1077 struct page *page; 1078 int ret = 0; 1079 1080 might_sleep(); 1081 mb_debug(sb, "init group %u\n", group); 1082 this_grp = ext4_get_group_info(sb, group); 1083 /* 1084 * This ensures that we don't reinit the buddy cache 1085 * page which map to the group from which we are already 1086 * allocating. 
If we are looking at the buddy cache we would 1087 * have taken a reference using ext4_mb_load_buddy and that 1088 * would have pinned buddy page to page cache. 1089 * The call to ext4_mb_get_buddy_page_lock will mark the 1090 * page accessed. 1091 */ 1092 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 1093 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1094 /* 1095 * somebody initialized the group 1096 * return without doing anything 1097 */ 1098 goto err; 1099 } 1100 1101 page = e4b.bd_bitmap_page; 1102 ret = ext4_mb_init_cache(page, NULL, gfp); 1103 if (ret) 1104 goto err; 1105 if (!PageUptodate(page)) { 1106 ret = -EIO; 1107 goto err; 1108 } 1109 1110 if (e4b.bd_buddy_page == NULL) { 1111 /* 1112 * If both the bitmap and buddy are in 1113 * the same page we don't need to force 1114 * init the buddy 1115 */ 1116 ret = 0; 1117 goto err; 1118 } 1119 /* init buddy cache */ 1120 page = e4b.bd_buddy_page; 1121 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 1122 if (ret) 1123 goto err; 1124 if (!PageUptodate(page)) { 1125 ret = -EIO; 1126 goto err; 1127 } 1128 err: 1129 ext4_mb_put_buddy_page_lock(&e4b); 1130 return ret; 1131 } 1132 1133 /* 1134 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1135 * block group lock of all groups for this page; do not hold the BG lock when 1136 * calling this routine! 1137 */ 1138 static noinline_for_stack int 1139 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1140 struct ext4_buddy *e4b, gfp_t gfp) 1141 { 1142 int blocks_per_page; 1143 int block; 1144 int pnum; 1145 int poff; 1146 struct page *page; 1147 int ret; 1148 struct ext4_group_info *grp; 1149 struct ext4_sb_info *sbi = EXT4_SB(sb); 1150 struct inode *inode = sbi->s_buddy_cache; 1151 1152 might_sleep(); 1153 mb_debug(sb, "load group %u\n", group); 1154 1155 blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1156 grp = ext4_get_group_info(sb, group); 1157 1158 e4b->bd_blkbits = sb->s_blocksize_bits; 1159 e4b->bd_info = grp; 1160 e4b->bd_sb = sb; 1161 e4b->bd_group = group; 1162 e4b->bd_buddy_page = NULL; 1163 e4b->bd_bitmap_page = NULL; 1164 1165 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1166 /* 1167 * we need full data about the group 1168 * to make a good selection 1169 */ 1170 ret = ext4_mb_init_group(sb, group, gfp); 1171 if (ret) 1172 return ret; 1173 } 1174 1175 /* 1176 * the buddy cache inode stores the block bitmap 1177 * and buddy information in consecutive blocks. 1178 * So for each group we need two blocks. 1179 */ 1180 block = group * 2; 1181 pnum = block / blocks_per_page; 1182 poff = block % blocks_per_page; 1183 1184 /* we could use find_or_create_page(), but it locks page 1185 * what we'd like to avoid in fast path ... */ 1186 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1187 if (page == NULL || !PageUptodate(page)) { 1188 if (page) 1189 /* 1190 * drop the page reference and try 1191 * to get the page with lock. If we 1192 * are not uptodate that implies 1193 * somebody just created the page but 1194 * is yet to initialize the same. So 1195 * wait for it to initialize. 
1196 */ 1197 put_page(page); 1198 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1199 if (page) { 1200 BUG_ON(page->mapping != inode->i_mapping); 1201 if (!PageUptodate(page)) { 1202 ret = ext4_mb_init_cache(page, NULL, gfp); 1203 if (ret) { 1204 unlock_page(page); 1205 goto err; 1206 } 1207 mb_cmp_bitmaps(e4b, page_address(page) + 1208 (poff * sb->s_blocksize)); 1209 } 1210 unlock_page(page); 1211 } 1212 } 1213 if (page == NULL) { 1214 ret = -ENOMEM; 1215 goto err; 1216 } 1217 if (!PageUptodate(page)) { 1218 ret = -EIO; 1219 goto err; 1220 } 1221 1222 /* Pages marked accessed already */ 1223 e4b->bd_bitmap_page = page; 1224 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1225 1226 block++; 1227 pnum = block / blocks_per_page; 1228 poff = block % blocks_per_page; 1229 1230 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1231 if (page == NULL || !PageUptodate(page)) { 1232 if (page) 1233 put_page(page); 1234 page = find_or_create_page(inode->i_mapping, pnum, gfp); 1235 if (page) { 1236 BUG_ON(page->mapping != inode->i_mapping); 1237 if (!PageUptodate(page)) { 1238 ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1239 gfp); 1240 if (ret) { 1241 unlock_page(page); 1242 goto err; 1243 } 1244 } 1245 unlock_page(page); 1246 } 1247 } 1248 if (page == NULL) { 1249 ret = -ENOMEM; 1250 goto err; 1251 } 1252 if (!PageUptodate(page)) { 1253 ret = -EIO; 1254 goto err; 1255 } 1256 1257 /* Pages marked accessed already */ 1258 e4b->bd_buddy_page = page; 1259 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1260 1261 return 0; 1262 1263 err: 1264 if (page) 1265 put_page(page); 1266 if (e4b->bd_bitmap_page) 1267 put_page(e4b->bd_bitmap_page); 1268 if (e4b->bd_buddy_page) 1269 put_page(e4b->bd_buddy_page); 1270 e4b->bd_buddy = NULL; 1271 e4b->bd_bitmap = NULL; 1272 return ret; 1273 } 1274 1275 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1276 struct ext4_buddy *e4b) 1277 { 1278 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1279 } 1280 1281 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1282 { 1283 if (e4b->bd_bitmap_page) 1284 put_page(e4b->bd_bitmap_page); 1285 if (e4b->bd_buddy_page) 1286 put_page(e4b->bd_buddy_page); 1287 } 1288 1289 1290 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1291 { 1292 int order = 1, max; 1293 void *bb; 1294 1295 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1296 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1297 1298 while (order <= e4b->bd_blkbits + 1) { 1299 bb = mb_find_buddy(e4b, order, &max); 1300 if (!mb_test_bit(block >> order, bb)) { 1301 /* this block is part of buddy of order 'order' */ 1302 return order; 1303 } 1304 order++; 1305 } 1306 return 0; 1307 } 1308 1309 static void mb_clear_bits(void *bm, int cur, int len) 1310 { 1311 __u32 *addr; 1312 1313 len = cur + len; 1314 while (cur < len) { 1315 if ((cur & 31) == 0 && (len - cur) >= 32) { 1316 /* fast path: clear whole word at once */ 1317 addr = bm + (cur >> 3); 1318 *addr = 0; 1319 cur += 32; 1320 continue; 1321 } 1322 mb_clear_bit(cur, bm); 1323 cur++; 1324 } 1325 } 1326 1327 /* clear bits in given range 1328 * will return first found zero bit if any, -1 otherwise 1329 */ 1330 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1331 { 1332 __u32 *addr; 1333 int zero_bit = -1; 1334 1335 len = cur + len; 1336 while (cur < len) { 1337 if ((cur & 31) == 0 && (len - cur) >= 32) { 1338 /* fast path: clear whole word at once */ 1339 addr = bm + (cur >> 3); 1340 if (*addr != (__u32)(-1) 
&& zero_bit == -1) 1341 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1342 *addr = 0; 1343 cur += 32; 1344 continue; 1345 } 1346 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1347 zero_bit = cur; 1348 cur++; 1349 } 1350 1351 return zero_bit; 1352 } 1353 1354 void ext4_set_bits(void *bm, int cur, int len) 1355 { 1356 __u32 *addr; 1357 1358 len = cur + len; 1359 while (cur < len) { 1360 if ((cur & 31) == 0 && (len - cur) >= 32) { 1361 /* fast path: set whole word at once */ 1362 addr = bm + (cur >> 3); 1363 *addr = 0xffffffff; 1364 cur += 32; 1365 continue; 1366 } 1367 mb_set_bit(cur, bm); 1368 cur++; 1369 } 1370 } 1371 1372 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1373 { 1374 if (mb_test_bit(*bit + side, bitmap)) { 1375 mb_clear_bit(*bit, bitmap); 1376 (*bit) -= side; 1377 return 1; 1378 } 1379 else { 1380 (*bit) += side; 1381 mb_set_bit(*bit, bitmap); 1382 return -1; 1383 } 1384 } 1385 1386 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1387 { 1388 int max; 1389 int order = 1; 1390 void *buddy = mb_find_buddy(e4b, order, &max); 1391 1392 while (buddy) { 1393 void *buddy2; 1394 1395 /* Bits in range [first; last] are known to be set since 1396 * corresponding blocks were allocated. Bits in range 1397 * (first; last) will stay set because they form buddies on 1398 * upper layer. We just deal with borders if they don't 1399 * align with upper layer and then go up. 1400 * Releasing entire group is all about clearing 1401 * single bit of highest order buddy. 1402 */ 1403 1404 /* Example: 1405 * --------------------------------- 1406 * | 1 | 1 | 1 | 1 | 1407 * --------------------------------- 1408 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1409 * --------------------------------- 1410 * 0 1 2 3 4 5 6 7 1411 * \_____________________/ 1412 * 1413 * Neither [1] nor [6] is aligned to above layer. 1414 * Left neighbour [0] is free, so mark it busy, 1415 * decrease bb_counters and extend range to 1416 * [0; 6] 1417 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1418 * mark [6] free, increase bb_counters and shrink range to 1419 * [0; 5]. 1420 * Then shift range to [0; 2], go up and do the same. 1421 */ 1422 1423 1424 if (first & 1) 1425 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1426 if (!(last & 1)) 1427 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1428 if (first > last) 1429 break; 1430 order++; 1431 1432 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { 1433 mb_clear_bits(buddy, first, last - first + 1); 1434 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1435 break; 1436 } 1437 first >>= 1; 1438 last >>= 1; 1439 buddy = buddy2; 1440 } 1441 } 1442 1443 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1444 int first, int count) 1445 { 1446 int left_is_free = 0; 1447 int right_is_free = 0; 1448 int block; 1449 int last = first + count - 1; 1450 struct super_block *sb = e4b->bd_sb; 1451 1452 if (WARN_ON(count == 0)) 1453 return; 1454 BUG_ON(last >= (sb->s_blocksize << 3)); 1455 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1456 /* Don't bother if the block group is corrupt. 
*/ 1457 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1458 return; 1459 1460 mb_check_buddy(e4b); 1461 mb_free_blocks_double(inode, e4b, first, count); 1462 1463 this_cpu_inc(discard_pa_seq); 1464 e4b->bd_info->bb_free += count; 1465 if (first < e4b->bd_info->bb_first_free) 1466 e4b->bd_info->bb_first_free = first; 1467 1468 /* access memory sequentially: check left neighbour, 1469 * clear range and then check right neighbour 1470 */ 1471 if (first != 0) 1472 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1473 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1474 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1475 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1476 1477 if (unlikely(block != -1)) { 1478 struct ext4_sb_info *sbi = EXT4_SB(sb); 1479 ext4_fsblk_t blocknr; 1480 1481 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1482 blocknr += EXT4_C2B(sbi, block); 1483 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 1484 ext4_grp_locked_error(sb, e4b->bd_group, 1485 inode ? inode->i_ino : 0, 1486 blocknr, 1487 "freeing already freed block (bit %u); block bitmap corrupt.", 1488 block); 1489 ext4_mark_group_bitmap_corrupted( 1490 sb, e4b->bd_group, 1491 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1492 } 1493 goto done; 1494 } 1495 1496 /* let's maintain fragments counter */ 1497 if (left_is_free && right_is_free) 1498 e4b->bd_info->bb_fragments--; 1499 else if (!left_is_free && !right_is_free) 1500 e4b->bd_info->bb_fragments++; 1501 1502 /* buddy[0] == bd_bitmap is a special case, so handle 1503 * it right away and let mb_buddy_mark_free stay free of 1504 * zero order checks. 1505 * Check if neighbours are to be coaleasced, 1506 * adjust bitmap bb_counters and borders appropriately. 1507 */ 1508 if (first & 1) { 1509 first += !left_is_free; 1510 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1511 } 1512 if (!(last & 1)) { 1513 last -= !right_is_free; 1514 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; 1515 } 1516 1517 if (first <= last) 1518 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1519 1520 done: 1521 mb_set_largest_free_order(sb, e4b->bd_info); 1522 mb_check_buddy(e4b); 1523 } 1524 1525 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1526 int needed, struct ext4_free_extent *ex) 1527 { 1528 int next = block; 1529 int max, order; 1530 void *buddy; 1531 1532 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1533 BUG_ON(ex == NULL); 1534 1535 buddy = mb_find_buddy(e4b, 0, &max); 1536 BUG_ON(buddy == NULL); 1537 BUG_ON(block >= max); 1538 if (mb_test_bit(block, buddy)) { 1539 ex->fe_len = 0; 1540 ex->fe_start = 0; 1541 ex->fe_group = 0; 1542 return 0; 1543 } 1544 1545 /* find actual order */ 1546 order = mb_find_order_for_block(e4b, block); 1547 block = block >> order; 1548 1549 ex->fe_len = 1 << order; 1550 ex->fe_start = block << order; 1551 ex->fe_group = e4b->bd_group; 1552 1553 /* calc difference from given start */ 1554 next = next - ex->fe_start; 1555 ex->fe_len -= next; 1556 ex->fe_start += next; 1557 1558 while (needed > ex->fe_len && 1559 mb_find_buddy(e4b, order, &max)) { 1560 1561 if (block + 1 >= max) 1562 break; 1563 1564 next = (block + 1) * (1 << order); 1565 if (mb_test_bit(next, e4b->bd_bitmap)) 1566 break; 1567 1568 order = mb_find_order_for_block(e4b, next); 1569 1570 block = next >> order; 1571 ex->fe_len += 1 << order; 1572 } 1573 1574 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 1575 /* Should never happen! (but apparently sometimes does?!?) 
*/ 1576 WARN_ON(1); 1577 ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent " 1578 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 1579 block, order, needed, ex->fe_group, ex->fe_start, 1580 ex->fe_len, ex->fe_logical); 1581 ex->fe_len = 0; 1582 ex->fe_start = 0; 1583 ex->fe_group = 0; 1584 } 1585 return ex->fe_len; 1586 } 1587 1588 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1589 { 1590 int ord; 1591 int mlen = 0; 1592 int max = 0; 1593 int cur; 1594 int start = ex->fe_start; 1595 int len = ex->fe_len; 1596 unsigned ret = 0; 1597 int len0 = len; 1598 void *buddy; 1599 1600 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1601 BUG_ON(e4b->bd_group != ex->fe_group); 1602 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1603 mb_check_buddy(e4b); 1604 mb_mark_used_double(e4b, start, len); 1605 1606 this_cpu_inc(discard_pa_seq); 1607 e4b->bd_info->bb_free -= len; 1608 if (e4b->bd_info->bb_first_free == start) 1609 e4b->bd_info->bb_first_free += len; 1610 1611 /* let's maintain fragments counter */ 1612 if (start != 0) 1613 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1614 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1615 max = !mb_test_bit(start + len, e4b->bd_bitmap); 1616 if (mlen && max) 1617 e4b->bd_info->bb_fragments++; 1618 else if (!mlen && !max) 1619 e4b->bd_info->bb_fragments--; 1620 1621 /* let's maintain buddy itself */ 1622 while (len) { 1623 ord = mb_find_order_for_block(e4b, start); 1624 1625 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1626 /* the whole chunk may be allocated at once! */ 1627 mlen = 1 << ord; 1628 buddy = mb_find_buddy(e4b, ord, &max); 1629 BUG_ON((start >> ord) >= max); 1630 mb_set_bit(start >> ord, buddy); 1631 e4b->bd_info->bb_counters[ord]--; 1632 start += mlen; 1633 len -= mlen; 1634 BUG_ON(len < 0); 1635 continue; 1636 } 1637 1638 /* store for history */ 1639 if (ret == 0) 1640 ret = len | (ord << 16); 1641 1642 /* we have to split large buddy */ 1643 BUG_ON(ord <= 0); 1644 buddy = mb_find_buddy(e4b, ord, &max); 1645 mb_set_bit(start >> ord, buddy); 1646 e4b->bd_info->bb_counters[ord]--; 1647 1648 ord--; 1649 cur = (start >> ord) & ~1U; 1650 buddy = mb_find_buddy(e4b, ord, &max); 1651 mb_clear_bit(cur, buddy); 1652 mb_clear_bit(cur + 1, buddy); 1653 e4b->bd_info->bb_counters[ord]++; 1654 e4b->bd_info->bb_counters[ord]++; 1655 } 1656 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1657 1658 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1659 mb_check_buddy(e4b); 1660 1661 return ret; 1662 } 1663 1664 /* 1665 * Must be called under group lock! 1666 */ 1667 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1668 struct ext4_buddy *e4b) 1669 { 1670 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1671 int ret; 1672 1673 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1674 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1675 1676 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 1677 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 1678 ret = mb_mark_used(e4b, &ac->ac_b_ex); 1679 1680 /* preallocation can change ac_b_ex, thus we store actually 1681 * allocated blocks for history */ 1682 ac->ac_f_ex = ac->ac_b_ex; 1683 1684 ac->ac_status = AC_STATUS_FOUND; 1685 ac->ac_tail = ret & 0xffff; 1686 ac->ac_buddy = ret >> 16; 1687 1688 /* 1689 * take the page reference. We want the page to be pinned 1690 * so that we don't get a ext4_mb_init_cache_call for this 1691 * group until we update the bitmap. 
That would mean we 1692 * double allocate blocks. The reference is dropped 1693 * in ext4_mb_release_context 1694 */ 1695 ac->ac_bitmap_page = e4b->bd_bitmap_page; 1696 get_page(ac->ac_bitmap_page); 1697 ac->ac_buddy_page = e4b->bd_buddy_page; 1698 get_page(ac->ac_buddy_page); 1699 /* store last allocated for subsequent stream allocation */ 1700 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1701 spin_lock(&sbi->s_md_lock); 1702 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 1703 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 1704 spin_unlock(&sbi->s_md_lock); 1705 } 1706 /* 1707 * As we've just preallocated more space than 1708 * user requested originally, we store allocated 1709 * space in a special descriptor. 1710 */ 1711 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 1712 ext4_mb_new_preallocation(ac); 1713 1714 } 1715 1716 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 1717 struct ext4_buddy *e4b, 1718 int finish_group) 1719 { 1720 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1721 struct ext4_free_extent *bex = &ac->ac_b_ex; 1722 struct ext4_free_extent *gex = &ac->ac_g_ex; 1723 struct ext4_free_extent ex; 1724 int max; 1725 1726 if (ac->ac_status == AC_STATUS_FOUND) 1727 return; 1728 /* 1729 * We don't want to scan for a whole year 1730 */ 1731 if (ac->ac_found > sbi->s_mb_max_to_scan && 1732 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1733 ac->ac_status = AC_STATUS_BREAK; 1734 return; 1735 } 1736 1737 /* 1738 * Haven't found good chunk so far, let's continue 1739 */ 1740 if (bex->fe_len < gex->fe_len) 1741 return; 1742 1743 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 1744 && bex->fe_group == e4b->bd_group) { 1745 /* recheck chunk's availability - we don't know 1746 * when it was found (within this lock-unlock 1747 * period or not) */ 1748 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); 1749 if (max >= gex->fe_len) { 1750 ext4_mb_use_best_found(ac, e4b); 1751 return; 1752 } 1753 } 1754 } 1755 1756 /* 1757 * The routine checks whether found extent is good enough. If it is, 1758 * then the extent gets marked used and flag is set to the context 1759 * to stop scanning. Otherwise, the extent is compared with the 1760 * previous found extent and if new one is better, then it's stored 1761 * in the context. Later, the best found extent will be used, if 1762 * mballoc can't find good enough extent. 1763 * 1764 * FIXME: real allocation policy is to be designed yet! 
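 *
 * A rough sketch of the policy described above (the code below is
 * authoritative; this is only a summary):
 *
 *	if (ex->fe_len == gex->fe_len)		take ex right away
 *	else if (bex->fe_len < gex->fe_len)	keep the larger of bex and ex
 *	else					keep the smaller extent that
 *						still satisfies the goal length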
1765 */ 1766 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 1767 struct ext4_free_extent *ex, 1768 struct ext4_buddy *e4b) 1769 { 1770 struct ext4_free_extent *bex = &ac->ac_b_ex; 1771 struct ext4_free_extent *gex = &ac->ac_g_ex; 1772 1773 BUG_ON(ex->fe_len <= 0); 1774 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1775 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1776 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 1777 1778 ac->ac_found++; 1779 1780 /* 1781 * The special case - take what you catch first 1782 */ 1783 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1784 *bex = *ex; 1785 ext4_mb_use_best_found(ac, e4b); 1786 return; 1787 } 1788 1789 /* 1790 * Let's check whether the chuck is good enough 1791 */ 1792 if (ex->fe_len == gex->fe_len) { 1793 *bex = *ex; 1794 ext4_mb_use_best_found(ac, e4b); 1795 return; 1796 } 1797 1798 /* 1799 * If this is first found extent, just store it in the context 1800 */ 1801 if (bex->fe_len == 0) { 1802 *bex = *ex; 1803 return; 1804 } 1805 1806 /* 1807 * If new found extent is better, store it in the context 1808 */ 1809 if (bex->fe_len < gex->fe_len) { 1810 /* if the request isn't satisfied, any found extent 1811 * larger than previous best one is better */ 1812 if (ex->fe_len > bex->fe_len) 1813 *bex = *ex; 1814 } else if (ex->fe_len > gex->fe_len) { 1815 /* if the request is satisfied, then we try to find 1816 * an extent that still satisfy the request, but is 1817 * smaller than previous one */ 1818 if (ex->fe_len < bex->fe_len) 1819 *bex = *ex; 1820 } 1821 1822 ext4_mb_check_limits(ac, e4b, 0); 1823 } 1824 1825 static noinline_for_stack 1826 int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 1827 struct ext4_buddy *e4b) 1828 { 1829 struct ext4_free_extent ex = ac->ac_b_ex; 1830 ext4_group_t group = ex.fe_group; 1831 int max; 1832 int err; 1833 1834 BUG_ON(ex.fe_len <= 0); 1835 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1836 if (err) 1837 return err; 1838 1839 ext4_lock_group(ac->ac_sb, group); 1840 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 1841 1842 if (max > 0) { 1843 ac->ac_b_ex = ex; 1844 ext4_mb_use_best_found(ac, e4b); 1845 } 1846 1847 ext4_unlock_group(ac->ac_sb, group); 1848 ext4_mb_unload_buddy(e4b); 1849 1850 return 0; 1851 } 1852 1853 static noinline_for_stack 1854 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 1855 struct ext4_buddy *e4b) 1856 { 1857 ext4_group_t group = ac->ac_g_ex.fe_group; 1858 int max; 1859 int err; 1860 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1861 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 1862 struct ext4_free_extent ex; 1863 1864 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 1865 return 0; 1866 if (grp->bb_free == 0) 1867 return 0; 1868 1869 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1870 if (err) 1871 return err; 1872 1873 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 1874 ext4_mb_unload_buddy(e4b); 1875 return 0; 1876 } 1877 1878 ext4_lock_group(ac->ac_sb, group); 1879 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 1880 ac->ac_g_ex.fe_len, &ex); 1881 ex.fe_logical = 0xDEADFA11; /* debug value */ 1882 1883 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 1884 ext4_fsblk_t start; 1885 1886 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 1887 ex.fe_start; 1888 /* use do_div to get remainder (would be 64-bit modulo) */ 1889 if (do_div(start, sbi->s_stripe) == 0) { 1890 ac->ac_found++; 1891 ac->ac_b_ex = ex; 1892 
ext4_mb_use_best_found(ac, e4b); 1893 } 1894 } else if (max >= ac->ac_g_ex.fe_len) { 1895 BUG_ON(ex.fe_len <= 0); 1896 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1897 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1898 ac->ac_found++; 1899 ac->ac_b_ex = ex; 1900 ext4_mb_use_best_found(ac, e4b); 1901 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 1902 /* Sometimes, caller may want to merge even small 1903 * number of blocks to an existing extent */ 1904 BUG_ON(ex.fe_len <= 0); 1905 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1906 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1907 ac->ac_found++; 1908 ac->ac_b_ex = ex; 1909 ext4_mb_use_best_found(ac, e4b); 1910 } 1911 ext4_unlock_group(ac->ac_sb, group); 1912 ext4_mb_unload_buddy(e4b); 1913 1914 return 0; 1915 } 1916 1917 /* 1918 * The routine scans buddy structures (not bitmap!) from given order 1919 * to max order and tries to find big enough chunk to satisfy the req 1920 */ 1921 static noinline_for_stack 1922 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1923 struct ext4_buddy *e4b) 1924 { 1925 struct super_block *sb = ac->ac_sb; 1926 struct ext4_group_info *grp = e4b->bd_info; 1927 void *buddy; 1928 int i; 1929 int k; 1930 int max; 1931 1932 BUG_ON(ac->ac_2order <= 0); 1933 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { 1934 if (grp->bb_counters[i] == 0) 1935 continue; 1936 1937 buddy = mb_find_buddy(e4b, i, &max); 1938 BUG_ON(buddy == NULL); 1939 1940 k = mb_find_next_zero_bit(buddy, max, 0); 1941 if (k >= max) { 1942 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 1943 "%d free clusters of order %d. But found 0", 1944 grp->bb_counters[i], i); 1945 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 1946 e4b->bd_group, 1947 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1948 break; 1949 } 1950 ac->ac_found++; 1951 1952 ac->ac_b_ex.fe_len = 1 << i; 1953 ac->ac_b_ex.fe_start = k << i; 1954 ac->ac_b_ex.fe_group = e4b->bd_group; 1955 1956 ext4_mb_use_best_found(ac, e4b); 1957 1958 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 1959 1960 if (EXT4_SB(sb)->s_mb_stats) 1961 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 1962 1963 break; 1964 } 1965 } 1966 1967 /* 1968 * The routine scans the group and measures all found extents. 1969 * In order to optimize scanning, caller must pass number of 1970 * free blocks in the group, so the routine can know upper limit. 1971 */ 1972 static noinline_for_stack 1973 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 1974 struct ext4_buddy *e4b) 1975 { 1976 struct super_block *sb = ac->ac_sb; 1977 void *bitmap = e4b->bd_bitmap; 1978 struct ext4_free_extent ex; 1979 int i; 1980 int free; 1981 1982 free = e4b->bd_info->bb_free; 1983 if (WARN_ON(free <= 0)) 1984 return; 1985 1986 i = e4b->bd_info->bb_first_free; 1987 1988 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1989 i = mb_find_next_zero_bit(bitmap, 1990 EXT4_CLUSTERS_PER_GROUP(sb), i); 1991 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 1992 /* 1993 * IF we have corrupt bitmap, we won't find any 1994 * free blocks even though group info says we 1995 * have free blocks 1996 */ 1997 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1998 "%d free clusters as per " 1999 "group info. 
But bitmap says 0", 2000 free); 2001 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2002 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2003 break; 2004 } 2005 2006 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2007 if (WARN_ON(ex.fe_len <= 0)) 2008 break; 2009 if (free < ex.fe_len) { 2010 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 2011 "%d free clusters as per " 2012 "group info. But got %d blocks", 2013 free, ex.fe_len); 2014 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2015 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2016 /* 2017 * The number of free blocks differs. This mostly 2018 * indicate that the bitmap is corrupt. So exit 2019 * without claiming the space. 2020 */ 2021 break; 2022 } 2023 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2024 ext4_mb_measure_extent(ac, &ex, e4b); 2025 2026 i += ex.fe_len; 2027 free -= ex.fe_len; 2028 } 2029 2030 ext4_mb_check_limits(ac, e4b, 1); 2031 } 2032 2033 /* 2034 * This is a special case for storages like raid5 2035 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2036 */ 2037 static noinline_for_stack 2038 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2039 struct ext4_buddy *e4b) 2040 { 2041 struct super_block *sb = ac->ac_sb; 2042 struct ext4_sb_info *sbi = EXT4_SB(sb); 2043 void *bitmap = e4b->bd_bitmap; 2044 struct ext4_free_extent ex; 2045 ext4_fsblk_t first_group_block; 2046 ext4_fsblk_t a; 2047 ext4_grpblk_t i; 2048 int max; 2049 2050 BUG_ON(sbi->s_stripe == 0); 2051 2052 /* find first stripe-aligned block in group */ 2053 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2054 2055 a = first_group_block + sbi->s_stripe - 1; 2056 do_div(a, sbi->s_stripe); 2057 i = (a * sbi->s_stripe) - first_group_block; 2058 2059 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2060 if (!mb_test_bit(i, bitmap)) { 2061 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2062 if (max >= sbi->s_stripe) { 2063 ac->ac_found++; 2064 ex.fe_logical = 0xDEADF00D; /* debug value */ 2065 ac->ac_b_ex = ex; 2066 ext4_mb_use_best_found(ac, e4b); 2067 break; 2068 } 2069 } 2070 i += sbi->s_stripe; 2071 } 2072 } 2073 2074 /* 2075 * This is also called BEFORE we load the buddy bitmap. 2076 * Returns either 1 or 0 indicating that the group is either suitable 2077 * for the allocation or not. 
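 *
 * Roughly, the four criteria passes get progressively less picky:
 * cr 0 only accepts a group whose buddy can satisfy the power-of-two
 * request outright (bb_largest_free_order >= ac_2order) and, for data,
 * skips the first group of a flexgroup; cr 1 wants the average
 * fragment (free / fragments) to cover the goal; cr 2 merely needs
 * enough free clusters in total; cr 3 takes any group with free space.
 * E.g. free = 100, fragments = 10, goal = 16: rejected at cr 1
 * (average fragment is only 10), accepted at cr 2.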
2078 */ 2079 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2080 ext4_group_t group, int cr) 2081 { 2082 ext4_grpblk_t free, fragments; 2083 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2084 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2085 2086 BUG_ON(cr < 0 || cr >= 4); 2087 2088 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2089 return false; 2090 2091 free = grp->bb_free; 2092 if (free == 0) 2093 return false; 2094 2095 fragments = grp->bb_fragments; 2096 if (fragments == 0) 2097 return false; 2098 2099 switch (cr) { 2100 case 0: 2101 BUG_ON(ac->ac_2order == 0); 2102 2103 /* Avoid using the first bg of a flexgroup for data files */ 2104 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2105 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2106 ((group % flex_size) == 0)) 2107 return false; 2108 2109 if (free < ac->ac_g_ex.fe_len) 2110 return false; 2111 2112 if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) 2113 return true; 2114 2115 if (grp->bb_largest_free_order < ac->ac_2order) 2116 return false; 2117 2118 return true; 2119 case 1: 2120 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2121 return true; 2122 break; 2123 case 2: 2124 if (free >= ac->ac_g_ex.fe_len) 2125 return true; 2126 break; 2127 case 3: 2128 return true; 2129 default: 2130 BUG(); 2131 } 2132 2133 return false; 2134 } 2135 2136 /* 2137 * This could return negative error code if something goes wrong 2138 * during ext4_mb_init_group(). This should not be called with 2139 * ext4_lock_group() held. 2140 */ 2141 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2142 ext4_group_t group, int cr) 2143 { 2144 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2145 struct super_block *sb = ac->ac_sb; 2146 struct ext4_sb_info *sbi = EXT4_SB(sb); 2147 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2148 ext4_grpblk_t free; 2149 int ret = 0; 2150 2151 if (should_lock) 2152 ext4_lock_group(sb, group); 2153 free = grp->bb_free; 2154 if (free == 0) 2155 goto out; 2156 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2157 goto out; 2158 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2159 goto out; 2160 if (should_lock) 2161 ext4_unlock_group(sb, group); 2162 2163 /* We only do this if the grp has never been initialized */ 2164 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2165 struct ext4_group_desc *gdp = 2166 ext4_get_group_desc(sb, group, NULL); 2167 int ret; 2168 2169 /* cr=0/1 is a very optimistic search to find large 2170 * good chunks almost for free. If buddy data is not 2171 * ready, then this optimization makes no sense. But 2172 * we never skip the first block group in a flex_bg, 2173 * since this gets used for metadata block allocation, 2174 * and we want to make sure we locate metadata blocks 2175 * in the first block group in the flex_bg if possible. 2176 */ 2177 if (cr < 2 && 2178 (!sbi->s_log_groups_per_flex || 2179 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2180 !(ext4_has_group_desc_csum(sb) && 2181 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2182 return 0; 2183 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 2184 if (ret) 2185 return ret; 2186 } 2187 2188 if (should_lock) 2189 ext4_lock_group(sb, group); 2190 ret = ext4_mb_good_group(ac, group, cr); 2191 out: 2192 if (should_lock) 2193 ext4_unlock_group(sb, group); 2194 return ret; 2195 } 2196 2197 /* 2198 * Start prefetching @nr block bitmaps starting at @group. 2199 * Return the next group which needs to be prefetched. 
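 *
 * The expected caller pattern (see ext4_mb_regular_allocator()) is
 * roughly:
 *
 *	nr = sbi->s_mb_prefetch;	(trimmed to the flex_bg boundary)
 *	prefetch_grp = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
 *
 * i.e. the scan position advances by whole batches, and *@cnt counts
 * how many bitmap reads were actually submitted.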
2200 */ 2201 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2202 unsigned int nr, int *cnt) 2203 { 2204 ext4_group_t ngroups = ext4_get_groups_count(sb); 2205 struct buffer_head *bh; 2206 struct blk_plug plug; 2207 2208 blk_start_plug(&plug); 2209 while (nr-- > 0) { 2210 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2211 NULL); 2212 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2213 2214 /* 2215 * Prefetch block groups with free blocks; but don't 2216 * bother if it is marked uninitialized on disk, since 2217 * it won't require I/O to read. Also only try to 2218 * prefetch once, so we avoid getblk() call, which can 2219 * be expensive. 2220 */ 2221 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2222 EXT4_MB_GRP_NEED_INIT(grp) && 2223 ext4_free_group_clusters(sb, gdp) > 0 && 2224 !(ext4_has_group_desc_csum(sb) && 2225 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2226 bh = ext4_read_block_bitmap_nowait(sb, group, true); 2227 if (bh && !IS_ERR(bh)) { 2228 if (!buffer_uptodate(bh) && cnt) 2229 (*cnt)++; 2230 brelse(bh); 2231 } 2232 } 2233 if (++group >= ngroups) 2234 group = 0; 2235 } 2236 blk_finish_plug(&plug); 2237 return group; 2238 } 2239 2240 /* 2241 * Prefetching reads the block bitmap into the buffer cache; but we 2242 * need to make sure that the buddy bitmap in the page cache has been 2243 * initialized. Note that ext4_mb_init_group() will block if the I/O 2244 * is not yet completed, or indeed if it was not initiated by 2245 * ext4_mb_prefetch did not start the I/O. 2246 * 2247 * TODO: We should actually kick off the buddy bitmap setup in a work 2248 * queue when the buffer I/O is completed, so that we don't block 2249 * waiting for the block allocation bitmap read to finish when 2250 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
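 *
 * For example, if ext4_mb_prefetch(sb, 32, 8, &cnt) returned 40, the
 * matching ext4_mb_prefetch_fini(sb, 40, 8) call walks groups 39 down
 * to 32 and builds the buddy data for any of them that still need it.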
2251 */ 2252 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2253 unsigned int nr) 2254 { 2255 while (nr-- > 0) { 2256 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2257 NULL); 2258 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2259 2260 if (!group) 2261 group = ext4_get_groups_count(sb); 2262 group--; 2263 grp = ext4_get_group_info(sb, group); 2264 2265 if (EXT4_MB_GRP_NEED_INIT(grp) && 2266 ext4_free_group_clusters(sb, gdp) > 0 && 2267 !(ext4_has_group_desc_csum(sb) && 2268 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2269 if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2270 break; 2271 } 2272 } 2273 } 2274 2275 static noinline_for_stack int 2276 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2277 { 2278 ext4_group_t prefetch_grp = 0, ngroups, group, i; 2279 int cr = -1; 2280 int err = 0, first_err = 0; 2281 unsigned int nr = 0, prefetch_ios = 0; 2282 struct ext4_sb_info *sbi; 2283 struct super_block *sb; 2284 struct ext4_buddy e4b; 2285 int lost; 2286 2287 sb = ac->ac_sb; 2288 sbi = EXT4_SB(sb); 2289 ngroups = ext4_get_groups_count(sb); 2290 /* non-extent files are limited to low blocks/groups */ 2291 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2292 ngroups = sbi->s_blockfile_groups; 2293 2294 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2295 2296 /* first, try the goal */ 2297 err = ext4_mb_find_by_goal(ac, &e4b); 2298 if (err || ac->ac_status == AC_STATUS_FOUND) 2299 goto out; 2300 2301 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2302 goto out; 2303 2304 /* 2305 * ac->ac_2order is set only if the fe_len is a power of 2 2306 * if ac->ac_2order is set we also set criteria to 0 so that we 2307 * try exact allocation using buddy. 2308 */ 2309 i = fls(ac->ac_g_ex.fe_len); 2310 ac->ac_2order = 0; 2311 /* 2312 * We search using buddy data only if the order of the request 2313 * is greater than equal to the sbi_s_mb_order2_reqs 2314 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2315 * We also support searching for power-of-two requests only for 2316 * requests upto maximum buddy size we have constructed. 2317 */ 2318 if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) { 2319 /* 2320 * This should tell if fe_len is exactly power of 2 2321 */ 2322 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2323 ac->ac_2order = array_index_nospec(i - 1, 2324 sb->s_blocksize_bits + 2); 2325 } 2326 2327 /* if stream allocation is enabled, use global goal */ 2328 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2329 /* TBD: may be hot point */ 2330 spin_lock(&sbi->s_md_lock); 2331 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2332 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2333 spin_unlock(&sbi->s_md_lock); 2334 } 2335 2336 /* Let's just scan groups to find more-less suitable blocks */ 2337 cr = ac->ac_2order ? 0 : 1; 2338 /* 2339 * cr == 0 try to get exact allocation, 2340 * cr == 3 try to get anything 2341 */ 2342 repeat: 2343 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2344 ac->ac_criteria = cr; 2345 /* 2346 * searching for the right group start 2347 * from the goal value specified 2348 */ 2349 group = ac->ac_g_ex.fe_group; 2350 prefetch_grp = group; 2351 2352 for (i = 0; i < ngroups; group++, i++) { 2353 int ret = 0; 2354 cond_resched(); 2355 /* 2356 * Artificially restricted ngroups for non-extent 2357 * files makes group > ngroups possible on first loop. 
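 * E.g. if ngroups was clamped to 16 but the goal group is 20, the
 * first pass through the loop immediately wraps the scan back to
 * group 0.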
2358 */ 2359 if (group >= ngroups) 2360 group = 0; 2361 2362 /* 2363 * Batch reads of the block allocation bitmaps 2364 * to get multiple READs in flight; limit 2365 * prefetching at cr=0/1, otherwise mballoc can 2366 * spend a lot of time loading imperfect groups 2367 */ 2368 if ((prefetch_grp == group) && 2369 (cr > 1 || 2370 prefetch_ios < sbi->s_mb_prefetch_limit)) { 2371 unsigned int curr_ios = prefetch_ios; 2372 2373 nr = sbi->s_mb_prefetch; 2374 if (ext4_has_feature_flex_bg(sb)) { 2375 nr = 1 << sbi->s_log_groups_per_flex; 2376 nr -= group & (nr - 1); 2377 nr = min(nr, sbi->s_mb_prefetch); 2378 } 2379 prefetch_grp = ext4_mb_prefetch(sb, group, 2380 nr, &prefetch_ios); 2381 if (prefetch_ios == curr_ios) 2382 nr = 0; 2383 } 2384 2385 /* This now checks without needing the buddy page */ 2386 ret = ext4_mb_good_group_nolock(ac, group, cr); 2387 if (ret <= 0) { 2388 if (!first_err) 2389 first_err = ret; 2390 continue; 2391 } 2392 2393 err = ext4_mb_load_buddy(sb, group, &e4b); 2394 if (err) 2395 goto out; 2396 2397 ext4_lock_group(sb, group); 2398 2399 /* 2400 * We need to check again after locking the 2401 * block group 2402 */ 2403 ret = ext4_mb_good_group(ac, group, cr); 2404 if (ret == 0) { 2405 ext4_unlock_group(sb, group); 2406 ext4_mb_unload_buddy(&e4b); 2407 continue; 2408 } 2409 2410 ac->ac_groups_scanned++; 2411 if (cr == 0) 2412 ext4_mb_simple_scan_group(ac, &e4b); 2413 else if (cr == 1 && sbi->s_stripe && 2414 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2415 ext4_mb_scan_aligned(ac, &e4b); 2416 else 2417 ext4_mb_complex_scan_group(ac, &e4b); 2418 2419 ext4_unlock_group(sb, group); 2420 ext4_mb_unload_buddy(&e4b); 2421 2422 if (ac->ac_status != AC_STATUS_CONTINUE) 2423 break; 2424 } 2425 } 2426 2427 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2428 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2429 /* 2430 * We've been searching too long. Let's try to allocate 2431 * the best chunk we've found so far 2432 */ 2433 ext4_mb_try_best_found(ac, &e4b); 2434 if (ac->ac_status != AC_STATUS_FOUND) { 2435 /* 2436 * Someone more lucky has already allocated it. 
2437 * The only thing we can do is just take first 2438 * found block(s) 2439 */ 2440 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 2441 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2442 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2443 ac->ac_b_ex.fe_len, lost); 2444 2445 ac->ac_b_ex.fe_group = 0; 2446 ac->ac_b_ex.fe_start = 0; 2447 ac->ac_b_ex.fe_len = 0; 2448 ac->ac_status = AC_STATUS_CONTINUE; 2449 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2450 cr = 3; 2451 goto repeat; 2452 } 2453 } 2454 out: 2455 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2456 err = first_err; 2457 2458 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2459 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2460 ac->ac_flags, cr, err); 2461 2462 if (nr) 2463 ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2464 2465 return err; 2466 } 2467 2468 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2469 { 2470 struct super_block *sb = PDE_DATA(file_inode(seq->file)); 2471 ext4_group_t group; 2472 2473 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2474 return NULL; 2475 group = *pos + 1; 2476 return (void *) ((unsigned long) group); 2477 } 2478 2479 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2480 { 2481 struct super_block *sb = PDE_DATA(file_inode(seq->file)); 2482 ext4_group_t group; 2483 2484 ++*pos; 2485 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2486 return NULL; 2487 group = *pos + 1; 2488 return (void *) ((unsigned long) group); 2489 } 2490 2491 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2492 { 2493 struct super_block *sb = PDE_DATA(file_inode(seq->file)); 2494 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2495 int i; 2496 int err, buddy_loaded = 0; 2497 struct ext4_buddy e4b; 2498 struct ext4_group_info *grinfo; 2499 unsigned char blocksize_bits = min_t(unsigned char, 2500 sb->s_blocksize_bits, 2501 EXT4_MAX_BLOCK_LOG_SIZE); 2502 struct sg { 2503 struct ext4_group_info info; 2504 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2505 } sg; 2506 2507 group--; 2508 if (group == 0) 2509 seq_puts(seq, "#group: free frags first [" 2510 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2511 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2512 2513 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2514 sizeof(struct ext4_group_info); 2515 2516 grinfo = ext4_get_group_info(sb, group); 2517 /* Load the group info in memory only if not already loaded. */ 2518 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2519 err = ext4_mb_load_buddy(sb, group, &e4b); 2520 if (err) { 2521 seq_printf(seq, "#%-5u: I/O error\n", group); 2522 return 0; 2523 } 2524 buddy_loaded = 1; 2525 } 2526 2527 memcpy(&sg, ext4_get_group_info(sb, group), i); 2528 2529 if (buddy_loaded) 2530 ext4_mb_unload_buddy(&e4b); 2531 2532 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2533 sg.info.bb_fragments, sg.info.bb_first_free); 2534 for (i = 0; i <= 13; i++) 2535 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
2536 sg.info.bb_counters[i] : 0); 2537 seq_puts(seq, " ]\n"); 2538 2539 return 0; 2540 } 2541 2542 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2543 { 2544 } 2545 2546 const struct seq_operations ext4_mb_seq_groups_ops = { 2547 .start = ext4_mb_seq_groups_start, 2548 .next = ext4_mb_seq_groups_next, 2549 .stop = ext4_mb_seq_groups_stop, 2550 .show = ext4_mb_seq_groups_show, 2551 }; 2552 2553 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 2554 { 2555 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2556 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 2557 2558 BUG_ON(!cachep); 2559 return cachep; 2560 } 2561 2562 /* 2563 * Allocate the top-level s_group_info array for the specified number 2564 * of groups 2565 */ 2566 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 2567 { 2568 struct ext4_sb_info *sbi = EXT4_SB(sb); 2569 unsigned size; 2570 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 2571 2572 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 2573 EXT4_DESC_PER_BLOCK_BITS(sb); 2574 if (size <= sbi->s_group_info_size) 2575 return 0; 2576 2577 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2578 new_groupinfo = kvzalloc(size, GFP_KERNEL); 2579 if (!new_groupinfo) { 2580 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2581 return -ENOMEM; 2582 } 2583 rcu_read_lock(); 2584 old_groupinfo = rcu_dereference(sbi->s_group_info); 2585 if (old_groupinfo) 2586 memcpy(new_groupinfo, old_groupinfo, 2587 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2588 rcu_read_unlock(); 2589 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 2590 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2591 if (old_groupinfo) 2592 ext4_kvfree_array_rcu(old_groupinfo); 2593 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 2594 sbi->s_group_info_size); 2595 return 0; 2596 } 2597 2598 /* Create and initialize ext4_group_info data for the given group. */ 2599 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2600 struct ext4_group_desc *desc) 2601 { 2602 int i; 2603 int metalen = 0; 2604 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 2605 struct ext4_sb_info *sbi = EXT4_SB(sb); 2606 struct ext4_group_info **meta_group_info; 2607 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2608 2609 /* 2610 * First check if this group is the first of a reserved block. 
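 * (For example, assuming 4KiB blocks and 64-byte group descriptors,
 * EXT4_DESC_PER_BLOCK(sb) is 64, so this is the case once every 64
 * groups, and idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb) selects the
 * matching pointer table.)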
2611 * If it's true, we have to allocate a new table of pointers 2612 * to ext4_group_info structures 2613 */ 2614 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2615 metalen = sizeof(*meta_group_info) << 2616 EXT4_DESC_PER_BLOCK_BITS(sb); 2617 meta_group_info = kmalloc(metalen, GFP_NOFS); 2618 if (meta_group_info == NULL) { 2619 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2620 "for a buddy group"); 2621 goto exit_meta_group_info; 2622 } 2623 rcu_read_lock(); 2624 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 2625 rcu_read_unlock(); 2626 } 2627 2628 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 2629 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2630 2631 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 2632 if (meta_group_info[i] == NULL) { 2633 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2634 goto exit_group_info; 2635 } 2636 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2637 &(meta_group_info[i]->bb_state)); 2638 2639 /* 2640 * initialize bb_free to be able to skip 2641 * empty groups without initialization 2642 */ 2643 if (ext4_has_group_desc_csum(sb) && 2644 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 2645 meta_group_info[i]->bb_free = 2646 ext4_free_clusters_after_init(sb, group, desc); 2647 } else { 2648 meta_group_info[i]->bb_free = 2649 ext4_free_group_clusters(sb, desc); 2650 } 2651 2652 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2653 init_rwsem(&meta_group_info[i]->alloc_sem); 2654 meta_group_info[i]->bb_free_root = RB_ROOT; 2655 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2656 2657 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 2658 return 0; 2659 2660 exit_group_info: 2661 /* If a meta_group_info table has been allocated, release it now */ 2662 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2663 struct ext4_group_info ***group_info; 2664 2665 rcu_read_lock(); 2666 group_info = rcu_dereference(sbi->s_group_info); 2667 kfree(group_info[idx]); 2668 group_info[idx] = NULL; 2669 rcu_read_unlock(); 2670 } 2671 exit_meta_group_info: 2672 return -ENOMEM; 2673 } /* ext4_mb_add_groupinfo */ 2674 2675 static int ext4_mb_init_backend(struct super_block *sb) 2676 { 2677 ext4_group_t ngroups = ext4_get_groups_count(sb); 2678 ext4_group_t i; 2679 struct ext4_sb_info *sbi = EXT4_SB(sb); 2680 int err; 2681 struct ext4_group_desc *desc; 2682 struct ext4_group_info ***group_info; 2683 struct kmem_cache *cachep; 2684 2685 err = ext4_mb_alloc_groupinfo(sb, ngroups); 2686 if (err) 2687 return err; 2688 2689 sbi->s_buddy_cache = new_inode(sb); 2690 if (sbi->s_buddy_cache == NULL) { 2691 ext4_msg(sb, KERN_ERR, "can't get new inode"); 2692 goto err_freesgi; 2693 } 2694 /* To avoid potentially colliding with an valid on-disk inode number, 2695 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 2696 * not in the inode hash, so it should never be found by iget(), but 2697 * this will avoid confusion if it ever shows up during debugging. */ 2698 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 2699 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2700 for (i = 0; i < ngroups; i++) { 2701 cond_resched(); 2702 desc = ext4_get_group_desc(sb, i, NULL); 2703 if (desc == NULL) { 2704 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 2705 goto err_freebuddy; 2706 } 2707 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2708 goto err_freebuddy; 2709 } 2710 2711 if (ext4_has_feature_flex_bg(sb)) { 2712 /* a single flex group is supposed to be read by a single IO. 
2713 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 2714 * unsigned integer, so the maximum shift is 32. 2715 */ 2716 if (sbi->s_es->s_log_groups_per_flex >= 32) { 2717 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 2718 goto err_freesgi; 2719 } 2720 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 2721 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 2722 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 2723 } else { 2724 sbi->s_mb_prefetch = 32; 2725 } 2726 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 2727 sbi->s_mb_prefetch = ext4_get_groups_count(sb); 2728 /* now many real IOs to prefetch within a single allocation at cr=0 2729 * given cr=0 is an CPU-related optimization we shouldn't try to 2730 * load too many groups, at some point we should start to use what 2731 * we've got in memory. 2732 * with an average random access time 5ms, it'd take a second to get 2733 * 200 groups (* N with flex_bg), so let's make this limit 4 2734 */ 2735 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 2736 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 2737 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 2738 2739 return 0; 2740 2741 err_freebuddy: 2742 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2743 while (i-- > 0) 2744 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 2745 i = sbi->s_group_info_size; 2746 rcu_read_lock(); 2747 group_info = rcu_dereference(sbi->s_group_info); 2748 while (i-- > 0) 2749 kfree(group_info[i]); 2750 rcu_read_unlock(); 2751 iput(sbi->s_buddy_cache); 2752 err_freesgi: 2753 rcu_read_lock(); 2754 kvfree(rcu_dereference(sbi->s_group_info)); 2755 rcu_read_unlock(); 2756 return -ENOMEM; 2757 } 2758 2759 static void ext4_groupinfo_destroy_slabs(void) 2760 { 2761 int i; 2762 2763 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 2764 kmem_cache_destroy(ext4_groupinfo_caches[i]); 2765 ext4_groupinfo_caches[i] = NULL; 2766 } 2767 } 2768 2769 static int ext4_groupinfo_create_slab(size_t size) 2770 { 2771 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 2772 int slab_size; 2773 int blocksize_bits = order_base_2(size); 2774 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2775 struct kmem_cache *cachep; 2776 2777 if (cache_index >= NR_GRPINFO_CACHES) 2778 return -EINVAL; 2779 2780 if (unlikely(cache_index < 0)) 2781 cache_index = 0; 2782 2783 mutex_lock(&ext4_grpinfo_slab_create_mutex); 2784 if (ext4_groupinfo_caches[cache_index]) { 2785 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2786 return 0; /* Already created */ 2787 } 2788 2789 slab_size = offsetof(struct ext4_group_info, 2790 bb_counters[blocksize_bits + 2]); 2791 2792 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 2793 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 2794 NULL); 2795 2796 ext4_groupinfo_caches[cache_index] = cachep; 2797 2798 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2799 if (!cachep) { 2800 printk(KERN_EMERG 2801 "EXT4-fs: no memory for groupinfo slab cache\n"); 2802 return -ENOMEM; 2803 } 2804 2805 return 0; 2806 } 2807 2808 int ext4_mb_init(struct super_block *sb) 2809 { 2810 struct ext4_sb_info *sbi = EXT4_SB(sb); 2811 unsigned i, j; 2812 unsigned offset, offset_incr; 2813 unsigned max; 2814 int ret; 2815 2816 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2817 2818 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2819 if (sbi->s_mb_offsets == NULL) { 2820 ret = -ENOMEM; 2821 goto out; 2822 } 2823 2824 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 
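	/*
	 * s_mb_offsets[] and s_mb_maxs[] get one slot per buddy order,
	 * 0 through s_blocksize_bits + 1; e.g. with 4KiB blocks that is
	 * 14 slots for orders 0..13, matching the 2^0..2^13 columns of
	 * the mb_groups seq file above.  Order 0 describes the block
	 * bitmap itself; each higher order has half as many bits and is
	 * packed behind the previous one in the buddy block (see the
	 * loop below).
	 */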
2825 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2826 if (sbi->s_mb_maxs == NULL) { 2827 ret = -ENOMEM; 2828 goto out; 2829 } 2830 2831 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 2832 if (ret < 0) 2833 goto out; 2834 2835 /* order 0 is regular bitmap */ 2836 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2837 sbi->s_mb_offsets[0] = 0; 2838 2839 i = 1; 2840 offset = 0; 2841 offset_incr = 1 << (sb->s_blocksize_bits - 1); 2842 max = sb->s_blocksize << 2; 2843 do { 2844 sbi->s_mb_offsets[i] = offset; 2845 sbi->s_mb_maxs[i] = max; 2846 offset += offset_incr; 2847 offset_incr = offset_incr >> 1; 2848 max = max >> 1; 2849 i++; 2850 } while (i <= sb->s_blocksize_bits + 1); 2851 2852 spin_lock_init(&sbi->s_md_lock); 2853 spin_lock_init(&sbi->s_bal_lock); 2854 sbi->s_mb_free_pending = 0; 2855 INIT_LIST_HEAD(&sbi->s_freed_data_list); 2856 2857 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2858 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2859 sbi->s_mb_stats = MB_DEFAULT_STATS; 2860 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2861 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2862 sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC; 2863 /* 2864 * The default group preallocation is 512, which for 4k block 2865 * sizes translates to 2 megabytes. However for bigalloc file 2866 * systems, this is probably too big (i.e, if the cluster size 2867 * is 1 megabyte, then group preallocation size becomes half a 2868 * gigabyte!). As a default, we will keep a two megabyte 2869 * group pralloc size for cluster sizes up to 64k, and after 2870 * that, we will force a minimum group preallocation size of 2871 * 32 clusters. This translates to 8 megs when the cluster 2872 * size is 256k, and 32 megs when the cluster size is 1 meg, 2873 * which seems reasonable as a default. 2874 */ 2875 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 2876 sbi->s_cluster_bits, 32); 2877 /* 2878 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 2879 * to the lowest multiple of s_stripe which is bigger than 2880 * the s_mb_group_prealloc as determined above. We want 2881 * the preallocation size to be an exact multiple of the 2882 * RAID stripe size so that preallocations don't fragment 2883 * the stripes. 
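 * For example, with the 512-cluster default and s_stripe = 48, the
 * group preallocation becomes roundup(512, 48) = 528 clusters.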
2884 */ 2885 if (sbi->s_stripe > 1) { 2886 sbi->s_mb_group_prealloc = roundup( 2887 sbi->s_mb_group_prealloc, sbi->s_stripe); 2888 } 2889 2890 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2891 if (sbi->s_locality_groups == NULL) { 2892 ret = -ENOMEM; 2893 goto out; 2894 } 2895 for_each_possible_cpu(i) { 2896 struct ext4_locality_group *lg; 2897 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2898 mutex_init(&lg->lg_mutex); 2899 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2900 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2901 spin_lock_init(&lg->lg_prealloc_lock); 2902 } 2903 2904 /* init file for buddy data */ 2905 ret = ext4_mb_init_backend(sb); 2906 if (ret != 0) 2907 goto out_free_locality_groups; 2908 2909 return 0; 2910 2911 out_free_locality_groups: 2912 free_percpu(sbi->s_locality_groups); 2913 sbi->s_locality_groups = NULL; 2914 out: 2915 kfree(sbi->s_mb_offsets); 2916 sbi->s_mb_offsets = NULL; 2917 kfree(sbi->s_mb_maxs); 2918 sbi->s_mb_maxs = NULL; 2919 return ret; 2920 } 2921 2922 /* need to called with the ext4 group lock held */ 2923 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2924 { 2925 struct ext4_prealloc_space *pa; 2926 struct list_head *cur, *tmp; 2927 int count = 0; 2928 2929 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2930 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2931 list_del(&pa->pa_group_list); 2932 count++; 2933 kmem_cache_free(ext4_pspace_cachep, pa); 2934 } 2935 return count; 2936 } 2937 2938 int ext4_mb_release(struct super_block *sb) 2939 { 2940 ext4_group_t ngroups = ext4_get_groups_count(sb); 2941 ext4_group_t i; 2942 int num_meta_group_infos; 2943 struct ext4_group_info *grinfo, ***group_info; 2944 struct ext4_sb_info *sbi = EXT4_SB(sb); 2945 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2946 int count; 2947 2948 if (sbi->s_group_info) { 2949 for (i = 0; i < ngroups; i++) { 2950 cond_resched(); 2951 grinfo = ext4_get_group_info(sb, i); 2952 mb_group_bb_bitmap_free(grinfo); 2953 ext4_lock_group(sb, i); 2954 count = ext4_mb_cleanup_pa(grinfo); 2955 if (count) 2956 mb_debug(sb, "mballoc: %d PAs left\n", 2957 count); 2958 ext4_unlock_group(sb, i); 2959 kmem_cache_free(cachep, grinfo); 2960 } 2961 num_meta_group_infos = (ngroups + 2962 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2963 EXT4_DESC_PER_BLOCK_BITS(sb); 2964 rcu_read_lock(); 2965 group_info = rcu_dereference(sbi->s_group_info); 2966 for (i = 0; i < num_meta_group_infos; i++) 2967 kfree(group_info[i]); 2968 kvfree(group_info); 2969 rcu_read_unlock(); 2970 } 2971 kfree(sbi->s_mb_offsets); 2972 kfree(sbi->s_mb_maxs); 2973 iput(sbi->s_buddy_cache); 2974 if (sbi->s_mb_stats) { 2975 ext4_msg(sb, KERN_INFO, 2976 "mballoc: %u blocks %u reqs (%u success)", 2977 atomic_read(&sbi->s_bal_allocated), 2978 atomic_read(&sbi->s_bal_reqs), 2979 atomic_read(&sbi->s_bal_success)); 2980 ext4_msg(sb, KERN_INFO, 2981 "mballoc: %u extents scanned, %u goal hits, " 2982 "%u 2^N hits, %u breaks, %u lost", 2983 atomic_read(&sbi->s_bal_ex_scanned), 2984 atomic_read(&sbi->s_bal_goals), 2985 atomic_read(&sbi->s_bal_2orders), 2986 atomic_read(&sbi->s_bal_breaks), 2987 atomic_read(&sbi->s_mb_lost_chunks)); 2988 ext4_msg(sb, KERN_INFO, 2989 "mballoc: %lu generated and it took %Lu", 2990 sbi->s_mb_buddies_generated, 2991 sbi->s_mb_generation_time); 2992 ext4_msg(sb, KERN_INFO, 2993 "mballoc: %u preallocated, %u discarded", 2994 atomic_read(&sbi->s_mb_preallocated), 2995 atomic_read(&sbi->s_mb_discarded)); 2996 } 2997 2998 free_percpu(sbi->s_locality_groups); 2999 
3000 return 0; 3001 } 3002 3003 static inline int ext4_issue_discard(struct super_block *sb, 3004 ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3005 struct bio **biop) 3006 { 3007 ext4_fsblk_t discard_block; 3008 3009 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3010 ext4_group_first_block_no(sb, block_group)); 3011 count = EXT4_C2B(EXT4_SB(sb), count); 3012 trace_ext4_discard_blocks(sb, 3013 (unsigned long long) discard_block, count); 3014 if (biop) { 3015 return __blkdev_issue_discard(sb->s_bdev, 3016 (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3017 (sector_t)count << (sb->s_blocksize_bits - 9), 3018 GFP_NOFS, 0, biop); 3019 } else 3020 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3021 } 3022 3023 static void ext4_free_data_in_buddy(struct super_block *sb, 3024 struct ext4_free_data *entry) 3025 { 3026 struct ext4_buddy e4b; 3027 struct ext4_group_info *db; 3028 int err, count = 0, count2 = 0; 3029 3030 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3031 entry->efd_count, entry->efd_group, entry); 3032 3033 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3034 /* we expect to find existing buddy because it's pinned */ 3035 BUG_ON(err != 0); 3036 3037 spin_lock(&EXT4_SB(sb)->s_md_lock); 3038 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3039 spin_unlock(&EXT4_SB(sb)->s_md_lock); 3040 3041 db = e4b.bd_info; 3042 /* there are blocks to put in buddy to make them really free */ 3043 count += entry->efd_count; 3044 count2++; 3045 ext4_lock_group(sb, entry->efd_group); 3046 /* Take it out of per group rb tree */ 3047 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3048 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3049 3050 /* 3051 * Clear the trimmed flag for the group so that the next 3052 * ext4_trim_fs can trim it. 3053 * If the volume is mounted with -o discard, online discard 3054 * is supported and the free blocks will be trimmed online. 3055 */ 3056 if (!test_opt(sb, DISCARD)) 3057 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3058 3059 if (!db->bb_free_root.rb_node) { 3060 /* No more items in the per group rb tree 3061 * balance refcounts from ext4_mb_free_metadata() 3062 */ 3063 put_page(e4b.bd_buddy_page); 3064 put_page(e4b.bd_bitmap_page); 3065 } 3066 ext4_unlock_group(sb, entry->efd_group); 3067 kmem_cache_free(ext4_free_data_cachep, entry); 3068 ext4_mb_unload_buddy(&e4b); 3069 3070 mb_debug(sb, "freed %d blocks in %d structures\n", count, 3071 count2); 3072 } 3073 3074 /* 3075 * This function is called by the jbd2 layer once the commit has finished, 3076 * so we know we can free the blocks that were released with that commit. 
3077 */ 3078 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3079 { 3080 struct ext4_sb_info *sbi = EXT4_SB(sb); 3081 struct ext4_free_data *entry, *tmp; 3082 struct bio *discard_bio = NULL; 3083 struct list_head freed_data_list; 3084 struct list_head *cut_pos = NULL; 3085 int err; 3086 3087 INIT_LIST_HEAD(&freed_data_list); 3088 3089 spin_lock(&sbi->s_md_lock); 3090 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3091 if (entry->efd_tid != commit_tid) 3092 break; 3093 cut_pos = &entry->efd_list; 3094 } 3095 if (cut_pos) 3096 list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3097 cut_pos); 3098 spin_unlock(&sbi->s_md_lock); 3099 3100 if (test_opt(sb, DISCARD)) { 3101 list_for_each_entry(entry, &freed_data_list, efd_list) { 3102 err = ext4_issue_discard(sb, entry->efd_group, 3103 entry->efd_start_cluster, 3104 entry->efd_count, 3105 &discard_bio); 3106 if (err && err != -EOPNOTSUPP) { 3107 ext4_msg(sb, KERN_WARNING, "discard request in" 3108 " group:%d block:%d count:%d failed" 3109 " with %d", entry->efd_group, 3110 entry->efd_start_cluster, 3111 entry->efd_count, err); 3112 } else if (err == -EOPNOTSUPP) 3113 break; 3114 } 3115 3116 if (discard_bio) { 3117 submit_bio_wait(discard_bio); 3118 bio_put(discard_bio); 3119 } 3120 } 3121 3122 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 3123 ext4_free_data_in_buddy(sb, entry); 3124 } 3125 3126 int __init ext4_init_mballoc(void) 3127 { 3128 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 3129 SLAB_RECLAIM_ACCOUNT); 3130 if (ext4_pspace_cachep == NULL) 3131 goto out; 3132 3133 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 3134 SLAB_RECLAIM_ACCOUNT); 3135 if (ext4_ac_cachep == NULL) 3136 goto out_pa_free; 3137 3138 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 3139 SLAB_RECLAIM_ACCOUNT); 3140 if (ext4_free_data_cachep == NULL) 3141 goto out_ac_free; 3142 3143 return 0; 3144 3145 out_ac_free: 3146 kmem_cache_destroy(ext4_ac_cachep); 3147 out_pa_free: 3148 kmem_cache_destroy(ext4_pspace_cachep); 3149 out: 3150 return -ENOMEM; 3151 } 3152 3153 void ext4_exit_mballoc(void) 3154 { 3155 /* 3156 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 3157 * before destroying the slab cache. 
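 * (The PAs are freed from ext4_mb_pa_callback(), an RCU callback;
 * rcu_barrier() guarantees every callback already queued has run, so
 * kmem_cache_destroy() below cannot race with a late free.)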
3158 */ 3159 rcu_barrier(); 3160 kmem_cache_destroy(ext4_pspace_cachep); 3161 kmem_cache_destroy(ext4_ac_cachep); 3162 kmem_cache_destroy(ext4_free_data_cachep); 3163 ext4_groupinfo_destroy_slabs(); 3164 } 3165 3166 3167 /* 3168 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3169 * Returns 0 if success or error code 3170 */ 3171 static noinline_for_stack int 3172 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 3173 handle_t *handle, unsigned int reserv_clstrs) 3174 { 3175 struct buffer_head *bitmap_bh = NULL; 3176 struct ext4_group_desc *gdp; 3177 struct buffer_head *gdp_bh; 3178 struct ext4_sb_info *sbi; 3179 struct super_block *sb; 3180 ext4_fsblk_t block; 3181 int err, len; 3182 3183 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3184 BUG_ON(ac->ac_b_ex.fe_len <= 0); 3185 3186 sb = ac->ac_sb; 3187 sbi = EXT4_SB(sb); 3188 3189 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 3190 if (IS_ERR(bitmap_bh)) { 3191 err = PTR_ERR(bitmap_bh); 3192 bitmap_bh = NULL; 3193 goto out_err; 3194 } 3195 3196 BUFFER_TRACE(bitmap_bh, "getting write access"); 3197 err = ext4_journal_get_write_access(handle, bitmap_bh); 3198 if (err) 3199 goto out_err; 3200 3201 err = -EIO; 3202 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3203 if (!gdp) 3204 goto out_err; 3205 3206 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3207 ext4_free_group_clusters(sb, gdp)); 3208 3209 BUFFER_TRACE(gdp_bh, "get_write_access"); 3210 err = ext4_journal_get_write_access(handle, gdp_bh); 3211 if (err) 3212 goto out_err; 3213 3214 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3215 3216 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3217 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 3218 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 3219 "fs metadata", block, block+len); 3220 /* File system mounted not to panic on error 3221 * Fix the bitmap and return EFSCORRUPTED 3222 * We leak some of the blocks here. 3223 */ 3224 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3225 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3226 ac->ac_b_ex.fe_len); 3227 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3228 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3229 if (!err) 3230 err = -EFSCORRUPTED; 3231 goto out_err; 3232 } 3233 3234 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3235 #ifdef AGGRESSIVE_CHECK 3236 { 3237 int i; 3238 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 3239 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 3240 bitmap_bh->b_data)); 3241 } 3242 } 3243 #endif 3244 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3245 ac->ac_b_ex.fe_len); 3246 if (ext4_has_group_desc_csum(sb) && 3247 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3248 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3249 ext4_free_group_clusters_set(sb, gdp, 3250 ext4_free_clusters_after_init(sb, 3251 ac->ac_b_ex.fe_group, gdp)); 3252 } 3253 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 3254 ext4_free_group_clusters_set(sb, gdp, len); 3255 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 3256 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 3257 3258 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3259 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 3260 /* 3261 * Now reduce the dirty block count also. 
Should not go negative 3262 */ 3263 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 3264 /* release all the reserved blocks if non delalloc */ 3265 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 3266 reserv_clstrs); 3267 3268 if (sbi->s_log_groups_per_flex) { 3269 ext4_group_t flex_group = ext4_flex_group(sbi, 3270 ac->ac_b_ex.fe_group); 3271 atomic64_sub(ac->ac_b_ex.fe_len, 3272 &sbi_array_rcu_deref(sbi, s_flex_groups, 3273 flex_group)->free_clusters); 3274 } 3275 3276 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3277 if (err) 3278 goto out_err; 3279 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 3280 3281 out_err: 3282 brelse(bitmap_bh); 3283 return err; 3284 } 3285 3286 /* 3287 * Idempotent helper for Ext4 fast commit replay path to set the state of 3288 * blocks in bitmaps and update counters. 3289 */ 3290 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 3291 int len, int state) 3292 { 3293 struct buffer_head *bitmap_bh = NULL; 3294 struct ext4_group_desc *gdp; 3295 struct buffer_head *gdp_bh; 3296 struct ext4_sb_info *sbi = EXT4_SB(sb); 3297 ext4_group_t group; 3298 ext4_grpblk_t blkoff; 3299 int i, clen, err; 3300 int already; 3301 3302 clen = EXT4_B2C(sbi, len); 3303 3304 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 3305 bitmap_bh = ext4_read_block_bitmap(sb, group); 3306 if (IS_ERR(bitmap_bh)) { 3307 err = PTR_ERR(bitmap_bh); 3308 bitmap_bh = NULL; 3309 goto out_err; 3310 } 3311 3312 err = -EIO; 3313 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 3314 if (!gdp) 3315 goto out_err; 3316 3317 ext4_lock_group(sb, group); 3318 already = 0; 3319 for (i = 0; i < clen; i++) 3320 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state) 3321 already++; 3322 3323 if (state) 3324 ext4_set_bits(bitmap_bh->b_data, blkoff, clen); 3325 else 3326 mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen); 3327 if (ext4_has_group_desc_csum(sb) && 3328 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3329 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3330 ext4_free_group_clusters_set(sb, gdp, 3331 ext4_free_clusters_after_init(sb, 3332 group, gdp)); 3333 } 3334 if (state) 3335 clen = ext4_free_group_clusters(sb, gdp) - clen + already; 3336 else 3337 clen = ext4_free_group_clusters(sb, gdp) + clen - already; 3338 3339 ext4_free_group_clusters_set(sb, gdp, clen); 3340 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); 3341 ext4_group_desc_csum_set(sb, group, gdp); 3342 3343 ext4_unlock_group(sb, group); 3344 3345 if (sbi->s_log_groups_per_flex) { 3346 ext4_group_t flex_group = ext4_flex_group(sbi, group); 3347 3348 atomic64_sub(len, 3349 &sbi_array_rcu_deref(sbi, s_flex_groups, 3350 flex_group)->free_clusters); 3351 } 3352 3353 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 3354 if (err) 3355 goto out_err; 3356 sync_dirty_buffer(bitmap_bh); 3357 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 3358 sync_dirty_buffer(gdp_bh); 3359 3360 out_err: 3361 brelse(bitmap_bh); 3362 } 3363 3364 /* 3365 * here we normalize request for locality group 3366 * Group request are normalized to s_mb_group_prealloc, which goes to 3367 * s_strip if we set the same via mount option. 3368 * s_mb_group_prealloc can be configured via 3369 * /sys/fs/ext4/<partition>/mb_group_prealloc 3370 * 3371 * XXX: should we try to preallocate more than the group has now? 
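 *
 * Note that the goal length is simply overridden: even a one-cluster
 * request ends up asking for the whole s_mb_group_prealloc chunk, and
 * whatever is left over is kept as preallocation (see
 * ext4_mb_new_preallocation() above).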
3372 */ 3373 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 3374 { 3375 struct super_block *sb = ac->ac_sb; 3376 struct ext4_locality_group *lg = ac->ac_lg; 3377 3378 BUG_ON(lg == NULL); 3379 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 3380 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 3381 } 3382 3383 /* 3384 * Normalization means making request better in terms of 3385 * size and alignment 3386 */ 3387 static noinline_for_stack void 3388 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 3389 struct ext4_allocation_request *ar) 3390 { 3391 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3392 int bsbits, max; 3393 ext4_lblk_t end; 3394 loff_t size, start_off; 3395 loff_t orig_size __maybe_unused; 3396 ext4_lblk_t start; 3397 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3398 struct ext4_prealloc_space *pa; 3399 3400 /* do normalize only data requests, metadata requests 3401 do not need preallocation */ 3402 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3403 return; 3404 3405 /* sometime caller may want exact blocks */ 3406 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3407 return; 3408 3409 /* caller may indicate that preallocation isn't 3410 * required (it's a tail, for example) */ 3411 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 3412 return; 3413 3414 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 3415 ext4_mb_normalize_group_request(ac); 3416 return ; 3417 } 3418 3419 bsbits = ac->ac_sb->s_blocksize_bits; 3420 3421 /* first, let's learn actual file size 3422 * given current request is allocated */ 3423 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 3424 size = size << bsbits; 3425 if (size < i_size_read(ac->ac_inode)) 3426 size = i_size_read(ac->ac_inode); 3427 orig_size = size; 3428 3429 /* max size of free chunks */ 3430 max = 2 << bsbits; 3431 3432 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 3433 (req <= (size) || max <= (chunk_size)) 3434 3435 /* first, try to predict filesize */ 3436 /* XXX: should this table be tunable? 
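	 * E.g. a predicted size of 100KiB is rounded up to the 128KiB
	 * bucket (and start stays at logical offset 0), while a 3MiB
	 * prediction falls into the <=4MiB bucket: the request becomes
	 * 2MiB, aligned down to a 2MiB boundary of the logical offset.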
*/ 3437 start_off = 0; 3438 if (size <= 16 * 1024) { 3439 size = 16 * 1024; 3440 } else if (size <= 32 * 1024) { 3441 size = 32 * 1024; 3442 } else if (size <= 64 * 1024) { 3443 size = 64 * 1024; 3444 } else if (size <= 128 * 1024) { 3445 size = 128 * 1024; 3446 } else if (size <= 256 * 1024) { 3447 size = 256 * 1024; 3448 } else if (size <= 512 * 1024) { 3449 size = 512 * 1024; 3450 } else if (size <= 1024 * 1024) { 3451 size = 1024 * 1024; 3452 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 3453 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3454 (21 - bsbits)) << 21; 3455 size = 2 * 1024 * 1024; 3456 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 3457 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3458 (22 - bsbits)) << 22; 3459 size = 4 * 1024 * 1024; 3460 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 3461 (8<<20)>>bsbits, max, 8 * 1024)) { 3462 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3463 (23 - bsbits)) << 23; 3464 size = 8 * 1024 * 1024; 3465 } else { 3466 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 3467 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 3468 ac->ac_o_ex.fe_len) << bsbits; 3469 } 3470 size = size >> bsbits; 3471 start = start_off >> bsbits; 3472 3473 /* don't cover already allocated blocks in selected range */ 3474 if (ar->pleft && start <= ar->lleft) { 3475 size -= ar->lleft + 1 - start; 3476 start = ar->lleft + 1; 3477 } 3478 if (ar->pright && start + size - 1 >= ar->lright) 3479 size -= start + size - ar->lright; 3480 3481 /* 3482 * Trim allocation request for filesystems with artificially small 3483 * groups. 3484 */ 3485 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 3486 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 3487 3488 end = start + size; 3489 3490 /* check we don't cross already preallocated blocks */ 3491 rcu_read_lock(); 3492 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3493 ext4_lblk_t pa_end; 3494 3495 if (pa->pa_deleted) 3496 continue; 3497 spin_lock(&pa->pa_lock); 3498 if (pa->pa_deleted) { 3499 spin_unlock(&pa->pa_lock); 3500 continue; 3501 } 3502 3503 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3504 pa->pa_len); 3505 3506 /* PA must not overlap original request */ 3507 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 3508 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 3509 3510 /* skip PAs this normalized request doesn't overlap with */ 3511 if (pa->pa_lstart >= end || pa_end <= start) { 3512 spin_unlock(&pa->pa_lock); 3513 continue; 3514 } 3515 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 3516 3517 /* adjust start or end to be adjacent to this pa */ 3518 if (pa_end <= ac->ac_o_ex.fe_logical) { 3519 BUG_ON(pa_end < start); 3520 start = pa_end; 3521 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 3522 BUG_ON(pa->pa_lstart > end); 3523 end = pa->pa_lstart; 3524 } 3525 spin_unlock(&pa->pa_lock); 3526 } 3527 rcu_read_unlock(); 3528 size = end - start; 3529 3530 /* XXX: extra loop to check we really don't overlap preallocations */ 3531 rcu_read_lock(); 3532 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3533 ext4_lblk_t pa_end; 3534 3535 spin_lock(&pa->pa_lock); 3536 if (pa->pa_deleted == 0) { 3537 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3538 pa->pa_len); 3539 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 3540 } 3541 spin_unlock(&pa->pa_lock); 3542 } 3543 rcu_read_unlock(); 3544 3545 if (start + size <= ac->ac_o_ex.fe_logical && 3546 start > ac->ac_o_ex.fe_logical) { 3547 ext4_msg(ac->ac_sb, KERN_ERR, 3548 "start %lu, size %lu, 
fe_logical %lu", 3549 (unsigned long) start, (unsigned long) size, 3550 (unsigned long) ac->ac_o_ex.fe_logical); 3551 BUG(); 3552 } 3553 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3554 3555 /* now prepare goal request */ 3556 3557 /* XXX: is it better to align blocks WRT to logical 3558 * placement or satisfy big request as is */ 3559 ac->ac_g_ex.fe_logical = start; 3560 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 3561 3562 /* define goal start in order to merge */ 3563 if (ar->pright && (ar->lright == (start + size))) { 3564 /* merge to the right */ 3565 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 3566 &ac->ac_f_ex.fe_group, 3567 &ac->ac_f_ex.fe_start); 3568 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3569 } 3570 if (ar->pleft && (ar->lleft + 1 == start)) { 3571 /* merge to the left */ 3572 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 3573 &ac->ac_f_ex.fe_group, 3574 &ac->ac_f_ex.fe_start); 3575 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3576 } 3577 3578 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 3579 orig_size, start); 3580 } 3581 3582 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3583 { 3584 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3585 3586 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3587 atomic_inc(&sbi->s_bal_reqs); 3588 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3589 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3590 atomic_inc(&sbi->s_bal_success); 3591 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3592 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3593 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3594 atomic_inc(&sbi->s_bal_goals); 3595 if (ac->ac_found > sbi->s_mb_max_to_scan) 3596 atomic_inc(&sbi->s_bal_breaks); 3597 } 3598 3599 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3600 trace_ext4_mballoc_alloc(ac); 3601 else 3602 trace_ext4_mballoc_prealloc(ac); 3603 } 3604 3605 /* 3606 * Called on failure; free up any blocks from the inode PA for this 3607 * context. We don't need this for MB_GROUP_PA because we only change 3608 * pa_free in ext4_mb_release_context(), but on failure, we've already 3609 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 3610 */ 3611 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3612 { 3613 struct ext4_prealloc_space *pa = ac->ac_pa; 3614 struct ext4_buddy e4b; 3615 int err; 3616 3617 if (pa == NULL) { 3618 if (ac->ac_f_ex.fe_len == 0) 3619 return; 3620 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 3621 if (err) { 3622 /* 3623 * This should never happen since we pin the 3624 * pages in the ext4_allocation_context so 3625 * ext4_mb_load_buddy() should never fail. 
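 * ext4_mb_use_best_found() took an extra get_page() reference on the
 * bitmap and buddy pages (dropped in ext4_mb_release_context()), so
 * the buddy data is still in the page cache at this point.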
3626 */ 3627 WARN(1, "mb_load_buddy failed (%d)", err); 3628 return; 3629 } 3630 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3631 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 3632 ac->ac_f_ex.fe_len); 3633 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3634 ext4_mb_unload_buddy(&e4b); 3635 return; 3636 } 3637 if (pa->pa_type == MB_INODE_PA) 3638 pa->pa_free += ac->ac_b_ex.fe_len; 3639 } 3640 3641 /* 3642 * use blocks preallocated to inode 3643 */ 3644 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3645 struct ext4_prealloc_space *pa) 3646 { 3647 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3648 ext4_fsblk_t start; 3649 ext4_fsblk_t end; 3650 int len; 3651 3652 /* found preallocated blocks, use them */ 3653 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3654 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 3655 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 3656 len = EXT4_NUM_B2C(sbi, end - start); 3657 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3658 &ac->ac_b_ex.fe_start); 3659 ac->ac_b_ex.fe_len = len; 3660 ac->ac_status = AC_STATUS_FOUND; 3661 ac->ac_pa = pa; 3662 3663 BUG_ON(start < pa->pa_pstart); 3664 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 3665 BUG_ON(pa->pa_free < len); 3666 pa->pa_free -= len; 3667 3668 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 3669 } 3670 3671 /* 3672 * use blocks preallocated to locality group 3673 */ 3674 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3675 struct ext4_prealloc_space *pa) 3676 { 3677 unsigned int len = ac->ac_o_ex.fe_len; 3678 3679 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3680 &ac->ac_b_ex.fe_group, 3681 &ac->ac_b_ex.fe_start); 3682 ac->ac_b_ex.fe_len = len; 3683 ac->ac_status = AC_STATUS_FOUND; 3684 ac->ac_pa = pa; 3685 3686 /* we don't correct pa_pstart or pa_plen here to avoid 3687 * possible race when the group is being loaded concurrently 3688 * instead we correct pa later, after blocks are marked 3689 * in on-disk bitmap -- see ext4_mb_release_context() 3690 * Other CPUs are prevented from allocating from this pa by lg_mutex 3691 */ 3692 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 3693 pa->pa_lstart-len, len, pa); 3694 } 3695 3696 /* 3697 * Return the prealloc space that have minimal distance 3698 * from the goal block. @cpa is the prealloc 3699 * space that is having currently known minimal distance 3700 * from the goal block. 
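 *
 * E.g. with a goal block of 5000, a current best @cpa starting at
 * block 4000 (distance 1000) is replaced by a @pa starting at 5200
 * (distance 200): the new pa's reference is taken and the old one
 * dropped.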
3701 */ 3702 static struct ext4_prealloc_space * 3703 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3704 struct ext4_prealloc_space *pa, 3705 struct ext4_prealloc_space *cpa) 3706 { 3707 ext4_fsblk_t cur_distance, new_distance; 3708 3709 if (cpa == NULL) { 3710 atomic_inc(&pa->pa_count); 3711 return pa; 3712 } 3713 cur_distance = abs(goal_block - cpa->pa_pstart); 3714 new_distance = abs(goal_block - pa->pa_pstart); 3715 3716 if (cur_distance <= new_distance) 3717 return cpa; 3718 3719 /* drop the previous reference */ 3720 atomic_dec(&cpa->pa_count); 3721 atomic_inc(&pa->pa_count); 3722 return pa; 3723 } 3724 3725 /* 3726 * search goal blocks in preallocated space 3727 */ 3728 static noinline_for_stack bool 3729 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3730 { 3731 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3732 int order, i; 3733 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3734 struct ext4_locality_group *lg; 3735 struct ext4_prealloc_space *pa, *cpa = NULL; 3736 ext4_fsblk_t goal_block; 3737 3738 /* only data can be preallocated */ 3739 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3740 return false; 3741 3742 /* first, try per-file preallocation */ 3743 rcu_read_lock(); 3744 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3745 3746 /* all fields in this condition don't change, 3747 * so we can skip locking for them */ 3748 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3749 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 3750 EXT4_C2B(sbi, pa->pa_len))) 3751 continue; 3752 3753 /* non-extent files can't have physical blocks past 2^32 */ 3754 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3755 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 3756 EXT4_MAX_BLOCK_FILE_PHYS)) 3757 continue; 3758 3759 /* found preallocated blocks, use them */ 3760 spin_lock(&pa->pa_lock); 3761 if (pa->pa_deleted == 0 && pa->pa_free) { 3762 atomic_inc(&pa->pa_count); 3763 ext4_mb_use_inode_pa(ac, pa); 3764 spin_unlock(&pa->pa_lock); 3765 ac->ac_criteria = 10; 3766 rcu_read_unlock(); 3767 return true; 3768 } 3769 spin_unlock(&pa->pa_lock); 3770 } 3771 rcu_read_unlock(); 3772 3773 /* can we use group allocation? */ 3774 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3775 return false; 3776 3777 /* inode may have no locality group for some reason */ 3778 lg = ac->ac_lg; 3779 if (lg == NULL) 3780 return false; 3781 order = fls(ac->ac_o_ex.fe_len) - 1; 3782 if (order > PREALLOC_TB_SIZE - 1) 3783 /* The max size of hash table is PREALLOC_TB_SIZE */ 3784 order = PREALLOC_TB_SIZE - 1; 3785 3786 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3787 /* 3788 * search for the prealloc space that is having 3789 * minimal distance from the goal block. 3790 */ 3791 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3792 rcu_read_lock(); 3793 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3794 pa_inode_list) { 3795 spin_lock(&pa->pa_lock); 3796 if (pa->pa_deleted == 0 && 3797 pa->pa_free >= ac->ac_o_ex.fe_len) { 3798 3799 cpa = ext4_mb_check_group_pa(goal_block, 3800 pa, cpa); 3801 } 3802 spin_unlock(&pa->pa_lock); 3803 } 3804 rcu_read_unlock(); 3805 } 3806 if (cpa) { 3807 ext4_mb_use_group_pa(ac, cpa); 3808 ac->ac_criteria = 20; 3809 return true; 3810 } 3811 return false; 3812 } 3813 3814 /* 3815 * the function goes through all block freed in the group 3816 * but not yet committed and marks them used in in-core bitmap. 
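 * (these extents live on grp->bb_free_root, waiting for their
 * transaction to commit)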
3817 * buddy must be generated from this bitmap 3818 * Need to be called with the ext4 group lock held 3819 */ 3820 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 3821 ext4_group_t group) 3822 { 3823 struct rb_node *n; 3824 struct ext4_group_info *grp; 3825 struct ext4_free_data *entry; 3826 3827 grp = ext4_get_group_info(sb, group); 3828 n = rb_first(&(grp->bb_free_root)); 3829 3830 while (n) { 3831 entry = rb_entry(n, struct ext4_free_data, efd_node); 3832 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 3833 n = rb_next(n); 3834 } 3835 return; 3836 } 3837 3838 /* 3839 * the function goes through all preallocation in this group and marks them 3840 * used in in-core bitmap. buddy must be generated from this bitmap 3841 * Need to be called with ext4 group lock held 3842 */ 3843 static noinline_for_stack 3844 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 3845 ext4_group_t group) 3846 { 3847 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3848 struct ext4_prealloc_space *pa; 3849 struct list_head *cur; 3850 ext4_group_t groupnr; 3851 ext4_grpblk_t start; 3852 int preallocated = 0; 3853 int len; 3854 3855 /* all form of preallocation discards first load group, 3856 * so the only competing code is preallocation use. 3857 * we don't need any locking here 3858 * notice we do NOT ignore preallocations with pa_deleted 3859 * otherwise we could leave used blocks available for 3860 * allocation in buddy when concurrent ext4_mb_put_pa() 3861 * is dropping preallocation 3862 */ 3863 list_for_each(cur, &grp->bb_prealloc_list) { 3864 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3865 spin_lock(&pa->pa_lock); 3866 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3867 &groupnr, &start); 3868 len = pa->pa_len; 3869 spin_unlock(&pa->pa_lock); 3870 if (unlikely(len == 0)) 3871 continue; 3872 BUG_ON(groupnr != group); 3873 ext4_set_bits(bitmap, start, len); 3874 preallocated += len; 3875 } 3876 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 3877 } 3878 3879 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 3880 struct ext4_prealloc_space *pa) 3881 { 3882 struct ext4_inode_info *ei; 3883 3884 if (pa->pa_deleted) { 3885 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 3886 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 3887 pa->pa_len); 3888 return; 3889 } 3890 3891 pa->pa_deleted = 1; 3892 3893 if (pa->pa_type == MB_INODE_PA) { 3894 ei = EXT4_I(pa->pa_inode); 3895 atomic_dec(&ei->i_prealloc_active); 3896 } 3897 } 3898 3899 static void ext4_mb_pa_callback(struct rcu_head *head) 3900 { 3901 struct ext4_prealloc_space *pa; 3902 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3903 3904 BUG_ON(atomic_read(&pa->pa_count)); 3905 BUG_ON(pa->pa_deleted == 0); 3906 kmem_cache_free(ext4_pspace_cachep, pa); 3907 } 3908 3909 /* 3910 * drops a reference to preallocated space descriptor 3911 * if this was the last reference and the space is consumed 3912 */ 3913 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3914 struct super_block *sb, struct ext4_prealloc_space *pa) 3915 { 3916 ext4_group_t grp; 3917 ext4_fsblk_t grp_blk; 3918 3919 /* in this short window concurrent discard can set pa_deleted */ 3920 spin_lock(&pa->pa_lock); 3921 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 3922 spin_unlock(&pa->pa_lock); 3923 return; 3924 } 3925 3926 if (pa->pa_deleted == 1) { 3927 spin_unlock(&pa->pa_lock); 3928 return; 3929 } 3930 3931 
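	/*
	 * Flag the pa as deleted while pa_lock is still held, so that
	 * ext4_mb_use_preallocated() and the group discard path skip
	 * it from here on.
	 */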
	ext4_mb_mark_pa_deleted(sb, pa);
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation, pa_pstart may be in the
	 * next group when pa is used up
	 */
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;

	grp = ext4_get_group_number(sb, grp_blk);

	/*
	 * possible race:
	 *
	 *  P1 (buddy init)                     P2 (regular allocation)
	 *                                      find block B in PA
	 *  copy on-disk bitmap to buddy
	 *                                      mark B in on-disk bitmap
	 *                                      drop PA from group
	 *  mark all PAs in buddy
	 *
	 * thus, P1 initializes buddy with B available. to prevent this
	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
	 * against that pair
	 */
	ext4_lock_group(sb, grp);
	list_del(&pa->pa_group_list);
	ext4_unlock_group(sb, grp);

	spin_lock(pa->pa_obj_lock);
	list_del_rcu(&pa->pa_inode_list);
	spin_unlock(pa->pa_obj_lock);

	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}

/*
 * creates new preallocated space for given inode
 */
static noinline_for_stack void
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;
	struct ext4_inode_info *ei;

	/* preallocate only when found space is larger than requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
	BUG_ON(ac->ac_pa == NULL);

	pa = ac->ac_pa;

	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
		int winl;
		int wins;
		int win;
		int offs;

		/* we can't allocate as much as normalizer wants.
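		 * (for example, the request was normalized to 32 blocks
		 * but the scan only found a free extent of 16)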
3996 * so, found space must get proper lstart 3997 * to cover original request */ 3998 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3999 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4000 4001 /* we're limited by original request in that 4002 * logical block must be covered any way 4003 * winl is window we can move our chunk within */ 4004 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 4005 4006 /* also, we should cover whole original request */ 4007 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 4008 4009 /* the smallest one defines real window */ 4010 win = min(winl, wins); 4011 4012 offs = ac->ac_o_ex.fe_logical % 4013 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4014 if (offs && offs < win) 4015 win = offs; 4016 4017 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 4018 EXT4_NUM_B2C(sbi, win); 4019 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4020 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 4021 } 4022 4023 /* preallocation can change ac_b_ex, thus we store actually 4024 * allocated blocks for history */ 4025 ac->ac_f_ex = ac->ac_b_ex; 4026 4027 pa->pa_lstart = ac->ac_b_ex.fe_logical; 4028 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4029 pa->pa_len = ac->ac_b_ex.fe_len; 4030 pa->pa_free = pa->pa_len; 4031 spin_lock_init(&pa->pa_lock); 4032 INIT_LIST_HEAD(&pa->pa_inode_list); 4033 INIT_LIST_HEAD(&pa->pa_group_list); 4034 pa->pa_deleted = 0; 4035 pa->pa_type = MB_INODE_PA; 4036 4037 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4038 pa->pa_len, pa->pa_lstart); 4039 trace_ext4_mb_new_inode_pa(ac, pa); 4040 4041 ext4_mb_use_inode_pa(ac, pa); 4042 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4043 4044 ei = EXT4_I(ac->ac_inode); 4045 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4046 4047 pa->pa_obj_lock = &ei->i_prealloc_lock; 4048 pa->pa_inode = ac->ac_inode; 4049 4050 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4051 4052 spin_lock(pa->pa_obj_lock); 4053 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 4054 spin_unlock(pa->pa_obj_lock); 4055 atomic_inc(&ei->i_prealloc_active); 4056 } 4057 4058 /* 4059 * creates new preallocated space for locality group inodes belongs to 4060 */ 4061 static noinline_for_stack void 4062 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 4063 { 4064 struct super_block *sb = ac->ac_sb; 4065 struct ext4_locality_group *lg; 4066 struct ext4_prealloc_space *pa; 4067 struct ext4_group_info *grp; 4068 4069 /* preallocate only when found space is larger then requested */ 4070 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4071 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4072 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 4073 BUG_ON(ac->ac_pa == NULL); 4074 4075 pa = ac->ac_pa; 4076 4077 /* preallocation can change ac_b_ex, thus we store actually 4078 * allocated blocks for history */ 4079 ac->ac_f_ex = ac->ac_b_ex; 4080 4081 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4082 pa->pa_lstart = pa->pa_pstart; 4083 pa->pa_len = ac->ac_b_ex.fe_len; 4084 pa->pa_free = pa->pa_len; 4085 spin_lock_init(&pa->pa_lock); 4086 INIT_LIST_HEAD(&pa->pa_inode_list); 4087 INIT_LIST_HEAD(&pa->pa_group_list); 4088 pa->pa_deleted = 0; 4089 pa->pa_type = MB_GROUP_PA; 4090 4091 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4092 pa->pa_len, pa->pa_lstart); 4093 trace_ext4_mb_new_group_pa(ac, pa); 4094 4095 ext4_mb_use_group_pa(ac, pa); 4096 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 4097 4098 grp = ext4_get_group_info(sb, 
ac->ac_b_ex.fe_group); 4099 lg = ac->ac_lg; 4100 BUG_ON(lg == NULL); 4101 4102 pa->pa_obj_lock = &lg->lg_prealloc_lock; 4103 pa->pa_inode = NULL; 4104 4105 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4106 4107 /* 4108 * We will later add the new pa to the right bucket 4109 * after updating the pa_free in ext4_mb_release_context 4110 */ 4111 } 4112 4113 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 4114 { 4115 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4116 ext4_mb_new_group_pa(ac); 4117 else 4118 ext4_mb_new_inode_pa(ac); 4119 } 4120 4121 /* 4122 * finds all unused blocks in on-disk bitmap, frees them in 4123 * in-core bitmap and buddy. 4124 * @pa must be unlinked from inode and group lists, so that 4125 * nobody else can find/use it. 4126 * the caller MUST hold group/inode locks. 4127 * TODO: optimize the case when there are no in-core structures yet 4128 */ 4129 static noinline_for_stack int 4130 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 4131 struct ext4_prealloc_space *pa) 4132 { 4133 struct super_block *sb = e4b->bd_sb; 4134 struct ext4_sb_info *sbi = EXT4_SB(sb); 4135 unsigned int end; 4136 unsigned int next; 4137 ext4_group_t group; 4138 ext4_grpblk_t bit; 4139 unsigned long long grp_blk_start; 4140 int free = 0; 4141 4142 BUG_ON(pa->pa_deleted == 0); 4143 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4144 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 4145 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4146 end = bit + pa->pa_len; 4147 4148 while (bit < end) { 4149 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 4150 if (bit >= end) 4151 break; 4152 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 4153 mb_debug(sb, "free preallocated %u/%u in group %u\n", 4154 (unsigned) ext4_group_first_block_no(sb, group) + bit, 4155 (unsigned) next - bit, (unsigned) group); 4156 free += next - bit; 4157 4158 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 4159 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 4160 EXT4_C2B(sbi, bit)), 4161 next - bit); 4162 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 4163 bit = next + 1; 4164 } 4165 if (free != pa->pa_free) { 4166 ext4_msg(e4b->bd_sb, KERN_CRIT, 4167 "pa %p: logic %lu, phys. %lu, len %d", 4168 pa, (unsigned long) pa->pa_lstart, 4169 (unsigned long) pa->pa_pstart, 4170 pa->pa_len); 4171 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 4172 free, pa->pa_free); 4173 /* 4174 * pa is already deleted so we use the value obtained 4175 * from the bitmap and continue. 
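		 * The "free" count computed from the on-disk bitmap is
		 * what gets added to s_mb_discarded below.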
4176 */ 4177 } 4178 atomic_add(free, &sbi->s_mb_discarded); 4179 4180 return 0; 4181 } 4182 4183 static noinline_for_stack int 4184 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 4185 struct ext4_prealloc_space *pa) 4186 { 4187 struct super_block *sb = e4b->bd_sb; 4188 ext4_group_t group; 4189 ext4_grpblk_t bit; 4190 4191 trace_ext4_mb_release_group_pa(sb, pa); 4192 BUG_ON(pa->pa_deleted == 0); 4193 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4194 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4195 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 4196 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 4197 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 4198 4199 return 0; 4200 } 4201 4202 /* 4203 * releases all preallocations in given group 4204 * 4205 * first, we need to decide discard policy: 4206 * - when do we discard 4207 * 1) ENOSPC 4208 * - how many do we discard 4209 * 1) how many requested 4210 */ 4211 static noinline_for_stack int 4212 ext4_mb_discard_group_preallocations(struct super_block *sb, 4213 ext4_group_t group, int needed) 4214 { 4215 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4216 struct buffer_head *bitmap_bh = NULL; 4217 struct ext4_prealloc_space *pa, *tmp; 4218 struct list_head list; 4219 struct ext4_buddy e4b; 4220 int err; 4221 int busy = 0; 4222 int free, free_total = 0; 4223 4224 mb_debug(sb, "discard preallocation for group %u\n", group); 4225 if (list_empty(&grp->bb_prealloc_list)) 4226 goto out_dbg; 4227 4228 bitmap_bh = ext4_read_block_bitmap(sb, group); 4229 if (IS_ERR(bitmap_bh)) { 4230 err = PTR_ERR(bitmap_bh); 4231 ext4_error_err(sb, -err, 4232 "Error %d reading block bitmap for %u", 4233 err, group); 4234 goto out_dbg; 4235 } 4236 4237 err = ext4_mb_load_buddy(sb, group, &e4b); 4238 if (err) { 4239 ext4_warning(sb, "Error %d loading buddy information for %u", 4240 err, group); 4241 put_bh(bitmap_bh); 4242 goto out_dbg; 4243 } 4244 4245 if (needed == 0) 4246 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 4247 4248 INIT_LIST_HEAD(&list); 4249 repeat: 4250 free = 0; 4251 ext4_lock_group(sb, group); 4252 list_for_each_entry_safe(pa, tmp, 4253 &grp->bb_prealloc_list, pa_group_list) { 4254 spin_lock(&pa->pa_lock); 4255 if (atomic_read(&pa->pa_count)) { 4256 spin_unlock(&pa->pa_lock); 4257 busy = 1; 4258 continue; 4259 } 4260 if (pa->pa_deleted) { 4261 spin_unlock(&pa->pa_lock); 4262 continue; 4263 } 4264 4265 /* seems this one can be freed ... */ 4266 ext4_mb_mark_pa_deleted(sb, pa); 4267 4268 if (!free) 4269 this_cpu_inc(discard_pa_seq); 4270 4271 /* we can trust pa_free ... 
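		 * because pa_count is zero and pa_deleted has just been
		 * set under pa_lock, so no allocation can consume blocks
		 * from this pa any more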
*/ 4272 free += pa->pa_free; 4273 4274 spin_unlock(&pa->pa_lock); 4275 4276 list_del(&pa->pa_group_list); 4277 list_add(&pa->u.pa_tmp_list, &list); 4278 } 4279 4280 /* now free all selected PAs */ 4281 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4282 4283 /* remove from object (inode or locality group) */ 4284 spin_lock(pa->pa_obj_lock); 4285 list_del_rcu(&pa->pa_inode_list); 4286 spin_unlock(pa->pa_obj_lock); 4287 4288 if (pa->pa_type == MB_GROUP_PA) 4289 ext4_mb_release_group_pa(&e4b, pa); 4290 else 4291 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4292 4293 list_del(&pa->u.pa_tmp_list); 4294 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4295 } 4296 4297 free_total += free; 4298 4299 /* if we still need more blocks and some PAs were used, try again */ 4300 if (free_total < needed && busy) { 4301 ext4_unlock_group(sb, group); 4302 cond_resched(); 4303 busy = 0; 4304 goto repeat; 4305 } 4306 ext4_unlock_group(sb, group); 4307 ext4_mb_unload_buddy(&e4b); 4308 put_bh(bitmap_bh); 4309 out_dbg: 4310 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 4311 free_total, group, grp->bb_free); 4312 return free_total; 4313 } 4314 4315 /* 4316 * releases all non-used preallocated blocks for given inode 4317 * 4318 * It's important to discard preallocations under i_data_sem 4319 * We don't want another block to be served from the prealloc 4320 * space when we are discarding the inode prealloc space. 4321 * 4322 * FIXME!! Make sure it is valid at all the call sites 4323 */ 4324 void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 4325 { 4326 struct ext4_inode_info *ei = EXT4_I(inode); 4327 struct super_block *sb = inode->i_sb; 4328 struct buffer_head *bitmap_bh = NULL; 4329 struct ext4_prealloc_space *pa, *tmp; 4330 ext4_group_t group = 0; 4331 struct list_head list; 4332 struct ext4_buddy e4b; 4333 int err; 4334 4335 if (!S_ISREG(inode->i_mode)) { 4336 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 4337 return; 4338 } 4339 4340 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 4341 return; 4342 4343 mb_debug(sb, "discard preallocation for inode %lu\n", 4344 inode->i_ino); 4345 trace_ext4_discard_preallocations(inode, 4346 atomic_read(&ei->i_prealloc_active), needed); 4347 4348 INIT_LIST_HEAD(&list); 4349 4350 if (needed == 0) 4351 needed = UINT_MAX; 4352 4353 repeat: 4354 /* first, collect all pa's in the inode */ 4355 spin_lock(&ei->i_prealloc_lock); 4356 while (!list_empty(&ei->i_prealloc_list) && needed) { 4357 pa = list_entry(ei->i_prealloc_list.prev, 4358 struct ext4_prealloc_space, pa_inode_list); 4359 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 4360 spin_lock(&pa->pa_lock); 4361 if (atomic_read(&pa->pa_count)) { 4362 /* this shouldn't happen often - nobody should 4363 * use preallocation while we're discarding it */ 4364 spin_unlock(&pa->pa_lock); 4365 spin_unlock(&ei->i_prealloc_lock); 4366 ext4_msg(sb, KERN_ERR, 4367 "uh-oh! used pa while discarding"); 4368 WARN_ON(1); 4369 schedule_timeout_uninterruptible(HZ); 4370 goto repeat; 4371 4372 } 4373 if (pa->pa_deleted == 0) { 4374 ext4_mb_mark_pa_deleted(sb, pa); 4375 spin_unlock(&pa->pa_lock); 4376 list_del_rcu(&pa->pa_inode_list); 4377 list_add(&pa->u.pa_tmp_list, &list); 4378 needed--; 4379 continue; 4380 } 4381 4382 /* someone is deleting pa right now */ 4383 spin_unlock(&pa->pa_lock); 4384 spin_unlock(&ei->i_prealloc_lock); 4385 4386 /* we have to wait here because pa_deleted 4387 * doesn't mean pa is already unlinked from 4388 * the list. 
as we might be called from 4389 * ->clear_inode() the inode will get freed 4390 * and concurrent thread which is unlinking 4391 * pa from inode's list may access already 4392 * freed memory, bad-bad-bad */ 4393 4394 /* XXX: if this happens too often, we can 4395 * add a flag to force wait only in case 4396 * of ->clear_inode(), but not in case of 4397 * regular truncate */ 4398 schedule_timeout_uninterruptible(HZ); 4399 goto repeat; 4400 } 4401 spin_unlock(&ei->i_prealloc_lock); 4402 4403 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4404 BUG_ON(pa->pa_type != MB_INODE_PA); 4405 group = ext4_get_group_number(sb, pa->pa_pstart); 4406 4407 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 4408 GFP_NOFS|__GFP_NOFAIL); 4409 if (err) { 4410 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 4411 err, group); 4412 continue; 4413 } 4414 4415 bitmap_bh = ext4_read_block_bitmap(sb, group); 4416 if (IS_ERR(bitmap_bh)) { 4417 err = PTR_ERR(bitmap_bh); 4418 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 4419 err, group); 4420 ext4_mb_unload_buddy(&e4b); 4421 continue; 4422 } 4423 4424 ext4_lock_group(sb, group); 4425 list_del(&pa->pa_group_list); 4426 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4427 ext4_unlock_group(sb, group); 4428 4429 ext4_mb_unload_buddy(&e4b); 4430 put_bh(bitmap_bh); 4431 4432 list_del(&pa->u.pa_tmp_list); 4433 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4434 } 4435 } 4436 4437 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 4438 { 4439 struct ext4_prealloc_space *pa; 4440 4441 BUG_ON(ext4_pspace_cachep == NULL); 4442 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 4443 if (!pa) 4444 return -ENOMEM; 4445 atomic_set(&pa->pa_count, 1); 4446 ac->ac_pa = pa; 4447 return 0; 4448 } 4449 4450 static void ext4_mb_pa_free(struct ext4_allocation_context *ac) 4451 { 4452 struct ext4_prealloc_space *pa = ac->ac_pa; 4453 4454 BUG_ON(!pa); 4455 ac->ac_pa = NULL; 4456 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 4457 kmem_cache_free(ext4_pspace_cachep, pa); 4458 } 4459 4460 #ifdef CONFIG_EXT4_DEBUG 4461 static inline void ext4_mb_show_pa(struct super_block *sb) 4462 { 4463 ext4_group_t i, ngroups; 4464 4465 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 4466 return; 4467 4468 ngroups = ext4_get_groups_count(sb); 4469 mb_debug(sb, "groups: "); 4470 for (i = 0; i < ngroups; i++) { 4471 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 4472 struct ext4_prealloc_space *pa; 4473 ext4_grpblk_t start; 4474 struct list_head *cur; 4475 ext4_lock_group(sb, i); 4476 list_for_each(cur, &grp->bb_prealloc_list) { 4477 pa = list_entry(cur, struct ext4_prealloc_space, 4478 pa_group_list); 4479 spin_lock(&pa->pa_lock); 4480 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4481 NULL, &start); 4482 spin_unlock(&pa->pa_lock); 4483 mb_debug(sb, "PA:%u:%d:%d\n", i, start, 4484 pa->pa_len); 4485 } 4486 ext4_unlock_group(sb, i); 4487 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 4488 grp->bb_fragments); 4489 } 4490 } 4491 4492 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4493 { 4494 struct super_block *sb = ac->ac_sb; 4495 4496 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 4497 return; 4498 4499 mb_debug(sb, "Can't allocate:" 4500 " Allocation context details:"); 4501 mb_debug(sb, "status %u flags 0x%x", 4502 ac->ac_status, ac->ac_flags); 4503 mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 4504 "goal %lu/%lu/%lu@%lu, " 4505 "best %lu/%lu/%lu@%lu cr %d", 4506 (unsigned long)ac->ac_o_ex.fe_group, 4507 (unsigned 
long)ac->ac_o_ex.fe_start,
		 (unsigned long)ac->ac_o_ex.fe_len,
		 (unsigned long)ac->ac_o_ex.fe_logical,
		 (unsigned long)ac->ac_g_ex.fe_group,
		 (unsigned long)ac->ac_g_ex.fe_start,
		 (unsigned long)ac->ac_g_ex.fe_len,
		 (unsigned long)ac->ac_g_ex.fe_logical,
		 (unsigned long)ac->ac_b_ex.fe_group,
		 (unsigned long)ac->ac_b_ex.fe_start,
		 (unsigned long)ac->ac_b_ex.fe_len,
		 (unsigned long)ac->ac_b_ex.fe_logical,
		 (int)ac->ac_criteria);
	mb_debug(sb, "%u found", ac->ac_found);
	ext4_mb_show_pa(sb);
}
#else
static inline void ext4_mb_show_pa(struct super_block *sb)
{
	return;
}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	ext4_mb_show_pa(ac->ac_sb);
	return;
}
#endif

/*
 * We use locality group preallocation for small files. The size of the
 * file is determined by the current size or the resulting size after
 * allocation, whichever is larger.
 *
 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
 */
static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits = ac->ac_sb->s_blocksize_bits;
	loff_t size, isize;

	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		>> bsbits;

	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
	    !inode_is_open_for_write(ac->ac_inode)) {
		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
		return;
	}

	if (sbi->s_mb_group_prealloc <= 0) {
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		return;
	}

	/* don't use group allocation for large files */
	size = max(size, isize);
	if (size > sbi->s_mb_stream_request) {
		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
		return;
	}

	BUG_ON(ac->ac_lg != NULL);
	/*
	 * locality group prealloc space is per cpu. The reason for having
	 * per-cpu locality groups is to reduce the contention between block
	 * requests from multiple CPUs.
4580 */ 4581 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 4582 4583 /* we're going to use group allocation */ 4584 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 4585 4586 /* serialize all allocations in the group */ 4587 mutex_lock(&ac->ac_lg->lg_mutex); 4588 } 4589 4590 static noinline_for_stack int 4591 ext4_mb_initialize_context(struct ext4_allocation_context *ac, 4592 struct ext4_allocation_request *ar) 4593 { 4594 struct super_block *sb = ar->inode->i_sb; 4595 struct ext4_sb_info *sbi = EXT4_SB(sb); 4596 struct ext4_super_block *es = sbi->s_es; 4597 ext4_group_t group; 4598 unsigned int len; 4599 ext4_fsblk_t goal; 4600 ext4_grpblk_t block; 4601 4602 /* we can't allocate > group size */ 4603 len = ar->len; 4604 4605 /* just a dirty hack to filter too big requests */ 4606 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 4607 len = EXT4_CLUSTERS_PER_GROUP(sb); 4608 4609 /* start searching from the goal */ 4610 goal = ar->goal; 4611 if (goal < le32_to_cpu(es->s_first_data_block) || 4612 goal >= ext4_blocks_count(es)) 4613 goal = le32_to_cpu(es->s_first_data_block); 4614 ext4_get_group_no_and_offset(sb, goal, &group, &block); 4615 4616 /* set up allocation goals */ 4617 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 4618 ac->ac_status = AC_STATUS_CONTINUE; 4619 ac->ac_sb = sb; 4620 ac->ac_inode = ar->inode; 4621 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 4622 ac->ac_o_ex.fe_group = group; 4623 ac->ac_o_ex.fe_start = block; 4624 ac->ac_o_ex.fe_len = len; 4625 ac->ac_g_ex = ac->ac_o_ex; 4626 ac->ac_flags = ar->flags; 4627 4628 /* we have to define context: we'll work with a file or 4629 * locality group. this is a policy, actually */ 4630 ext4_mb_group_or_file(ac); 4631 4632 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 4633 "left: %u/%u, right %u/%u to %swritable\n", 4634 (unsigned) ar->len, (unsigned) ar->logical, 4635 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 4636 (unsigned) ar->lleft, (unsigned) ar->pleft, 4637 (unsigned) ar->lright, (unsigned) ar->pright, 4638 inode_is_open_for_write(ar->inode) ? "" : "non-"); 4639 return 0; 4640 4641 } 4642 4643 static noinline_for_stack void 4644 ext4_mb_discard_lg_preallocations(struct super_block *sb, 4645 struct ext4_locality_group *lg, 4646 int order, int total_entries) 4647 { 4648 ext4_group_t group = 0; 4649 struct ext4_buddy e4b; 4650 struct list_head discard_list; 4651 struct ext4_prealloc_space *pa, *tmp; 4652 4653 mb_debug(sb, "discard locality group preallocation\n"); 4654 4655 INIT_LIST_HEAD(&discard_list); 4656 4657 spin_lock(&lg->lg_prealloc_lock); 4658 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 4659 pa_inode_list, 4660 lockdep_is_held(&lg->lg_prealloc_lock)) { 4661 spin_lock(&pa->pa_lock); 4662 if (atomic_read(&pa->pa_count)) { 4663 /* 4664 * This is the pa that we just used 4665 * for block allocation. So don't 4666 * free that 4667 */ 4668 spin_unlock(&pa->pa_lock); 4669 continue; 4670 } 4671 if (pa->pa_deleted) { 4672 spin_unlock(&pa->pa_lock); 4673 continue; 4674 } 4675 /* only lg prealloc space */ 4676 BUG_ON(pa->pa_type != MB_GROUP_PA); 4677 4678 /* seems this one can be freed ... */ 4679 ext4_mb_mark_pa_deleted(sb, pa); 4680 spin_unlock(&pa->pa_lock); 4681 4682 list_del_rcu(&pa->pa_inode_list); 4683 list_add(&pa->u.pa_tmp_list, &discard_list); 4684 4685 total_entries--; 4686 if (total_entries <= 5) { 4687 /* 4688 * we want to keep only 5 entries 4689 * allowing it to grow to 8. This 4690 * mak sure we don't call discard 4691 * soon for this list. 
4692 */ 4693 break; 4694 } 4695 } 4696 spin_unlock(&lg->lg_prealloc_lock); 4697 4698 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4699 int err; 4700 4701 group = ext4_get_group_number(sb, pa->pa_pstart); 4702 err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 4703 GFP_NOFS|__GFP_NOFAIL); 4704 if (err) { 4705 ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 4706 err, group); 4707 continue; 4708 } 4709 ext4_lock_group(sb, group); 4710 list_del(&pa->pa_group_list); 4711 ext4_mb_release_group_pa(&e4b, pa); 4712 ext4_unlock_group(sb, group); 4713 4714 ext4_mb_unload_buddy(&e4b); 4715 list_del(&pa->u.pa_tmp_list); 4716 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4717 } 4718 } 4719 4720 /* 4721 * We have incremented pa_count. So it cannot be freed at this 4722 * point. Also we hold lg_mutex. So no parallel allocation is 4723 * possible from this lg. That means pa_free cannot be updated. 4724 * 4725 * A parallel ext4_mb_discard_group_preallocations is possible. 4726 * which can cause the lg_prealloc_list to be updated. 4727 */ 4728 4729 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4730 { 4731 int order, added = 0, lg_prealloc_count = 1; 4732 struct super_block *sb = ac->ac_sb; 4733 struct ext4_locality_group *lg = ac->ac_lg; 4734 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4735 4736 order = fls(pa->pa_free) - 1; 4737 if (order > PREALLOC_TB_SIZE - 1) 4738 /* The max size of hash table is PREALLOC_TB_SIZE */ 4739 order = PREALLOC_TB_SIZE - 1; 4740 /* Add the prealloc space to lg */ 4741 spin_lock(&lg->lg_prealloc_lock); 4742 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4743 pa_inode_list, 4744 lockdep_is_held(&lg->lg_prealloc_lock)) { 4745 spin_lock(&tmp_pa->pa_lock); 4746 if (tmp_pa->pa_deleted) { 4747 spin_unlock(&tmp_pa->pa_lock); 4748 continue; 4749 } 4750 if (!added && pa->pa_free < tmp_pa->pa_free) { 4751 /* Add to the tail of the previous entry */ 4752 list_add_tail_rcu(&pa->pa_inode_list, 4753 &tmp_pa->pa_inode_list); 4754 added = 1; 4755 /* 4756 * we want to count the total 4757 * number of entries in the list 4758 */ 4759 } 4760 spin_unlock(&tmp_pa->pa_lock); 4761 lg_prealloc_count++; 4762 } 4763 if (!added) 4764 list_add_tail_rcu(&pa->pa_inode_list, 4765 &lg->lg_prealloc_list[order]); 4766 spin_unlock(&lg->lg_prealloc_lock); 4767 4768 /* Now trim the list to be not more than 8 elements */ 4769 if (lg_prealloc_count > 8) { 4770 ext4_mb_discard_lg_preallocations(sb, lg, 4771 order, lg_prealloc_count); 4772 return; 4773 } 4774 return ; 4775 } 4776 4777 /* 4778 * if per-inode prealloc list is too long, trim some PA 4779 */ 4780 static void ext4_mb_trim_inode_pa(struct inode *inode) 4781 { 4782 struct ext4_inode_info *ei = EXT4_I(inode); 4783 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4784 int count, delta; 4785 4786 count = atomic_read(&ei->i_prealloc_active); 4787 delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; 4788 if (count > sbi->s_mb_max_inode_prealloc + delta) { 4789 count -= sbi->s_mb_max_inode_prealloc; 4790 ext4_discard_preallocations(inode, count); 4791 } 4792 } 4793 4794 /* 4795 * release all resource we used in allocation 4796 */ 4797 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 4798 { 4799 struct inode *inode = ac->ac_inode; 4800 struct ext4_inode_info *ei = EXT4_I(inode); 4801 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4802 struct ext4_prealloc_space *pa = ac->ac_pa; 4803 if (pa) { 4804 if (pa->pa_type == MB_GROUP_PA) { 4805 /* see comment in 
ext4_mb_use_group_pa() */ 4806 spin_lock(&pa->pa_lock); 4807 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4808 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4809 pa->pa_free -= ac->ac_b_ex.fe_len; 4810 pa->pa_len -= ac->ac_b_ex.fe_len; 4811 spin_unlock(&pa->pa_lock); 4812 4813 /* 4814 * We want to add the pa to the right bucket. 4815 * Remove it from the list and while adding 4816 * make sure the list to which we are adding 4817 * doesn't grow big. 4818 */ 4819 if (likely(pa->pa_free)) { 4820 spin_lock(pa->pa_obj_lock); 4821 list_del_rcu(&pa->pa_inode_list); 4822 spin_unlock(pa->pa_obj_lock); 4823 ext4_mb_add_n_trim(ac); 4824 } 4825 } 4826 4827 if (pa->pa_type == MB_INODE_PA) { 4828 /* 4829 * treat per-inode prealloc list as a lru list, then try 4830 * to trim the least recently used PA. 4831 */ 4832 spin_lock(pa->pa_obj_lock); 4833 list_move(&pa->pa_inode_list, &ei->i_prealloc_list); 4834 spin_unlock(pa->pa_obj_lock); 4835 } 4836 4837 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4838 } 4839 if (ac->ac_bitmap_page) 4840 put_page(ac->ac_bitmap_page); 4841 if (ac->ac_buddy_page) 4842 put_page(ac->ac_buddy_page); 4843 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4844 mutex_unlock(&ac->ac_lg->lg_mutex); 4845 ext4_mb_collect_stats(ac); 4846 ext4_mb_trim_inode_pa(inode); 4847 return 0; 4848 } 4849 4850 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 4851 { 4852 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4853 int ret; 4854 int freed = 0; 4855 4856 trace_ext4_mb_discard_preallocations(sb, needed); 4857 for (i = 0; i < ngroups && needed > 0; i++) { 4858 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4859 freed += ret; 4860 needed -= ret; 4861 } 4862 4863 return freed; 4864 } 4865 4866 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 4867 struct ext4_allocation_context *ac, u64 *seq) 4868 { 4869 int freed; 4870 u64 seq_retry = 0; 4871 bool ret = false; 4872 4873 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 4874 if (freed) { 4875 ret = true; 4876 goto out_dbg; 4877 } 4878 seq_retry = ext4_get_discard_pa_seq_sum(); 4879 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 4880 ac->ac_flags |= EXT4_MB_STRICT_CHECK; 4881 *seq = seq_retry; 4882 ret = true; 4883 } 4884 4885 out_dbg: 4886 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? 
"yes" : "no"); 4887 return ret; 4888 } 4889 4890 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 4891 struct ext4_allocation_request *ar, int *errp); 4892 4893 /* 4894 * Main entry point into mballoc to allocate blocks 4895 * it tries to use preallocation first, then falls back 4896 * to usual allocation 4897 */ 4898 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4899 struct ext4_allocation_request *ar, int *errp) 4900 { 4901 struct ext4_allocation_context *ac = NULL; 4902 struct ext4_sb_info *sbi; 4903 struct super_block *sb; 4904 ext4_fsblk_t block = 0; 4905 unsigned int inquota = 0; 4906 unsigned int reserv_clstrs = 0; 4907 u64 seq; 4908 4909 might_sleep(); 4910 sb = ar->inode->i_sb; 4911 sbi = EXT4_SB(sb); 4912 4913 trace_ext4_request_blocks(ar); 4914 if (sbi->s_mount_state & EXT4_FC_REPLAY) 4915 return ext4_mb_new_blocks_simple(handle, ar, errp); 4916 4917 /* Allow to use superuser reservation for quota file */ 4918 if (ext4_is_quota_file(ar->inode)) 4919 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 4920 4921 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 4922 /* Without delayed allocation we need to verify 4923 * there is enough free blocks to do block allocation 4924 * and verify allocation doesn't exceed the quota limits. 4925 */ 4926 while (ar->len && 4927 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 4928 4929 /* let others to free the space */ 4930 cond_resched(); 4931 ar->len = ar->len >> 1; 4932 } 4933 if (!ar->len) { 4934 ext4_mb_show_pa(sb); 4935 *errp = -ENOSPC; 4936 return 0; 4937 } 4938 reserv_clstrs = ar->len; 4939 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 4940 dquot_alloc_block_nofail(ar->inode, 4941 EXT4_C2B(sbi, ar->len)); 4942 } else { 4943 while (ar->len && 4944 dquot_alloc_block(ar->inode, 4945 EXT4_C2B(sbi, ar->len))) { 4946 4947 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4948 ar->len--; 4949 } 4950 } 4951 inquota = ar->len; 4952 if (ar->len == 0) { 4953 *errp = -EDQUOT; 4954 goto out; 4955 } 4956 } 4957 4958 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 4959 if (!ac) { 4960 ar->len = 0; 4961 *errp = -ENOMEM; 4962 goto out; 4963 } 4964 4965 *errp = ext4_mb_initialize_context(ac, ar); 4966 if (*errp) { 4967 ar->len = 0; 4968 goto out; 4969 } 4970 4971 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 4972 seq = this_cpu_read(discard_pa_seq); 4973 if (!ext4_mb_use_preallocated(ac)) { 4974 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 4975 ext4_mb_normalize_request(ac, ar); 4976 4977 *errp = ext4_mb_pa_alloc(ac); 4978 if (*errp) 4979 goto errout; 4980 repeat: 4981 /* allocate space in core */ 4982 *errp = ext4_mb_regular_allocator(ac); 4983 /* 4984 * pa allocated above is added to grp->bb_prealloc_list only 4985 * when we were able to allocate some block i.e. when 4986 * ac->ac_status == AC_STATUS_FOUND. 4987 * And error from above mean ac->ac_status != AC_STATUS_FOUND 4988 * So we have to free this pa here itself. 
4989 */ 4990 if (*errp) { 4991 ext4_mb_pa_free(ac); 4992 ext4_discard_allocated_blocks(ac); 4993 goto errout; 4994 } 4995 if (ac->ac_status == AC_STATUS_FOUND && 4996 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 4997 ext4_mb_pa_free(ac); 4998 } 4999 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 5000 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 5001 if (*errp) { 5002 ext4_discard_allocated_blocks(ac); 5003 goto errout; 5004 } else { 5005 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5006 ar->len = ac->ac_b_ex.fe_len; 5007 } 5008 } else { 5009 if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 5010 goto repeat; 5011 /* 5012 * If block allocation fails then the pa allocated above 5013 * needs to be freed here itself. 5014 */ 5015 ext4_mb_pa_free(ac); 5016 *errp = -ENOSPC; 5017 } 5018 5019 errout: 5020 if (*errp) { 5021 ac->ac_b_ex.fe_len = 0; 5022 ar->len = 0; 5023 ext4_mb_show_ac(ac); 5024 } 5025 ext4_mb_release_context(ac); 5026 out: 5027 if (ac) 5028 kmem_cache_free(ext4_ac_cachep, ac); 5029 if (inquota && ar->len < inquota) 5030 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 5031 if (!ar->len) { 5032 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 5033 /* release all the reserved blocks if non delalloc */ 5034 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 5035 reserv_clstrs); 5036 } 5037 5038 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 5039 5040 return block; 5041 } 5042 5043 /* 5044 * We can merge two free data extents only if the physical blocks 5045 * are contiguous, AND the extents were freed by the same transaction, 5046 * AND the blocks are associated with the same group. 5047 */ 5048 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 5049 struct ext4_free_data *entry, 5050 struct ext4_free_data *new_entry, 5051 struct rb_root *entry_rb_root) 5052 { 5053 if ((entry->efd_tid != new_entry->efd_tid) || 5054 (entry->efd_group != new_entry->efd_group)) 5055 return; 5056 if (entry->efd_start_cluster + entry->efd_count == 5057 new_entry->efd_start_cluster) { 5058 new_entry->efd_start_cluster = entry->efd_start_cluster; 5059 new_entry->efd_count += entry->efd_count; 5060 } else if (new_entry->efd_start_cluster + new_entry->efd_count == 5061 entry->efd_start_cluster) { 5062 new_entry->efd_count += entry->efd_count; 5063 } else 5064 return; 5065 spin_lock(&sbi->s_md_lock); 5066 list_del(&entry->efd_list); 5067 spin_unlock(&sbi->s_md_lock); 5068 rb_erase(&entry->efd_node, entry_rb_root); 5069 kmem_cache_free(ext4_free_data_cachep, entry); 5070 } 5071 5072 static noinline_for_stack int 5073 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 5074 struct ext4_free_data *new_entry) 5075 { 5076 ext4_group_t group = e4b->bd_group; 5077 ext4_grpblk_t cluster; 5078 ext4_grpblk_t clusters = new_entry->efd_count; 5079 struct ext4_free_data *entry; 5080 struct ext4_group_info *db = e4b->bd_info; 5081 struct super_block *sb = e4b->bd_sb; 5082 struct ext4_sb_info *sbi = EXT4_SB(sb); 5083 struct rb_node **n = &db->bb_free_root.rb_node, *node; 5084 struct rb_node *parent = NULL, *new_node; 5085 5086 BUG_ON(!ext4_handle_valid(handle)); 5087 BUG_ON(e4b->bd_bitmap_page == NULL); 5088 BUG_ON(e4b->bd_buddy_page == NULL); 5089 5090 new_node = &new_entry->efd_node; 5091 cluster = new_entry->efd_start_cluster; 5092 5093 if (!*n) { 5094 /* first free block exent. 
We need to 5095 protect buddy cache from being freed, 5096 * otherwise we'll refresh it from 5097 * on-disk bitmap and lose not-yet-available 5098 * blocks */ 5099 get_page(e4b->bd_buddy_page); 5100 get_page(e4b->bd_bitmap_page); 5101 } 5102 while (*n) { 5103 parent = *n; 5104 entry = rb_entry(parent, struct ext4_free_data, efd_node); 5105 if (cluster < entry->efd_start_cluster) 5106 n = &(*n)->rb_left; 5107 else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 5108 n = &(*n)->rb_right; 5109 else { 5110 ext4_grp_locked_error(sb, group, 0, 5111 ext4_group_first_block_no(sb, group) + 5112 EXT4_C2B(sbi, cluster), 5113 "Block already on to-be-freed list"); 5114 kmem_cache_free(ext4_free_data_cachep, new_entry); 5115 return 0; 5116 } 5117 } 5118 5119 rb_link_node(new_node, parent, n); 5120 rb_insert_color(new_node, &db->bb_free_root); 5121 5122 /* Now try to see the extent can be merged to left and right */ 5123 node = rb_prev(new_node); 5124 if (node) { 5125 entry = rb_entry(node, struct ext4_free_data, efd_node); 5126 ext4_try_merge_freed_extent(sbi, entry, new_entry, 5127 &(db->bb_free_root)); 5128 } 5129 5130 node = rb_next(new_node); 5131 if (node) { 5132 entry = rb_entry(node, struct ext4_free_data, efd_node); 5133 ext4_try_merge_freed_extent(sbi, entry, new_entry, 5134 &(db->bb_free_root)); 5135 } 5136 5137 spin_lock(&sbi->s_md_lock); 5138 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 5139 sbi->s_mb_free_pending += clusters; 5140 spin_unlock(&sbi->s_md_lock); 5141 return 0; 5142 } 5143 5144 /* 5145 * Simple allocator for Ext4 fast commit replay path. It searches for blocks 5146 * linearly starting at the goal block and also excludes the blocks which 5147 * are going to be in use after fast commit replay. 5148 */ 5149 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 5150 struct ext4_allocation_request *ar, int *errp) 5151 { 5152 struct buffer_head *bitmap_bh; 5153 struct super_block *sb = ar->inode->i_sb; 5154 ext4_group_t group; 5155 ext4_grpblk_t blkoff; 5156 int i = sb->s_blocksize; 5157 ext4_fsblk_t goal, block; 5158 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 5159 5160 goal = ar->goal; 5161 if (goal < le32_to_cpu(es->s_first_data_block) || 5162 goal >= ext4_blocks_count(es)) 5163 goal = le32_to_cpu(es->s_first_data_block); 5164 5165 ar->len = 0; 5166 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 5167 for (; group < ext4_get_groups_count(sb); group++) { 5168 bitmap_bh = ext4_read_block_bitmap(sb, group); 5169 if (IS_ERR(bitmap_bh)) { 5170 *errp = PTR_ERR(bitmap_bh); 5171 pr_warn("Failed to read block bitmap\n"); 5172 return 0; 5173 } 5174 5175 ext4_get_group_no_and_offset(sb, 5176 max(ext4_group_first_block_no(sb, group), goal), 5177 NULL, &blkoff); 5178 i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize, 5179 blkoff); 5180 brelse(bitmap_bh); 5181 if (i >= sb->s_blocksize) 5182 continue; 5183 if (ext4_fc_replay_check_excluded(sb, 5184 ext4_group_first_block_no(sb, group) + i)) 5185 continue; 5186 break; 5187 } 5188 5189 if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize) 5190 return 0; 5191 5192 block = ext4_group_first_block_no(sb, group) + i; 5193 ext4_mb_mark_bb(sb, block, 1, 1); 5194 ar->len = 1; 5195 5196 return block; 5197 } 5198 5199 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 5200 unsigned long count) 5201 { 5202 struct buffer_head *bitmap_bh; 5203 struct super_block *sb = inode->i_sb; 5204 struct ext4_group_desc *gdp; 5205 struct buffer_head *gdp_bh; 5206 
ext4_group_t group; 5207 ext4_grpblk_t blkoff; 5208 int already_freed = 0, err, i; 5209 5210 ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 5211 bitmap_bh = ext4_read_block_bitmap(sb, group); 5212 if (IS_ERR(bitmap_bh)) { 5213 err = PTR_ERR(bitmap_bh); 5214 pr_warn("Failed to read block bitmap\n"); 5215 return; 5216 } 5217 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 5218 if (!gdp) 5219 return; 5220 5221 for (i = 0; i < count; i++) { 5222 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 5223 already_freed++; 5224 } 5225 mb_clear_bits(bitmap_bh->b_data, blkoff, count); 5226 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 5227 if (err) 5228 return; 5229 ext4_free_group_clusters_set( 5230 sb, gdp, ext4_free_group_clusters(sb, gdp) + 5231 count - already_freed); 5232 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); 5233 ext4_group_desc_csum_set(sb, group, gdp); 5234 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 5235 sync_dirty_buffer(bitmap_bh); 5236 sync_dirty_buffer(gdp_bh); 5237 brelse(bitmap_bh); 5238 } 5239 5240 /** 5241 * ext4_free_blocks() -- Free given blocks and update quota 5242 * @handle: handle for this transaction 5243 * @inode: inode 5244 * @bh: optional buffer of the block to be freed 5245 * @block: starting physical block to be freed 5246 * @count: number of blocks to be freed 5247 * @flags: flags used by ext4_free_blocks 5248 */ 5249 void ext4_free_blocks(handle_t *handle, struct inode *inode, 5250 struct buffer_head *bh, ext4_fsblk_t block, 5251 unsigned long count, int flags) 5252 { 5253 struct buffer_head *bitmap_bh = NULL; 5254 struct super_block *sb = inode->i_sb; 5255 struct ext4_group_desc *gdp; 5256 unsigned int overflow; 5257 ext4_grpblk_t bit; 5258 struct buffer_head *gd_bh; 5259 ext4_group_t block_group; 5260 struct ext4_sb_info *sbi; 5261 struct ext4_buddy e4b; 5262 unsigned int count_clusters; 5263 int err = 0; 5264 int ret; 5265 5266 sbi = EXT4_SB(sb); 5267 5268 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 5269 ext4_free_blocks_simple(inode, block, count); 5270 return; 5271 } 5272 5273 might_sleep(); 5274 if (bh) { 5275 if (block) 5276 BUG_ON(block != bh->b_blocknr); 5277 else 5278 block = bh->b_blocknr; 5279 } 5280 5281 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 5282 !ext4_inode_block_valid(inode, block, count)) { 5283 ext4_error(sb, "Freeing blocks not in datazone - " 5284 "block = %llu, count = %lu", block, count); 5285 goto error_return; 5286 } 5287 5288 ext4_debug("freeing block %llu\n", block); 5289 trace_ext4_free_blocks(inode, block, count, flags); 5290 5291 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 5292 BUG_ON(count > 1); 5293 5294 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 5295 inode, bh, block); 5296 } 5297 5298 /* 5299 * If the extent to be freed does not begin on a cluster 5300 * boundary, we need to deal with partial clusters at the 5301 * beginning and end of the extent. Normally we will free 5302 * blocks at the beginning or the end unless we are explicitly 5303 * requested to avoid doing so. 
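	 * For example, on a hypothetical bigalloc file system with
	 * s_cluster_ratio = 4, freeing blocks 6..13 starts two blocks into
	 * a cluster: the range is either extended down to block 4 or, if
	 * EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER is set, shrunk to start at
	 * block 8; the tail is handled the same way via EXT4_LBLK_COFF().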
5304 */ 5305 overflow = EXT4_PBLK_COFF(sbi, block); 5306 if (overflow) { 5307 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 5308 overflow = sbi->s_cluster_ratio - overflow; 5309 block += overflow; 5310 if (count > overflow) 5311 count -= overflow; 5312 else 5313 return; 5314 } else { 5315 block -= overflow; 5316 count += overflow; 5317 } 5318 } 5319 overflow = EXT4_LBLK_COFF(sbi, count); 5320 if (overflow) { 5321 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 5322 if (count > overflow) 5323 count -= overflow; 5324 else 5325 return; 5326 } else 5327 count += sbi->s_cluster_ratio - overflow; 5328 } 5329 5330 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 5331 int i; 5332 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 5333 5334 for (i = 0; i < count; i++) { 5335 cond_resched(); 5336 if (is_metadata) 5337 bh = sb_find_get_block(inode->i_sb, block + i); 5338 ext4_forget(handle, is_metadata, inode, bh, block + i); 5339 } 5340 } 5341 5342 do_more: 5343 overflow = 0; 5344 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 5345 5346 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 5347 ext4_get_group_info(sb, block_group)))) 5348 return; 5349 5350 /* 5351 * Check to see if we are freeing blocks across a group 5352 * boundary. 5353 */ 5354 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 5355 overflow = EXT4_C2B(sbi, bit) + count - 5356 EXT4_BLOCKS_PER_GROUP(sb); 5357 count -= overflow; 5358 } 5359 count_clusters = EXT4_NUM_B2C(sbi, count); 5360 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 5361 if (IS_ERR(bitmap_bh)) { 5362 err = PTR_ERR(bitmap_bh); 5363 bitmap_bh = NULL; 5364 goto error_return; 5365 } 5366 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 5367 if (!gdp) { 5368 err = -EIO; 5369 goto error_return; 5370 } 5371 5372 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 5373 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 5374 in_range(block, ext4_inode_table(sb, gdp), 5375 sbi->s_itb_per_group) || 5376 in_range(block + count - 1, ext4_inode_table(sb, gdp), 5377 sbi->s_itb_per_group)) { 5378 5379 ext4_error(sb, "Freeing blocks in system zone - " 5380 "Block = %llu, count = %lu", block, count); 5381 /* err = 0. ext4_std_error should be a no op */ 5382 goto error_return; 5383 } 5384 5385 BUFFER_TRACE(bitmap_bh, "getting write access"); 5386 err = ext4_journal_get_write_access(handle, bitmap_bh); 5387 if (err) 5388 goto error_return; 5389 5390 /* 5391 * We are about to modify some metadata. Call the journal APIs 5392 * to unshare ->b_data if a currently-committing transaction is 5393 * using it 5394 */ 5395 BUFFER_TRACE(gd_bh, "get_write_access"); 5396 err = ext4_journal_get_write_access(handle, gd_bh); 5397 if (err) 5398 goto error_return; 5399 #ifdef AGGRESSIVE_CHECK 5400 { 5401 int i; 5402 for (i = 0; i < count_clusters; i++) 5403 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 5404 } 5405 #endif 5406 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 5407 5408 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 5409 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 5410 GFP_NOFS|__GFP_NOFAIL); 5411 if (err) 5412 goto error_return; 5413 5414 /* 5415 * We need to make sure we don't reuse the freed block until after the 5416 * transaction is committed. We make an exception if the inode is to be 5417 * written in writeback mode since writeback mode has weak data 5418 * consistency guarantees. 
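	 * Such ranges are queued via ext4_mb_free_metadata() and are only
	 * returned to the buddy after the transaction commits.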
5419 */ 5420 if (ext4_handle_valid(handle) && 5421 ((flags & EXT4_FREE_BLOCKS_METADATA) || 5422 !ext4_should_writeback_data(inode))) { 5423 struct ext4_free_data *new_entry; 5424 /* 5425 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 5426 * to fail. 5427 */ 5428 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 5429 GFP_NOFS|__GFP_NOFAIL); 5430 new_entry->efd_start_cluster = bit; 5431 new_entry->efd_group = block_group; 5432 new_entry->efd_count = count_clusters; 5433 new_entry->efd_tid = handle->h_transaction->t_tid; 5434 5435 ext4_lock_group(sb, block_group); 5436 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 5437 ext4_mb_free_metadata(handle, &e4b, new_entry); 5438 } else { 5439 /* need to update group_info->bb_free and bitmap 5440 * with group lock held. generate_buddy look at 5441 * them with group lock_held 5442 */ 5443 if (test_opt(sb, DISCARD)) { 5444 err = ext4_issue_discard(sb, block_group, bit, count, 5445 NULL); 5446 if (err && err != -EOPNOTSUPP) 5447 ext4_msg(sb, KERN_WARNING, "discard request in" 5448 " group:%d block:%d count:%lu failed" 5449 " with %d", block_group, bit, count, 5450 err); 5451 } else 5452 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 5453 5454 ext4_lock_group(sb, block_group); 5455 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 5456 mb_free_blocks(inode, &e4b, bit, count_clusters); 5457 } 5458 5459 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 5460 ext4_free_group_clusters_set(sb, gdp, ret); 5461 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); 5462 ext4_group_desc_csum_set(sb, block_group, gdp); 5463 ext4_unlock_group(sb, block_group); 5464 5465 if (sbi->s_log_groups_per_flex) { 5466 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 5467 atomic64_add(count_clusters, 5468 &sbi_array_rcu_deref(sbi, s_flex_groups, 5469 flex_group)->free_clusters); 5470 } 5471 5472 /* 5473 * on a bigalloc file system, defer the s_freeclusters_counter 5474 * update to the caller (ext4_remove_space and friends) so they 5475 * can determine if a cluster freed here should be rereserved 5476 */ 5477 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 5478 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 5479 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 5480 percpu_counter_add(&sbi->s_freeclusters_counter, 5481 count_clusters); 5482 } 5483 5484 ext4_mb_unload_buddy(&e4b); 5485 5486 /* We dirtied the bitmap block */ 5487 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 5488 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 5489 5490 /* And the group descriptor block */ 5491 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 5492 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 5493 if (!err) 5494 err = ret; 5495 5496 if (overflow && !err) { 5497 block += count; 5498 count = overflow; 5499 put_bh(bitmap_bh); 5500 goto do_more; 5501 } 5502 error_return: 5503 brelse(bitmap_bh); 5504 ext4_std_error(sb, err); 5505 return; 5506 } 5507 5508 /** 5509 * ext4_group_add_blocks() -- Add given blocks to an existing group 5510 * @handle: handle to this transaction 5511 * @sb: super block 5512 * @block: start physical block to add to the block group 5513 * @count: number of blocks to free 5514 * 5515 * This marks the blocks as free in the bitmap and buddy. 
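 * Return: 0 on success, or a negative error code on failure.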
5516 */ 5517 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 5518 ext4_fsblk_t block, unsigned long count) 5519 { 5520 struct buffer_head *bitmap_bh = NULL; 5521 struct buffer_head *gd_bh; 5522 ext4_group_t block_group; 5523 ext4_grpblk_t bit; 5524 unsigned int i; 5525 struct ext4_group_desc *desc; 5526 struct ext4_sb_info *sbi = EXT4_SB(sb); 5527 struct ext4_buddy e4b; 5528 int err = 0, ret, free_clusters_count; 5529 ext4_grpblk_t clusters_freed; 5530 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 5531 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 5532 unsigned long cluster_count = last_cluster - first_cluster + 1; 5533 5534 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 5535 5536 if (count == 0) 5537 return 0; 5538 5539 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 5540 /* 5541 * Check to see if we are freeing blocks across a group 5542 * boundary. 5543 */ 5544 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 5545 ext4_warning(sb, "too many blocks added to group %u", 5546 block_group); 5547 err = -EINVAL; 5548 goto error_return; 5549 } 5550 5551 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 5552 if (IS_ERR(bitmap_bh)) { 5553 err = PTR_ERR(bitmap_bh); 5554 bitmap_bh = NULL; 5555 goto error_return; 5556 } 5557 5558 desc = ext4_get_group_desc(sb, block_group, &gd_bh); 5559 if (!desc) { 5560 err = -EIO; 5561 goto error_return; 5562 } 5563 5564 if (in_range(ext4_block_bitmap(sb, desc), block, count) || 5565 in_range(ext4_inode_bitmap(sb, desc), block, count) || 5566 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || 5567 in_range(block + count - 1, ext4_inode_table(sb, desc), 5568 sbi->s_itb_per_group)) { 5569 ext4_error(sb, "Adding blocks in system zones - " 5570 "Block = %llu, count = %lu", 5571 block, count); 5572 err = -EINVAL; 5573 goto error_return; 5574 } 5575 5576 BUFFER_TRACE(bitmap_bh, "getting write access"); 5577 err = ext4_journal_get_write_access(handle, bitmap_bh); 5578 if (err) 5579 goto error_return; 5580 5581 /* 5582 * We are about to modify some metadata. Call the journal APIs 5583 * to unshare ->b_data if a currently-committing transaction is 5584 * using it 5585 */ 5586 BUFFER_TRACE(gd_bh, "get_write_access"); 5587 err = ext4_journal_get_write_access(handle, gd_bh); 5588 if (err) 5589 goto error_return; 5590 5591 for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 5592 BUFFER_TRACE(bitmap_bh, "clear bit"); 5593 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 5594 ext4_error(sb, "bit already cleared for block %llu", 5595 (ext4_fsblk_t)(block + i)); 5596 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 5597 } else { 5598 clusters_freed++; 5599 } 5600 } 5601 5602 err = ext4_mb_load_buddy(sb, block_group, &e4b); 5603 if (err) 5604 goto error_return; 5605 5606 /* 5607 * need to update group_info->bb_free and bitmap 5608 * with group lock held. 
/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb:		super block for the file system
 * @start:	starting block of the free extent in the alloc. group
 * @count:	number of blocks to TRIM
 * @group:	alloc. group we are working with
 * @e4b:	ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called with the group lock held.
 */
static int ext4_trim_extent(struct super_block *sb, int start, int count,
			    ext4_group_t group, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark the blocks as used so that no one can reuse them while
	 * they are being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}
/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:			super block for file system
 * @group:		group to be trimmed
 * @start:		first group block to examine
 * @max:		last group block to examine
 * @minblocks:		minimum extent block count
 *
 * ext4_trim_all_free walks through the group's block bitmap searching for
 * free extents of at least @minblocks blocks. For each one found,
 * ext4_trim_extent is called to mark the extent as used in the buddy bitmap,
 * issue a TRIM command on it, and then free it again in the buddy bitmap.
 * This is repeated until the whole group has been scanned.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	void *bitmap;
	ext4_grpblk_t next, count = 0, free_count = 0;
	struct ext4_buddy e4b;
	int ret = 0;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);
	/*
	 * If the group was already trimmed by a previous FITRIM run whose
	 * minimum extent size was not larger than this one, nothing new can
	 * be discarded here, so skip the scan.
	 */
	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
		goto out;

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			ret = ext4_trim_extent(sb, start,
					       next - start, group, &e4b);
			if (ret && ret != -EOPNOTSUPP)
				break;
			ret = 0;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, group);
			cond_resched();
			ext4_lock_group(sb, group);
		}

		if ((e4b.bd_info->bb_free - free_count) < minblocks)
			break;
	}

	if (!ret) {
		ret = count;
		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
	}
out:
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   count, group);

	return ret;
}
/**
 * ext4_trim_fs() -- trim ioctl handler function
 * @sb:			superblock for filesystem
 * @range:		fstrim_range structure
 *
 * range->start:	first byte to trim
 * range->len:		number of bytes to trim from start
 * range->minlen:	minimum extent length in bytes
 *
 * ext4_trim_fs goes through all allocation groups containing bytes from
 * start to start+len. For each such group the ext4_trim_all_free function
 * is invoked to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	if (end >= max_blks)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all groups except the last one, the last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
		 * change it for the last group; note that last_cluster was
		 * already computed earlier by ext4_get_group_no_and_offset().
		 */
		if (group == last_group)
			end = last_cluster;

		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}
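/*
 * Userspace-side illustration (not built as part of the kernel): ext4_trim_fs
 * is reached through the FITRIM ioctl, so a minimal sketch of the consumer
 * looks like the snippet below.  The mount point path and the 1 MiB minlen
 * are arbitrary example values.
 */
#if 0
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int trim_filesystem(const char *mntpoint)
{
	struct fstrim_range range;
	int fd = open(mntpoint, O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = ULLONG_MAX;			/* whole file system */
	range.minlen = 1024 * 1024;		/* skip extents < 1 MiB */

	if (ioctl(fd, FITRIM, &range) < 0) {
		close(fd);
		return -1;
	}

	/* on return, range.len holds the number of bytes actually trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}
#endif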
/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
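/*
 * Caller-side sketch (illustration only): a formatter callback matching the
 * ext4_mballoc_query_range_fn signature used above, plus a small wrapper that
 * summarizes one group.  The structure and function names are hypothetical;
 * the in-tree consumer of this walk is the GETFSMAP code.
 */
#if 0
struct mb_free_summary {
	ext4_grpblk_t	clusters;	/* free clusters seen so far */
	unsigned int	extents;	/* free extents seen so far */
};

static int mb_summarize_free(struct super_block *sb, ext4_group_t group,
			     ext4_grpblk_t start, ext4_grpblk_t len,
			     void *priv)
{
	struct mb_free_summary *s = priv;

	s->clusters += len;
	s->extents++;
	return 0;	/* returning non-zero stops the iteration */
}

static int mb_summarize_group(struct super_block *sb, ext4_group_t group,
			      struct mb_free_summary *s)
{
	s->clusters = 0;
	s->extents = 0;
	return ext4_mballoc_query_range(sb, group, 0,
					EXT4_CLUSTERS_PER_GROUP(sb) - 1,
					mb_summarize_free, s);
}
#endif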