/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * The allocation request involves a request for multiple blocks near to the
 * goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use group
 * preallocation or inode preallocation depending on the size of the file.
 * The size of the file could be the resulting file size we would have after
 * allocation, or the current file size, whichever is larger. If the size is
 * less than sbi->s_mb_stream_request we select group preallocation. The
 * default value of s_mb_stream_request is 16 blocks. This can also be tuned
 * via /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space.
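 *
 * Schematically (an illustrative sketch only, not the exact code used later
 * in this file; "lblk" is just a stand-in name for the requested logical
 * block, and block/cluster unit conversion is ignored), the check against
 * an inode prealloc space is a containment test on the logical block:
 *
 *	if (pa->pa_lstart <= lblk &&
 *	    lblk < pa->pa_lstart + pa->pa_len)
 *		-> this prealloc space may serve the allocation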
 * This makes sure that we have contiguous physical blocks representing
 * the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via the inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. It consists of the block
 * bitmap and the buddy information, stored in the inode as:
 *
 * { page }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks.  So it can have information regarding
 * groups_per_page groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
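 *
 * For example (illustrative numbers only): a normalized request of 64
 * clusters is 2^6, i.e. order 6; with s_mb_order2_reqs at, say, its usual
 * default of 2, we have 6 >= 2, so the allocator can scan the order-6
 * buddy bitmaps directly instead of walking the block bitmap bit by bit.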
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocated range can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers of
 * blocks: how many blocks are marked used/free in the on-disk bitmap, buddy
 * and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic
 * (a rough worked example follows).
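 *
 * as a rough, made-up illustration of the accounting above: creating an
 * inode PA of N = 16 clusters marks 16 more clusters used in the buddy
 * (buddy += 16, PA = 16). allocating 10 of them sets 10 bits on disk
 * (on-disk += 10, PA = 6). discarding the PA then frees in the buddy the
 * clusters in its range that are not set in the on-disk bitmap (6 here)
 * and sets PA = 0, which is why the discard consults the on-disk bitmap
 * rather than the PA counter alone.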
 * given that some of them can block, we'd have to use something like
 * semaphores, killing performance on high-end SMP hardware. let's try to
 * relax it using the following knowledge:
 *  1) if a buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 *     bitmap has a bit set and a PA claims the same block, it's OK. IOW,
 *     one can set a bit in the on-disk bitmap if the buddy has the same
 *     bit set or/and a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, so the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data; given (3) we care that the PA -= N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again, PA -= N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
 *  - the PA changes only after the on-disk bitmap
 *  - a discard must not compete with an init. either the init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and PAs is done atomically
 *
 * a special case is when a PA has been consumed to emptiness:
 * there is no need to modify the buddy in this case, but we should still
 * care about concurrent init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
382 #endif 383 return addr; 384 } 385 386 static inline int mb_test_bit(int bit, void *addr) 387 { 388 /* 389 * ext4_test_bit on architecture like powerpc 390 * needs unsigned long aligned address 391 */ 392 addr = mb_correct_addr_and_bit(&bit, addr); 393 return ext4_test_bit(bit, addr); 394 } 395 396 static inline void mb_set_bit(int bit, void *addr) 397 { 398 addr = mb_correct_addr_and_bit(&bit, addr); 399 ext4_set_bit(bit, addr); 400 } 401 402 static inline void mb_clear_bit(int bit, void *addr) 403 { 404 addr = mb_correct_addr_and_bit(&bit, addr); 405 ext4_clear_bit(bit, addr); 406 } 407 408 static inline int mb_test_and_clear_bit(int bit, void *addr) 409 { 410 addr = mb_correct_addr_and_bit(&bit, addr); 411 return ext4_test_and_clear_bit(bit, addr); 412 } 413 414 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 415 { 416 int fix = 0, ret, tmpmax; 417 addr = mb_correct_addr_and_bit(&fix, addr); 418 tmpmax = max + fix; 419 start += fix; 420 421 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 422 if (ret > max) 423 return max; 424 return ret; 425 } 426 427 static inline int mb_find_next_bit(void *addr, int max, int start) 428 { 429 int fix = 0, ret, tmpmax; 430 addr = mb_correct_addr_and_bit(&fix, addr); 431 tmpmax = max + fix; 432 start += fix; 433 434 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 435 if (ret > max) 436 return max; 437 return ret; 438 } 439 440 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 441 { 442 char *bb; 443 444 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 445 BUG_ON(max == NULL); 446 447 if (order > e4b->bd_blkbits + 1) { 448 *max = 0; 449 return NULL; 450 } 451 452 /* at order 0 we see each particular block */ 453 if (order == 0) { 454 *max = 1 << (e4b->bd_blkbits + 3); 455 return e4b->bd_bitmap; 456 } 457 458 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 459 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 460 461 return bb; 462 } 463 464 #ifdef DOUBLE_CHECK 465 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 466 int first, int count) 467 { 468 int i; 469 struct super_block *sb = e4b->bd_sb; 470 471 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 472 return; 473 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 474 for (i = 0; i < count; i++) { 475 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 476 ext4_fsblk_t blocknr; 477 478 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 479 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 480 ext4_grp_locked_error(sb, e4b->bd_group, 481 inode ? 
inode->i_ino : 0, 482 blocknr, 483 "freeing block already freed " 484 "(bit %u)", 485 first + i); 486 } 487 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 488 } 489 } 490 491 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 492 { 493 int i; 494 495 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 496 return; 497 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 498 for (i = 0; i < count; i++) { 499 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 500 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 501 } 502 } 503 504 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 505 { 506 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 507 unsigned char *b1, *b2; 508 int i; 509 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 510 b2 = (unsigned char *) bitmap; 511 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 512 if (b1[i] != b2[i]) { 513 ext4_msg(e4b->bd_sb, KERN_ERR, 514 "corruption in group %u " 515 "at byte %u(%u): %x in copy != %x " 516 "on disk/prealloc", 517 e4b->bd_group, i, i * 8, b1[i], b2[i]); 518 BUG(); 519 } 520 } 521 } 522 } 523 524 #else 525 static inline void mb_free_blocks_double(struct inode *inode, 526 struct ext4_buddy *e4b, int first, int count) 527 { 528 return; 529 } 530 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 531 int first, int count) 532 { 533 return; 534 } 535 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 536 { 537 return; 538 } 539 #endif 540 541 #ifdef AGGRESSIVE_CHECK 542 543 #define MB_CHECK_ASSERT(assert) \ 544 do { \ 545 if (!(assert)) { \ 546 printk(KERN_EMERG \ 547 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 548 function, file, line, # assert); \ 549 BUG(); \ 550 } \ 551 } while (0) 552 553 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 554 const char *function, int line) 555 { 556 struct super_block *sb = e4b->bd_sb; 557 int order = e4b->bd_blkbits + 1; 558 int max; 559 int max2; 560 int i; 561 int j; 562 int k; 563 int count; 564 struct ext4_group_info *grp; 565 int fragments = 0; 566 int fstart; 567 struct list_head *cur; 568 void *buddy; 569 void *buddy2; 570 571 { 572 static int mb_check_counter; 573 if (mb_check_counter++ % 100 != 0) 574 return 0; 575 } 576 577 while (order > 1) { 578 buddy = mb_find_buddy(e4b, order, &max); 579 MB_CHECK_ASSERT(buddy); 580 buddy2 = mb_find_buddy(e4b, order - 1, &max2); 581 MB_CHECK_ASSERT(buddy2); 582 MB_CHECK_ASSERT(buddy != buddy2); 583 MB_CHECK_ASSERT(max * 2 == max2); 584 585 count = 0; 586 for (i = 0; i < max; i++) { 587 588 if (mb_test_bit(i, buddy)) { 589 /* only single bit in buddy2 may be 1 */ 590 if (!mb_test_bit(i << 1, buddy2)) { 591 MB_CHECK_ASSERT( 592 mb_test_bit((i<<1)+1, buddy2)); 593 } else if (!mb_test_bit((i << 1) + 1, buddy2)) { 594 MB_CHECK_ASSERT( 595 mb_test_bit(i << 1, buddy2)); 596 } 597 continue; 598 } 599 600 /* both bits in buddy2 must be 1 */ 601 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 602 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 603 604 for (j = 0; j < (1 << order); j++) { 605 k = (i * (1 << order)) + j; 606 MB_CHECK_ASSERT( 607 !mb_test_bit(k, e4b->bd_bitmap)); 608 } 609 count++; 610 } 611 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 612 order--; 613 } 614 615 fstart = -1; 616 buddy = mb_find_buddy(e4b, 0, &max); 617 for (i = 0; i < max; i++) { 618 if (!mb_test_bit(i, buddy)) { 619 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 620 if (fstart == -1) { 621 fragments++; 622 fstart = i; 623 } 624 
continue; 625 } 626 fstart = -1; 627 /* check used bits only */ 628 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 629 buddy2 = mb_find_buddy(e4b, j, &max2); 630 k = i >> j; 631 MB_CHECK_ASSERT(k < max2); 632 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 633 } 634 } 635 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 636 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 637 638 grp = ext4_get_group_info(sb, e4b->bd_group); 639 list_for_each(cur, &grp->bb_prealloc_list) { 640 ext4_group_t groupnr; 641 struct ext4_prealloc_space *pa; 642 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 643 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 644 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 645 for (i = 0; i < pa->pa_len; i++) 646 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 647 } 648 return 0; 649 } 650 #undef MB_CHECK_ASSERT 651 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 652 __FILE__, __func__, __LINE__) 653 #else 654 #define mb_check_buddy(e4b) 655 #endif 656 657 /* 658 * Divide blocks started from @first with length @len into 659 * smaller chunks with power of 2 blocks. 660 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 661 * then increase bb_counters[] for corresponded chunk size. 662 */ 663 static void ext4_mb_mark_free_simple(struct super_block *sb, 664 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 665 struct ext4_group_info *grp) 666 { 667 struct ext4_sb_info *sbi = EXT4_SB(sb); 668 ext4_grpblk_t min; 669 ext4_grpblk_t max; 670 ext4_grpblk_t chunk; 671 unsigned short border; 672 673 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 674 675 border = 2 << sb->s_blocksize_bits; 676 677 while (len > 0) { 678 /* find how many blocks can be covered since this position */ 679 max = ffs(first | border) - 1; 680 681 /* find how many blocks of power 2 we need to mark */ 682 min = fls(len) - 1; 683 684 if (max < min) 685 min = max; 686 chunk = 1 << min; 687 688 /* mark multiblock chunks only */ 689 grp->bb_counters[min]++; 690 if (min > 0) 691 mb_clear_bit(first >> min, 692 buddy + sbi->s_mb_offsets[min]); 693 694 len -= chunk; 695 first += chunk; 696 } 697 } 698 699 /* 700 * Cache the order of the largest free extent we have available in this block 701 * group. 
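 * For example (illustrative numbers only): if bb_counters[] for a group
 * were {2, 1, 0, 3, 0, 0, ...} -- counts of free chunks of order 0, 1, 2,
 * 3, ... -- the highest order with a non-zero count is 3, so
 * bb_largest_free_order would be set to 3.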
702 */ 703 static void 704 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 705 { 706 int i; 707 int bits; 708 709 grp->bb_largest_free_order = -1; /* uninit */ 710 711 bits = sb->s_blocksize_bits + 1; 712 for (i = bits; i >= 0; i--) { 713 if (grp->bb_counters[i] > 0) { 714 grp->bb_largest_free_order = i; 715 break; 716 } 717 } 718 } 719 720 static noinline_for_stack 721 void ext4_mb_generate_buddy(struct super_block *sb, 722 void *buddy, void *bitmap, ext4_group_t group) 723 { 724 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 725 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 726 ext4_grpblk_t i = 0; 727 ext4_grpblk_t first; 728 ext4_grpblk_t len; 729 unsigned free = 0; 730 unsigned fragments = 0; 731 unsigned long long period = get_cycles(); 732 733 /* initialize buddy from bitmap which is aggregation 734 * of on-disk bitmap and preallocations */ 735 i = mb_find_next_zero_bit(bitmap, max, 0); 736 grp->bb_first_free = i; 737 while (i < max) { 738 fragments++; 739 first = i; 740 i = mb_find_next_bit(bitmap, max, i); 741 len = i - first; 742 free += len; 743 if (len > 1) 744 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 745 else 746 grp->bb_counters[0]++; 747 if (i < max) 748 i = mb_find_next_zero_bit(bitmap, max, i); 749 } 750 grp->bb_fragments = fragments; 751 752 if (free != grp->bb_free) { 753 ext4_grp_locked_error(sb, group, 0, 0, 754 "%u clusters in bitmap, %u in gd; " 755 "block bitmap corrupt.", 756 free, grp->bb_free); 757 /* 758 * If we intend to continue, we consider group descriptor 759 * corrupt and update bb_free using bitmap value 760 */ 761 grp->bb_free = free; 762 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 763 } 764 mb_set_largest_free_order(sb, grp); 765 766 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 767 768 period = get_cycles() - period; 769 spin_lock(&EXT4_SB(sb)->s_bal_lock); 770 EXT4_SB(sb)->s_mb_buddies_generated++; 771 EXT4_SB(sb)->s_mb_generation_time += period; 772 spin_unlock(&EXT4_SB(sb)->s_bal_lock); 773 } 774 775 static void mb_regenerate_buddy(struct ext4_buddy *e4b) 776 { 777 int count; 778 int order = 1; 779 void *buddy; 780 781 while ((buddy = mb_find_buddy(e4b, order++, &count))) { 782 ext4_set_bits(buddy, 0, count); 783 } 784 e4b->bd_info->bb_fragments = 0; 785 memset(e4b->bd_info->bb_counters, 0, 786 sizeof(*e4b->bd_info->bb_counters) * 787 (e4b->bd_sb->s_blocksize_bits + 2)); 788 789 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, 790 e4b->bd_bitmap, e4b->bd_group); 791 } 792 793 /* The buddy information is attached the buddy cache inode 794 * for convenience. The information regarding each group 795 * is loaded via ext4_mb_load_buddy. The information involve 796 * block bitmap and buddy information. The information are 797 * stored in the inode as 798 * 799 * { page } 800 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 801 * 802 * 803 * one block each for bitmap and buddy information. 804 * So for each group we take up 2 blocks. A page can 805 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 806 * So it can have information regarding groups_per_page which 807 * is blocks_per_page/2 808 * 809 * Locking note: This routine takes the block group lock of all groups 810 * for this page; do not hold this lock when calling this routine! 
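 *
 * A worked example of the layout arithmetic (illustrative only): with 4K
 * pages and a 1K block size, blocks_per_page = 4096 / 1024 = 4 and
 * groups_per_page = 4 / 2 = 2, so page 0 of the buddy cache inode holds
 * the bitmap and buddy blocks of groups 0 and 1, page 1 holds groups 2
 * and 3, and so on. With a 4K block size a page holds a single block, so
 * the bitmap and the buddy of a group live on two separate pages.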
811 */ 812 813 static int ext4_mb_init_cache(struct page *page, char *incore) 814 { 815 ext4_group_t ngroups; 816 int blocksize; 817 int blocks_per_page; 818 int groups_per_page; 819 int err = 0; 820 int i; 821 ext4_group_t first_group, group; 822 int first_block; 823 struct super_block *sb; 824 struct buffer_head *bhs; 825 struct buffer_head **bh = NULL; 826 struct inode *inode; 827 char *data; 828 char *bitmap; 829 struct ext4_group_info *grinfo; 830 831 mb_debug(1, "init page %lu\n", page->index); 832 833 inode = page->mapping->host; 834 sb = inode->i_sb; 835 ngroups = ext4_get_groups_count(sb); 836 blocksize = 1 << inode->i_blkbits; 837 blocks_per_page = PAGE_CACHE_SIZE / blocksize; 838 839 groups_per_page = blocks_per_page >> 1; 840 if (groups_per_page == 0) 841 groups_per_page = 1; 842 843 /* allocate buffer_heads to read bitmaps */ 844 if (groups_per_page > 1) { 845 i = sizeof(struct buffer_head *) * groups_per_page; 846 bh = kzalloc(i, GFP_NOFS); 847 if (bh == NULL) { 848 err = -ENOMEM; 849 goto out; 850 } 851 } else 852 bh = &bhs; 853 854 first_group = page->index * blocks_per_page / 2; 855 856 /* read all groups the page covers into the cache */ 857 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 858 if (group >= ngroups) 859 break; 860 861 grinfo = ext4_get_group_info(sb, group); 862 /* 863 * If page is uptodate then we came here after online resize 864 * which added some new uninitialized group info structs, so 865 * we must skip all initialized uptodate buddies on the page, 866 * which may be currently in use by an allocating task. 867 */ 868 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 869 bh[i] = NULL; 870 continue; 871 } 872 if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) { 873 err = -ENOMEM; 874 goto out; 875 } 876 mb_debug(1, "read bitmap for group %u\n", group); 877 } 878 879 /* wait for I/O completion */ 880 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 881 if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) { 882 err = -EIO; 883 goto out; 884 } 885 } 886 887 first_block = page->index * blocks_per_page; 888 for (i = 0; i < blocks_per_page; i++) { 889 group = (first_block + i) >> 1; 890 if (group >= ngroups) 891 break; 892 893 if (!bh[group - first_group]) 894 /* skip initialized uptodate buddy */ 895 continue; 896 897 /* 898 * data carry information regarding this 899 * particular group in the format specified 900 * above 901 * 902 */ 903 data = page_address(page) + (i * blocksize); 904 bitmap = bh[group - first_group]->b_data; 905 906 /* 907 * We place the buddy block and bitmap block 908 * close together 909 */ 910 if ((first_block + i) & 1) { 911 /* this is block of buddy */ 912 BUG_ON(incore == NULL); 913 mb_debug(1, "put buddy for group %u in page %lu/%x\n", 914 group, page->index, i * blocksize); 915 trace_ext4_mb_buddy_bitmap_load(sb, group); 916 grinfo = ext4_get_group_info(sb, group); 917 grinfo->bb_fragments = 0; 918 memset(grinfo->bb_counters, 0, 919 sizeof(*grinfo->bb_counters) * 920 (sb->s_blocksize_bits+2)); 921 /* 922 * incore got set to the group block bitmap below 923 */ 924 ext4_lock_group(sb, group); 925 /* init the buddy */ 926 memset(data, 0xff, blocksize); 927 ext4_mb_generate_buddy(sb, data, incore, group); 928 ext4_unlock_group(sb, group); 929 incore = NULL; 930 } else { 931 /* this is block of bitmap */ 932 BUG_ON(incore != NULL); 933 mb_debug(1, "put bitmap for group %u in page %lu/%x\n", 934 group, page->index, i * blocksize); 935 trace_ext4_mb_bitmap_load(sb, group); 936 
937 /* see comments in ext4_mb_put_pa() */ 938 ext4_lock_group(sb, group); 939 memcpy(data, bitmap, blocksize); 940 941 /* mark all preallocated blks used in in-core bitmap */ 942 ext4_mb_generate_from_pa(sb, data, group); 943 ext4_mb_generate_from_freelist(sb, data, group); 944 ext4_unlock_group(sb, group); 945 946 /* set incore so that the buddy information can be 947 * generated using this 948 */ 949 incore = data; 950 } 951 } 952 SetPageUptodate(page); 953 954 out: 955 if (bh) { 956 for (i = 0; i < groups_per_page; i++) 957 brelse(bh[i]); 958 if (bh != &bhs) 959 kfree(bh); 960 } 961 return err; 962 } 963 964 /* 965 * Lock the buddy and bitmap pages. This make sure other parallel init_group 966 * on the same buddy page doesn't happen whild holding the buddy page lock. 967 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 968 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 969 */ 970 static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 971 ext4_group_t group, struct ext4_buddy *e4b) 972 { 973 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 974 int block, pnum, poff; 975 int blocks_per_page; 976 struct page *page; 977 978 e4b->bd_buddy_page = NULL; 979 e4b->bd_bitmap_page = NULL; 980 981 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 982 /* 983 * the buddy cache inode stores the block bitmap 984 * and buddy information in consecutive blocks. 985 * So for each group we need two blocks. 986 */ 987 block = group * 2; 988 pnum = block / blocks_per_page; 989 poff = block % blocks_per_page; 990 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 991 if (!page) 992 return -ENOMEM; 993 BUG_ON(page->mapping != inode->i_mapping); 994 e4b->bd_bitmap_page = page; 995 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 996 997 if (blocks_per_page >= 2) { 998 /* buddy and bitmap are on the same page */ 999 return 0; 1000 } 1001 1002 block++; 1003 pnum = block / blocks_per_page; 1004 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1005 if (!page) 1006 return -ENOMEM; 1007 BUG_ON(page->mapping != inode->i_mapping); 1008 e4b->bd_buddy_page = page; 1009 return 0; 1010 } 1011 1012 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1013 { 1014 if (e4b->bd_bitmap_page) { 1015 unlock_page(e4b->bd_bitmap_page); 1016 page_cache_release(e4b->bd_bitmap_page); 1017 } 1018 if (e4b->bd_buddy_page) { 1019 unlock_page(e4b->bd_buddy_page); 1020 page_cache_release(e4b->bd_buddy_page); 1021 } 1022 } 1023 1024 /* 1025 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1026 * block group lock of all groups for this page; do not hold the BG lock when 1027 * calling this routine! 1028 */ 1029 static noinline_for_stack 1030 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) 1031 { 1032 1033 struct ext4_group_info *this_grp; 1034 struct ext4_buddy e4b; 1035 struct page *page; 1036 int ret = 0; 1037 1038 might_sleep(); 1039 mb_debug(1, "init group %u\n", group); 1040 this_grp = ext4_get_group_info(sb, group); 1041 /* 1042 * This ensures that we don't reinit the buddy cache 1043 * page which map to the group from which we are already 1044 * allocating. If we are looking at the buddy cache we would 1045 * have taken a reference using ext4_mb_load_buddy and that 1046 * would have pinned buddy page to page cache. 1047 * The call to ext4_mb_get_buddy_page_lock will mark the 1048 * page accessed. 
1049 */ 1050 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); 1051 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1052 /* 1053 * somebody initialized the group 1054 * return without doing anything 1055 */ 1056 goto err; 1057 } 1058 1059 page = e4b.bd_bitmap_page; 1060 ret = ext4_mb_init_cache(page, NULL); 1061 if (ret) 1062 goto err; 1063 if (!PageUptodate(page)) { 1064 ret = -EIO; 1065 goto err; 1066 } 1067 1068 if (e4b.bd_buddy_page == NULL) { 1069 /* 1070 * If both the bitmap and buddy are in 1071 * the same page we don't need to force 1072 * init the buddy 1073 */ 1074 ret = 0; 1075 goto err; 1076 } 1077 /* init buddy cache */ 1078 page = e4b.bd_buddy_page; 1079 ret = ext4_mb_init_cache(page, e4b.bd_bitmap); 1080 if (ret) 1081 goto err; 1082 if (!PageUptodate(page)) { 1083 ret = -EIO; 1084 goto err; 1085 } 1086 err: 1087 ext4_mb_put_buddy_page_lock(&e4b); 1088 return ret; 1089 } 1090 1091 /* 1092 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1093 * block group lock of all groups for this page; do not hold the BG lock when 1094 * calling this routine! 1095 */ 1096 static noinline_for_stack int 1097 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1098 struct ext4_buddy *e4b) 1099 { 1100 int blocks_per_page; 1101 int block; 1102 int pnum; 1103 int poff; 1104 struct page *page; 1105 int ret; 1106 struct ext4_group_info *grp; 1107 struct ext4_sb_info *sbi = EXT4_SB(sb); 1108 struct inode *inode = sbi->s_buddy_cache; 1109 1110 might_sleep(); 1111 mb_debug(1, "load group %u\n", group); 1112 1113 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 1114 grp = ext4_get_group_info(sb, group); 1115 1116 e4b->bd_blkbits = sb->s_blocksize_bits; 1117 e4b->bd_info = grp; 1118 e4b->bd_sb = sb; 1119 e4b->bd_group = group; 1120 e4b->bd_buddy_page = NULL; 1121 e4b->bd_bitmap_page = NULL; 1122 1123 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1124 /* 1125 * we need full data about the group 1126 * to make a good selection 1127 */ 1128 ret = ext4_mb_init_group(sb, group); 1129 if (ret) 1130 return ret; 1131 } 1132 1133 /* 1134 * the buddy cache inode stores the block bitmap 1135 * and buddy information in consecutive blocks. 1136 * So for each group we need two blocks. 1137 */ 1138 block = group * 2; 1139 pnum = block / blocks_per_page; 1140 poff = block % blocks_per_page; 1141 1142 /* we could use find_or_create_page(), but it locks page 1143 * what we'd like to avoid in fast path ... */ 1144 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1145 if (page == NULL || !PageUptodate(page)) { 1146 if (page) 1147 /* 1148 * drop the page reference and try 1149 * to get the page with lock. If we 1150 * are not uptodate that implies 1151 * somebody just created the page but 1152 * is yet to initialize the same. So 1153 * wait for it to initialize. 
1154 */ 1155 page_cache_release(page); 1156 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1157 if (page) { 1158 BUG_ON(page->mapping != inode->i_mapping); 1159 if (!PageUptodate(page)) { 1160 ret = ext4_mb_init_cache(page, NULL); 1161 if (ret) { 1162 unlock_page(page); 1163 goto err; 1164 } 1165 mb_cmp_bitmaps(e4b, page_address(page) + 1166 (poff * sb->s_blocksize)); 1167 } 1168 unlock_page(page); 1169 } 1170 } 1171 if (page == NULL) { 1172 ret = -ENOMEM; 1173 goto err; 1174 } 1175 if (!PageUptodate(page)) { 1176 ret = -EIO; 1177 goto err; 1178 } 1179 1180 /* Pages marked accessed already */ 1181 e4b->bd_bitmap_page = page; 1182 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1183 1184 block++; 1185 pnum = block / blocks_per_page; 1186 poff = block % blocks_per_page; 1187 1188 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1189 if (page == NULL || !PageUptodate(page)) { 1190 if (page) 1191 page_cache_release(page); 1192 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1193 if (page) { 1194 BUG_ON(page->mapping != inode->i_mapping); 1195 if (!PageUptodate(page)) { 1196 ret = ext4_mb_init_cache(page, e4b->bd_bitmap); 1197 if (ret) { 1198 unlock_page(page); 1199 goto err; 1200 } 1201 } 1202 unlock_page(page); 1203 } 1204 } 1205 if (page == NULL) { 1206 ret = -ENOMEM; 1207 goto err; 1208 } 1209 if (!PageUptodate(page)) { 1210 ret = -EIO; 1211 goto err; 1212 } 1213 1214 /* Pages marked accessed already */ 1215 e4b->bd_buddy_page = page; 1216 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1217 1218 BUG_ON(e4b->bd_bitmap_page == NULL); 1219 BUG_ON(e4b->bd_buddy_page == NULL); 1220 1221 return 0; 1222 1223 err: 1224 if (page) 1225 page_cache_release(page); 1226 if (e4b->bd_bitmap_page) 1227 page_cache_release(e4b->bd_bitmap_page); 1228 if (e4b->bd_buddy_page) 1229 page_cache_release(e4b->bd_buddy_page); 1230 e4b->bd_buddy = NULL; 1231 e4b->bd_bitmap = NULL; 1232 return ret; 1233 } 1234 1235 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1236 { 1237 if (e4b->bd_bitmap_page) 1238 page_cache_release(e4b->bd_bitmap_page); 1239 if (e4b->bd_buddy_page) 1240 page_cache_release(e4b->bd_buddy_page); 1241 } 1242 1243 1244 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1245 { 1246 int order = 1; 1247 void *bb; 1248 1249 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1250 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1251 1252 bb = e4b->bd_buddy; 1253 while (order <= e4b->bd_blkbits + 1) { 1254 block = block >> 1; 1255 if (!mb_test_bit(block, bb)) { 1256 /* this block is part of buddy of order 'order' */ 1257 return order; 1258 } 1259 bb += 1 << (e4b->bd_blkbits - order); 1260 order++; 1261 } 1262 return 0; 1263 } 1264 1265 static void mb_clear_bits(void *bm, int cur, int len) 1266 { 1267 __u32 *addr; 1268 1269 len = cur + len; 1270 while (cur < len) { 1271 if ((cur & 31) == 0 && (len - cur) >= 32) { 1272 /* fast path: clear whole word at once */ 1273 addr = bm + (cur >> 3); 1274 *addr = 0; 1275 cur += 32; 1276 continue; 1277 } 1278 mb_clear_bit(cur, bm); 1279 cur++; 1280 } 1281 } 1282 1283 /* clear bits in given range 1284 * will return first found zero bit if any, -1 otherwise 1285 */ 1286 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1287 { 1288 __u32 *addr; 1289 int zero_bit = -1; 1290 1291 len = cur + len; 1292 while (cur < len) { 1293 if ((cur & 31) == 0 && (len - cur) >= 32) { 1294 /* fast path: clear whole word at once */ 1295 addr = bm + (cur >> 3); 1296 if (*addr != (__u32)(-1) 
&& zero_bit == -1) 1297 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1298 *addr = 0; 1299 cur += 32; 1300 continue; 1301 } 1302 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1303 zero_bit = cur; 1304 cur++; 1305 } 1306 1307 return zero_bit; 1308 } 1309 1310 void ext4_set_bits(void *bm, int cur, int len) 1311 { 1312 __u32 *addr; 1313 1314 len = cur + len; 1315 while (cur < len) { 1316 if ((cur & 31) == 0 && (len - cur) >= 32) { 1317 /* fast path: set whole word at once */ 1318 addr = bm + (cur >> 3); 1319 *addr = 0xffffffff; 1320 cur += 32; 1321 continue; 1322 } 1323 mb_set_bit(cur, bm); 1324 cur++; 1325 } 1326 } 1327 1328 /* 1329 * _________________________________________________________________ */ 1330 1331 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1332 { 1333 if (mb_test_bit(*bit + side, bitmap)) { 1334 mb_clear_bit(*bit, bitmap); 1335 (*bit) -= side; 1336 return 1; 1337 } 1338 else { 1339 (*bit) += side; 1340 mb_set_bit(*bit, bitmap); 1341 return -1; 1342 } 1343 } 1344 1345 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1346 { 1347 int max; 1348 int order = 1; 1349 void *buddy = mb_find_buddy(e4b, order, &max); 1350 1351 while (buddy) { 1352 void *buddy2; 1353 1354 /* Bits in range [first; last] are known to be set since 1355 * corresponding blocks were allocated. Bits in range 1356 * (first; last) will stay set because they form buddies on 1357 * upper layer. We just deal with borders if they don't 1358 * align with upper layer and then go up. 1359 * Releasing entire group is all about clearing 1360 * single bit of highest order buddy. 1361 */ 1362 1363 /* Example: 1364 * --------------------------------- 1365 * | 1 | 1 | 1 | 1 | 1366 * --------------------------------- 1367 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1368 * --------------------------------- 1369 * 0 1 2 3 4 5 6 7 1370 * \_____________________/ 1371 * 1372 * Neither [1] nor [6] is aligned to above layer. 1373 * Left neighbour [0] is free, so mark it busy, 1374 * decrease bb_counters and extend range to 1375 * [0; 6] 1376 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1377 * mark [6] free, increase bb_counters and shrink range to 1378 * [0; 5]. 1379 * Then shift range to [0; 2], go up and do the same. 1380 */ 1381 1382 1383 if (first & 1) 1384 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1385 if (!(last & 1)) 1386 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1387 if (first > last) 1388 break; 1389 order++; 1390 1391 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { 1392 mb_clear_bits(buddy, first, last - first + 1); 1393 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1394 break; 1395 } 1396 first >>= 1; 1397 last >>= 1; 1398 buddy = buddy2; 1399 } 1400 } 1401 1402 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1403 int first, int count) 1404 { 1405 int left_is_free = 0; 1406 int right_is_free = 0; 1407 int block; 1408 int last = first + count - 1; 1409 struct super_block *sb = e4b->bd_sb; 1410 1411 BUG_ON(last >= (sb->s_blocksize << 3)); 1412 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1413 /* Don't bother if the block group is corrupt. 
*/ 1414 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1415 return; 1416 1417 mb_check_buddy(e4b); 1418 mb_free_blocks_double(inode, e4b, first, count); 1419 1420 e4b->bd_info->bb_free += count; 1421 if (first < e4b->bd_info->bb_first_free) 1422 e4b->bd_info->bb_first_free = first; 1423 1424 /* access memory sequentially: check left neighbour, 1425 * clear range and then check right neighbour 1426 */ 1427 if (first != 0) 1428 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1429 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1430 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1431 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1432 1433 if (unlikely(block != -1)) { 1434 ext4_fsblk_t blocknr; 1435 1436 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1437 blocknr += EXT4_C2B(EXT4_SB(sb), block); 1438 ext4_grp_locked_error(sb, e4b->bd_group, 1439 inode ? inode->i_ino : 0, 1440 blocknr, 1441 "freeing already freed block " 1442 "(bit %u); block bitmap corrupt.", 1443 block); 1444 /* Mark the block group as corrupt. */ 1445 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, 1446 &e4b->bd_info->bb_state); 1447 mb_regenerate_buddy(e4b); 1448 goto done; 1449 } 1450 1451 /* let's maintain fragments counter */ 1452 if (left_is_free && right_is_free) 1453 e4b->bd_info->bb_fragments--; 1454 else if (!left_is_free && !right_is_free) 1455 e4b->bd_info->bb_fragments++; 1456 1457 /* buddy[0] == bd_bitmap is a special case, so handle 1458 * it right away and let mb_buddy_mark_free stay free of 1459 * zero order checks. 1460 * Check if neighbours are to be coaleasced, 1461 * adjust bitmap bb_counters and borders appropriately. 1462 */ 1463 if (first & 1) { 1464 first += !left_is_free; 1465 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1466 } 1467 if (!(last & 1)) { 1468 last -= !right_is_free; 1469 e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 1470 } 1471 1472 if (first <= last) 1473 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1474 1475 done: 1476 mb_set_largest_free_order(sb, e4b->bd_info); 1477 mb_check_buddy(e4b); 1478 } 1479 1480 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1481 int needed, struct ext4_free_extent *ex) 1482 { 1483 int next = block; 1484 int max, order; 1485 void *buddy; 1486 1487 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1488 BUG_ON(ex == NULL); 1489 1490 buddy = mb_find_buddy(e4b, 0, &max); 1491 BUG_ON(buddy == NULL); 1492 BUG_ON(block >= max); 1493 if (mb_test_bit(block, buddy)) { 1494 ex->fe_len = 0; 1495 ex->fe_start = 0; 1496 ex->fe_group = 0; 1497 return 0; 1498 } 1499 1500 /* find actual order */ 1501 order = mb_find_order_for_block(e4b, block); 1502 block = block >> order; 1503 1504 ex->fe_len = 1 << order; 1505 ex->fe_start = block << order; 1506 ex->fe_group = e4b->bd_group; 1507 1508 /* calc difference from given start */ 1509 next = next - ex->fe_start; 1510 ex->fe_len -= next; 1511 ex->fe_start += next; 1512 1513 while (needed > ex->fe_len && 1514 mb_find_buddy(e4b, order, &max)) { 1515 1516 if (block + 1 >= max) 1517 break; 1518 1519 next = (block + 1) * (1 << order); 1520 if (mb_test_bit(next, e4b->bd_bitmap)) 1521 break; 1522 1523 order = mb_find_order_for_block(e4b, next); 1524 1525 block = next >> order; 1526 ex->fe_len += 1 << order; 1527 } 1528 1529 BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))); 1530 return ex->fe_len; 1531 } 1532 1533 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1534 { 1535 int ord; 1536 int mlen = 0; 1537 int max = 0; 1538 int cur; 1539 int start = ex->fe_start; 1540 int len = ex->fe_len; 1541 unsigned ret = 0; 1542 int len0 = len; 1543 void *buddy; 1544 1545 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1546 BUG_ON(e4b->bd_group != ex->fe_group); 1547 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1548 mb_check_buddy(e4b); 1549 mb_mark_used_double(e4b, start, len); 1550 1551 e4b->bd_info->bb_free -= len; 1552 if (e4b->bd_info->bb_first_free == start) 1553 e4b->bd_info->bb_first_free += len; 1554 1555 /* let's maintain fragments counter */ 1556 if (start != 0) 1557 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1558 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1559 max = !mb_test_bit(start + len, e4b->bd_bitmap); 1560 if (mlen && max) 1561 e4b->bd_info->bb_fragments++; 1562 else if (!mlen && !max) 1563 e4b->bd_info->bb_fragments--; 1564 1565 /* let's maintain buddy itself */ 1566 while (len) { 1567 ord = mb_find_order_for_block(e4b, start); 1568 1569 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1570 /* the whole chunk may be allocated at once! 
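			 * For instance (made-up numbers): start = 16, len = 12,
			 * ord = 3: start is aligned to 1 << 3 and len >= 8, so the
			 * whole order-3 buddy covering blocks 16-23 is taken in one
			 * step, leaving start = 24 and len = 4 for the next pass.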
*/ 1571 mlen = 1 << ord; 1572 buddy = mb_find_buddy(e4b, ord, &max); 1573 BUG_ON((start >> ord) >= max); 1574 mb_set_bit(start >> ord, buddy); 1575 e4b->bd_info->bb_counters[ord]--; 1576 start += mlen; 1577 len -= mlen; 1578 BUG_ON(len < 0); 1579 continue; 1580 } 1581 1582 /* store for history */ 1583 if (ret == 0) 1584 ret = len | (ord << 16); 1585 1586 /* we have to split large buddy */ 1587 BUG_ON(ord <= 0); 1588 buddy = mb_find_buddy(e4b, ord, &max); 1589 mb_set_bit(start >> ord, buddy); 1590 e4b->bd_info->bb_counters[ord]--; 1591 1592 ord--; 1593 cur = (start >> ord) & ~1U; 1594 buddy = mb_find_buddy(e4b, ord, &max); 1595 mb_clear_bit(cur, buddy); 1596 mb_clear_bit(cur + 1, buddy); 1597 e4b->bd_info->bb_counters[ord]++; 1598 e4b->bd_info->bb_counters[ord]++; 1599 } 1600 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1601 1602 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1603 mb_check_buddy(e4b); 1604 1605 return ret; 1606 } 1607 1608 /* 1609 * Must be called under group lock! 1610 */ 1611 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1612 struct ext4_buddy *e4b) 1613 { 1614 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1615 int ret; 1616 1617 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1618 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1619 1620 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 1621 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 1622 ret = mb_mark_used(e4b, &ac->ac_b_ex); 1623 1624 /* preallocation can change ac_b_ex, thus we store actually 1625 * allocated blocks for history */ 1626 ac->ac_f_ex = ac->ac_b_ex; 1627 1628 ac->ac_status = AC_STATUS_FOUND; 1629 ac->ac_tail = ret & 0xffff; 1630 ac->ac_buddy = ret >> 16; 1631 1632 /* 1633 * take the page reference. We want the page to be pinned 1634 * so that we don't get a ext4_mb_init_cache_call for this 1635 * group until we update the bitmap. That would mean we 1636 * double allocate blocks. 
The reference is dropped 1637 * in ext4_mb_release_context 1638 */ 1639 ac->ac_bitmap_page = e4b->bd_bitmap_page; 1640 get_page(ac->ac_bitmap_page); 1641 ac->ac_buddy_page = e4b->bd_buddy_page; 1642 get_page(ac->ac_buddy_page); 1643 /* store last allocated for subsequent stream allocation */ 1644 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1645 spin_lock(&sbi->s_md_lock); 1646 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 1647 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 1648 spin_unlock(&sbi->s_md_lock); 1649 } 1650 } 1651 1652 /* 1653 * regular allocator, for general purposes allocation 1654 */ 1655 1656 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 1657 struct ext4_buddy *e4b, 1658 int finish_group) 1659 { 1660 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1661 struct ext4_free_extent *bex = &ac->ac_b_ex; 1662 struct ext4_free_extent *gex = &ac->ac_g_ex; 1663 struct ext4_free_extent ex; 1664 int max; 1665 1666 if (ac->ac_status == AC_STATUS_FOUND) 1667 return; 1668 /* 1669 * We don't want to scan for a whole year 1670 */ 1671 if (ac->ac_found > sbi->s_mb_max_to_scan && 1672 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1673 ac->ac_status = AC_STATUS_BREAK; 1674 return; 1675 } 1676 1677 /* 1678 * Haven't found good chunk so far, let's continue 1679 */ 1680 if (bex->fe_len < gex->fe_len) 1681 return; 1682 1683 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 1684 && bex->fe_group == e4b->bd_group) { 1685 /* recheck chunk's availability - we don't know 1686 * when it was found (within this lock-unlock 1687 * period or not) */ 1688 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); 1689 if (max >= gex->fe_len) { 1690 ext4_mb_use_best_found(ac, e4b); 1691 return; 1692 } 1693 } 1694 } 1695 1696 /* 1697 * The routine checks whether found extent is good enough. If it is, 1698 * then the extent gets marked used and flag is set to the context 1699 * to stop scanning. Otherwise, the extent is compared with the 1700 * previous found extent and if new one is better, then it's stored 1701 * in the context. Later, the best found extent will be used, if 1702 * mballoc can't find good enough extent. 1703 * 1704 * FIXME: real allocation policy is to be designed yet! 
1705 */ 1706 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 1707 struct ext4_free_extent *ex, 1708 struct ext4_buddy *e4b) 1709 { 1710 struct ext4_free_extent *bex = &ac->ac_b_ex; 1711 struct ext4_free_extent *gex = &ac->ac_g_ex; 1712 1713 BUG_ON(ex->fe_len <= 0); 1714 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1715 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1716 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 1717 1718 ac->ac_found++; 1719 1720 /* 1721 * The special case - take what you catch first 1722 */ 1723 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1724 *bex = *ex; 1725 ext4_mb_use_best_found(ac, e4b); 1726 return; 1727 } 1728 1729 /* 1730 * Let's check whether the chuck is good enough 1731 */ 1732 if (ex->fe_len == gex->fe_len) { 1733 *bex = *ex; 1734 ext4_mb_use_best_found(ac, e4b); 1735 return; 1736 } 1737 1738 /* 1739 * If this is first found extent, just store it in the context 1740 */ 1741 if (bex->fe_len == 0) { 1742 *bex = *ex; 1743 return; 1744 } 1745 1746 /* 1747 * If new found extent is better, store it in the context 1748 */ 1749 if (bex->fe_len < gex->fe_len) { 1750 /* if the request isn't satisfied, any found extent 1751 * larger than previous best one is better */ 1752 if (ex->fe_len > bex->fe_len) 1753 *bex = *ex; 1754 } else if (ex->fe_len > gex->fe_len) { 1755 /* if the request is satisfied, then we try to find 1756 * an extent that still satisfy the request, but is 1757 * smaller than previous one */ 1758 if (ex->fe_len < bex->fe_len) 1759 *bex = *ex; 1760 } 1761 1762 ext4_mb_check_limits(ac, e4b, 0); 1763 } 1764 1765 static noinline_for_stack 1766 int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 1767 struct ext4_buddy *e4b) 1768 { 1769 struct ext4_free_extent ex = ac->ac_b_ex; 1770 ext4_group_t group = ex.fe_group; 1771 int max; 1772 int err; 1773 1774 BUG_ON(ex.fe_len <= 0); 1775 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1776 if (err) 1777 return err; 1778 1779 ext4_lock_group(ac->ac_sb, group); 1780 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 1781 1782 if (max > 0) { 1783 ac->ac_b_ex = ex; 1784 ext4_mb_use_best_found(ac, e4b); 1785 } 1786 1787 ext4_unlock_group(ac->ac_sb, group); 1788 ext4_mb_unload_buddy(e4b); 1789 1790 return 0; 1791 } 1792 1793 static noinline_for_stack 1794 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 1795 struct ext4_buddy *e4b) 1796 { 1797 ext4_group_t group = ac->ac_g_ex.fe_group; 1798 int max; 1799 int err; 1800 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1801 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 1802 struct ext4_free_extent ex; 1803 1804 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 1805 return 0; 1806 if (grp->bb_free == 0) 1807 return 0; 1808 1809 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1810 if (err) 1811 return err; 1812 1813 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 1814 ext4_mb_unload_buddy(e4b); 1815 return 0; 1816 } 1817 1818 ext4_lock_group(ac->ac_sb, group); 1819 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 1820 ac->ac_g_ex.fe_len, &ex); 1821 ex.fe_logical = 0xDEADFA11; /* debug value */ 1822 1823 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 1824 ext4_fsblk_t start; 1825 1826 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 1827 ex.fe_start; 1828 /* use do_div to get remainder (would be 64-bit modulo) */ 1829 if (do_div(start, sbi->s_stripe) == 0) { 1830 ac->ac_found++; 1831 ac->ac_b_ex = ex; 1832 
ext4_mb_use_best_found(ac, e4b); 1833 } 1834 } else if (max >= ac->ac_g_ex.fe_len) { 1835 BUG_ON(ex.fe_len <= 0); 1836 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1837 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1838 ac->ac_found++; 1839 ac->ac_b_ex = ex; 1840 ext4_mb_use_best_found(ac, e4b); 1841 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 1842 /* Sometimes, caller may want to merge even small 1843 * number of blocks to an existing extent */ 1844 BUG_ON(ex.fe_len <= 0); 1845 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1846 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1847 ac->ac_found++; 1848 ac->ac_b_ex = ex; 1849 ext4_mb_use_best_found(ac, e4b); 1850 } 1851 ext4_unlock_group(ac->ac_sb, group); 1852 ext4_mb_unload_buddy(e4b); 1853 1854 return 0; 1855 } 1856 1857 /* 1858 * The routine scans buddy structures (not bitmap!) from given order 1859 * to max order and tries to find big enough chunk to satisfy the req 1860 */ 1861 static noinline_for_stack 1862 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1863 struct ext4_buddy *e4b) 1864 { 1865 struct super_block *sb = ac->ac_sb; 1866 struct ext4_group_info *grp = e4b->bd_info; 1867 void *buddy; 1868 int i; 1869 int k; 1870 int max; 1871 1872 BUG_ON(ac->ac_2order <= 0); 1873 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { 1874 if (grp->bb_counters[i] == 0) 1875 continue; 1876 1877 buddy = mb_find_buddy(e4b, i, &max); 1878 BUG_ON(buddy == NULL); 1879 1880 k = mb_find_next_zero_bit(buddy, max, 0); 1881 BUG_ON(k >= max); 1882 1883 ac->ac_found++; 1884 1885 ac->ac_b_ex.fe_len = 1 << i; 1886 ac->ac_b_ex.fe_start = k << i; 1887 ac->ac_b_ex.fe_group = e4b->bd_group; 1888 1889 ext4_mb_use_best_found(ac, e4b); 1890 1891 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); 1892 1893 if (EXT4_SB(sb)->s_mb_stats) 1894 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 1895 1896 break; 1897 } 1898 } 1899 1900 /* 1901 * The routine scans the group and measures all found extents. 1902 * In order to optimize scanning, caller must pass number of 1903 * free blocks in the group, so the routine can know upper limit. 1904 */ 1905 static noinline_for_stack 1906 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 1907 struct ext4_buddy *e4b) 1908 { 1909 struct super_block *sb = ac->ac_sb; 1910 void *bitmap = e4b->bd_bitmap; 1911 struct ext4_free_extent ex; 1912 int i; 1913 int free; 1914 1915 free = e4b->bd_info->bb_free; 1916 BUG_ON(free <= 0); 1917 1918 i = e4b->bd_info->bb_first_free; 1919 1920 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1921 i = mb_find_next_zero_bit(bitmap, 1922 EXT4_CLUSTERS_PER_GROUP(sb), i); 1923 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 1924 /* 1925 * IF we have corrupt bitmap, we won't find any 1926 * free blocks even though group info says we 1927 * we have free blocks 1928 */ 1929 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1930 "%d free clusters as per " 1931 "group info. But bitmap says 0", 1932 free); 1933 break; 1934 } 1935 1936 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 1937 BUG_ON(ex.fe_len <= 0); 1938 if (free < ex.fe_len) { 1939 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1940 "%d free clusters as per " 1941 "group info. But got %d blocks", 1942 free, ex.fe_len); 1943 /* 1944 * The number of free blocks differs. This mostly 1945 * indicate that the bitmap is corrupt. So exit 1946 * without claiming the space. 
1947 */ 1948 break; 1949 } 1950 ex.fe_logical = 0xDEADC0DE; /* debug value */ 1951 ext4_mb_measure_extent(ac, &ex, e4b); 1952 1953 i += ex.fe_len; 1954 free -= ex.fe_len; 1955 } 1956 1957 ext4_mb_check_limits(ac, e4b, 1); 1958 } 1959 1960 /* 1961 * This is a special case for storages like raid5 1962 * we try to find stripe-aligned chunks for stripe-size-multiple requests 1963 */ 1964 static noinline_for_stack 1965 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 1966 struct ext4_buddy *e4b) 1967 { 1968 struct super_block *sb = ac->ac_sb; 1969 struct ext4_sb_info *sbi = EXT4_SB(sb); 1970 void *bitmap = e4b->bd_bitmap; 1971 struct ext4_free_extent ex; 1972 ext4_fsblk_t first_group_block; 1973 ext4_fsblk_t a; 1974 ext4_grpblk_t i; 1975 int max; 1976 1977 BUG_ON(sbi->s_stripe == 0); 1978 1979 /* find first stripe-aligned block in group */ 1980 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 1981 1982 a = first_group_block + sbi->s_stripe - 1; 1983 do_div(a, sbi->s_stripe); 1984 i = (a * sbi->s_stripe) - first_group_block; 1985 1986 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 1987 if (!mb_test_bit(i, bitmap)) { 1988 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 1989 if (max >= sbi->s_stripe) { 1990 ac->ac_found++; 1991 ex.fe_logical = 0xDEADF00D; /* debug value */ 1992 ac->ac_b_ex = ex; 1993 ext4_mb_use_best_found(ac, e4b); 1994 break; 1995 } 1996 } 1997 i += sbi->s_stripe; 1998 } 1999 } 2000 2001 /* This is now called BEFORE we load the buddy bitmap. */ 2002 static int ext4_mb_good_group(struct ext4_allocation_context *ac, 2003 ext4_group_t group, int cr) 2004 { 2005 unsigned free, fragments; 2006 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2007 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2008 2009 BUG_ON(cr < 0 || cr >= 4); 2010 2011 free = grp->bb_free; 2012 if (free == 0) 2013 return 0; 2014 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2015 return 0; 2016 2017 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2018 return 0; 2019 2020 /* We only do this if the grp has never been initialized */ 2021 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2022 int ret = ext4_mb_init_group(ac->ac_sb, group); 2023 if (ret) 2024 return 0; 2025 } 2026 2027 fragments = grp->bb_fragments; 2028 if (fragments == 0) 2029 return 0; 2030 2031 switch (cr) { 2032 case 0: 2033 BUG_ON(ac->ac_2order == 0); 2034 2035 /* Avoid using the first bg of a flexgroup for data files */ 2036 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2037 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2038 ((group % flex_size) == 0)) 2039 return 0; 2040 2041 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || 2042 (free / fragments) >= ac->ac_g_ex.fe_len) 2043 return 1; 2044 2045 if (grp->bb_largest_free_order < ac->ac_2order) 2046 return 0; 2047 2048 return 1; 2049 case 1: 2050 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2051 return 1; 2052 break; 2053 case 2: 2054 if (free >= ac->ac_g_ex.fe_len) 2055 return 1; 2056 break; 2057 case 3: 2058 return 1; 2059 default: 2060 BUG(); 2061 } 2062 2063 return 0; 2064 } 2065 2066 static noinline_for_stack int 2067 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2068 { 2069 ext4_group_t ngroups, group, i; 2070 int cr; 2071 int err = 0; 2072 struct ext4_sb_info *sbi; 2073 struct super_block *sb; 2074 struct ext4_buddy e4b; 2075 2076 sb = ac->ac_sb; 2077 sbi = EXT4_SB(sb); 2078 ngroups = ext4_get_groups_count(sb); 2079 /* non-extent files are limited to low blocks/groups */ 2080 if (!(ext4_test_inode_flag(ac->ac_inode, 
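				/* i.e. an old-style indirect-mapped file, which cannot
				 * reference physical blocks above 2^32 - 1, hence the
				 * reduced group count assigned below */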
EXT4_INODE_EXTENTS))) 2081 ngroups = sbi->s_blockfile_groups; 2082 2083 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2084 2085 /* first, try the goal */ 2086 err = ext4_mb_find_by_goal(ac, &e4b); 2087 if (err || ac->ac_status == AC_STATUS_FOUND) 2088 goto out; 2089 2090 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2091 goto out; 2092 2093 /* 2094 * ac->ac2_order is set only if the fe_len is a power of 2 2095 * if ac2_order is set we also set criteria to 0 so that we 2096 * try exact allocation using buddy. 2097 */ 2098 i = fls(ac->ac_g_ex.fe_len); 2099 ac->ac_2order = 0; 2100 /* 2101 * We search using buddy data only if the order of the request 2102 * is greater than equal to the sbi_s_mb_order2_reqs 2103 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2104 */ 2105 if (i >= sbi->s_mb_order2_reqs) { 2106 /* 2107 * This should tell if fe_len is exactly power of 2 2108 */ 2109 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2110 ac->ac_2order = i - 1; 2111 } 2112 2113 /* if stream allocation is enabled, use global goal */ 2114 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2115 /* TBD: may be hot point */ 2116 spin_lock(&sbi->s_md_lock); 2117 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2118 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2119 spin_unlock(&sbi->s_md_lock); 2120 } 2121 2122 /* Let's just scan groups to find more-less suitable blocks */ 2123 cr = ac->ac_2order ? 0 : 1; 2124 /* 2125 * cr == 0 try to get exact allocation, 2126 * cr == 3 try to get anything 2127 */ 2128 repeat: 2129 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2130 ac->ac_criteria = cr; 2131 /* 2132 * searching for the right group start 2133 * from the goal value specified 2134 */ 2135 group = ac->ac_g_ex.fe_group; 2136 2137 for (i = 0; i < ngroups; group++, i++) { 2138 cond_resched(); 2139 /* 2140 * Artificially restricted ngroups for non-extent 2141 * files makes group > ngroups possible on first loop. 2142 */ 2143 if (group >= ngroups) 2144 group = 0; 2145 2146 /* This now checks without needing the buddy page */ 2147 if (!ext4_mb_good_group(ac, group, cr)) 2148 continue; 2149 2150 err = ext4_mb_load_buddy(sb, group, &e4b); 2151 if (err) 2152 goto out; 2153 2154 ext4_lock_group(sb, group); 2155 2156 /* 2157 * We need to check again after locking the 2158 * block group 2159 */ 2160 if (!ext4_mb_good_group(ac, group, cr)) { 2161 ext4_unlock_group(sb, group); 2162 ext4_mb_unload_buddy(&e4b); 2163 continue; 2164 } 2165 2166 ac->ac_groups_scanned++; 2167 if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2) 2168 ext4_mb_simple_scan_group(ac, &e4b); 2169 else if (cr == 1 && sbi->s_stripe && 2170 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2171 ext4_mb_scan_aligned(ac, &e4b); 2172 else 2173 ext4_mb_complex_scan_group(ac, &e4b); 2174 2175 ext4_unlock_group(sb, group); 2176 ext4_mb_unload_buddy(&e4b); 2177 2178 if (ac->ac_status != AC_STATUS_CONTINUE) 2179 break; 2180 } 2181 } 2182 2183 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2184 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2185 /* 2186 * We've been searching too long. Let's try to allocate 2187 * the best chunk we've found so far 2188 */ 2189 2190 ext4_mb_try_best_found(ac, &e4b); 2191 if (ac->ac_status != AC_STATUS_FOUND) { 2192 /* 2193 * Someone more lucky has already allocated it. 
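			 * (another writer raced with us and grabbed the best
			 * extent we had remembered between the scan and this
			 * retry).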
2194 * The only thing we can do is just take first 2195 * found block(s) 2196 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); 2197 */ 2198 ac->ac_b_ex.fe_group = 0; 2199 ac->ac_b_ex.fe_start = 0; 2200 ac->ac_b_ex.fe_len = 0; 2201 ac->ac_status = AC_STATUS_CONTINUE; 2202 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2203 cr = 3; 2204 atomic_inc(&sbi->s_mb_lost_chunks); 2205 goto repeat; 2206 } 2207 } 2208 out: 2209 return err; 2210 } 2211 2212 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2213 { 2214 struct super_block *sb = seq->private; 2215 ext4_group_t group; 2216 2217 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2218 return NULL; 2219 group = *pos + 1; 2220 return (void *) ((unsigned long) group); 2221 } 2222 2223 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2224 { 2225 struct super_block *sb = seq->private; 2226 ext4_group_t group; 2227 2228 ++*pos; 2229 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2230 return NULL; 2231 group = *pos + 1; 2232 return (void *) ((unsigned long) group); 2233 } 2234 2235 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2236 { 2237 struct super_block *sb = seq->private; 2238 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2239 int i; 2240 int err, buddy_loaded = 0; 2241 struct ext4_buddy e4b; 2242 struct ext4_group_info *grinfo; 2243 struct sg { 2244 struct ext4_group_info info; 2245 ext4_grpblk_t counters[16]; 2246 } sg; 2247 2248 group--; 2249 if (group == 0) 2250 seq_printf(seq, "#%-5s: %-5s %-5s %-5s " 2251 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s " 2252 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n", 2253 "group", "free", "frags", "first", 2254 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6", 2255 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13"); 2256 2257 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2258 sizeof(struct ext4_group_info); 2259 grinfo = ext4_get_group_info(sb, group); 2260 /* Load the group info in memory only if not already loaded. */ 2261 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2262 err = ext4_mb_load_buddy(sb, group, &e4b); 2263 if (err) { 2264 seq_printf(seq, "#%-5u: I/O error\n", group); 2265 return 0; 2266 } 2267 buddy_loaded = 1; 2268 } 2269 2270 memcpy(&sg, ext4_get_group_info(sb, group), i); 2271 2272 if (buddy_loaded) 2273 ext4_mb_unload_buddy(&e4b); 2274 2275 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2276 sg.info.bb_fragments, sg.info.bb_first_free); 2277 for (i = 0; i <= 13; i++) 2278 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 
2279 sg.info.bb_counters[i] : 0); 2280 seq_printf(seq, " ]\n"); 2281 2282 return 0; 2283 } 2284 2285 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2286 { 2287 } 2288 2289 static const struct seq_operations ext4_mb_seq_groups_ops = { 2290 .start = ext4_mb_seq_groups_start, 2291 .next = ext4_mb_seq_groups_next, 2292 .stop = ext4_mb_seq_groups_stop, 2293 .show = ext4_mb_seq_groups_show, 2294 }; 2295 2296 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) 2297 { 2298 struct super_block *sb = PDE_DATA(inode); 2299 int rc; 2300 2301 rc = seq_open(file, &ext4_mb_seq_groups_ops); 2302 if (rc == 0) { 2303 struct seq_file *m = file->private_data; 2304 m->private = sb; 2305 } 2306 return rc; 2307 2308 } 2309 2310 static const struct file_operations ext4_mb_seq_groups_fops = { 2311 .owner = THIS_MODULE, 2312 .open = ext4_mb_seq_groups_open, 2313 .read = seq_read, 2314 .llseek = seq_lseek, 2315 .release = seq_release, 2316 }; 2317 2318 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 2319 { 2320 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2321 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 2322 2323 BUG_ON(!cachep); 2324 return cachep; 2325 } 2326 2327 /* 2328 * Allocate the top-level s_group_info array for the specified number 2329 * of groups 2330 */ 2331 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 2332 { 2333 struct ext4_sb_info *sbi = EXT4_SB(sb); 2334 unsigned size; 2335 struct ext4_group_info ***new_groupinfo; 2336 2337 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 2338 EXT4_DESC_PER_BLOCK_BITS(sb); 2339 if (size <= sbi->s_group_info_size) 2340 return 0; 2341 2342 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2343 new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); 2344 if (!new_groupinfo) { 2345 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2346 return -ENOMEM; 2347 } 2348 if (sbi->s_group_info) { 2349 memcpy(new_groupinfo, sbi->s_group_info, 2350 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2351 ext4_kvfree(sbi->s_group_info); 2352 } 2353 sbi->s_group_info = new_groupinfo; 2354 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2355 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 2356 sbi->s_group_info_size); 2357 return 0; 2358 } 2359 2360 /* Create and initialize ext4_group_info data for the given group. */ 2361 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2362 struct ext4_group_desc *desc) 2363 { 2364 int i; 2365 int metalen = 0; 2366 struct ext4_sb_info *sbi = EXT4_SB(sb); 2367 struct ext4_group_info **meta_group_info; 2368 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2369 2370 /* 2371 * First check if this group is the first of a reserved block. 
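	 * (a "reserved block" here means one block's worth of group
	 * descriptors; with 4KiB blocks and 32-byte descriptors, for
	 * example, EXT4_DESC_PER_BLOCK(sb) is 128, so a fresh pointer
	 * table is needed once every 128 groups).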
2372 * If it's true, we have to allocate a new table of pointers 2373 * to ext4_group_info structures 2374 */ 2375 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2376 metalen = sizeof(*meta_group_info) << 2377 EXT4_DESC_PER_BLOCK_BITS(sb); 2378 meta_group_info = kmalloc(metalen, GFP_KERNEL); 2379 if (meta_group_info == NULL) { 2380 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2381 "for a buddy group"); 2382 goto exit_meta_group_info; 2383 } 2384 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = 2385 meta_group_info; 2386 } 2387 2388 meta_group_info = 2389 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2390 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2391 2392 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL); 2393 if (meta_group_info[i] == NULL) { 2394 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2395 goto exit_group_info; 2396 } 2397 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2398 &(meta_group_info[i]->bb_state)); 2399 2400 /* 2401 * initialize bb_free to be able to skip 2402 * empty groups without initialization 2403 */ 2404 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2405 meta_group_info[i]->bb_free = 2406 ext4_free_clusters_after_init(sb, group, desc); 2407 } else { 2408 meta_group_info[i]->bb_free = 2409 ext4_free_group_clusters(sb, desc); 2410 } 2411 2412 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2413 init_rwsem(&meta_group_info[i]->alloc_sem); 2414 meta_group_info[i]->bb_free_root = RB_ROOT; 2415 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2416 2417 #ifdef DOUBLE_CHECK 2418 { 2419 struct buffer_head *bh; 2420 meta_group_info[i]->bb_bitmap = 2421 kmalloc(sb->s_blocksize, GFP_KERNEL); 2422 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2423 bh = ext4_read_block_bitmap(sb, group); 2424 BUG_ON(bh == NULL); 2425 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, 2426 sb->s_blocksize); 2427 put_bh(bh); 2428 } 2429 #endif 2430 2431 return 0; 2432 2433 exit_group_info: 2434 /* If a meta_group_info table has been allocated, release it now */ 2435 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2436 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); 2437 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; 2438 } 2439 exit_meta_group_info: 2440 return -ENOMEM; 2441 } /* ext4_mb_add_groupinfo */ 2442 2443 static int ext4_mb_init_backend(struct super_block *sb) 2444 { 2445 ext4_group_t ngroups = ext4_get_groups_count(sb); 2446 ext4_group_t i; 2447 struct ext4_sb_info *sbi = EXT4_SB(sb); 2448 int err; 2449 struct ext4_group_desc *desc; 2450 struct kmem_cache *cachep; 2451 2452 err = ext4_mb_alloc_groupinfo(sb, ngroups); 2453 if (err) 2454 return err; 2455 2456 sbi->s_buddy_cache = new_inode(sb); 2457 if (sbi->s_buddy_cache == NULL) { 2458 ext4_msg(sb, KERN_ERR, "can't get new inode"); 2459 goto err_freesgi; 2460 } 2461 /* To avoid potentially colliding with an valid on-disk inode number, 2462 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 2463 * not in the inode hash, so it should never be found by iget(), but 2464 * this will avoid confusion if it ever shows up during debugging. 
*/ 2465 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 2466 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2467 for (i = 0; i < ngroups; i++) { 2468 desc = ext4_get_group_desc(sb, i, NULL); 2469 if (desc == NULL) { 2470 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 2471 goto err_freebuddy; 2472 } 2473 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2474 goto err_freebuddy; 2475 } 2476 2477 return 0; 2478 2479 err_freebuddy: 2480 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2481 while (i-- > 0) 2482 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 2483 i = sbi->s_group_info_size; 2484 while (i-- > 0) 2485 kfree(sbi->s_group_info[i]); 2486 iput(sbi->s_buddy_cache); 2487 err_freesgi: 2488 ext4_kvfree(sbi->s_group_info); 2489 return -ENOMEM; 2490 } 2491 2492 static void ext4_groupinfo_destroy_slabs(void) 2493 { 2494 int i; 2495 2496 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 2497 if (ext4_groupinfo_caches[i]) 2498 kmem_cache_destroy(ext4_groupinfo_caches[i]); 2499 ext4_groupinfo_caches[i] = NULL; 2500 } 2501 } 2502 2503 static int ext4_groupinfo_create_slab(size_t size) 2504 { 2505 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 2506 int slab_size; 2507 int blocksize_bits = order_base_2(size); 2508 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2509 struct kmem_cache *cachep; 2510 2511 if (cache_index >= NR_GRPINFO_CACHES) 2512 return -EINVAL; 2513 2514 if (unlikely(cache_index < 0)) 2515 cache_index = 0; 2516 2517 mutex_lock(&ext4_grpinfo_slab_create_mutex); 2518 if (ext4_groupinfo_caches[cache_index]) { 2519 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2520 return 0; /* Already created */ 2521 } 2522 2523 slab_size = offsetof(struct ext4_group_info, 2524 bb_counters[blocksize_bits + 2]); 2525 2526 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 2527 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 2528 NULL); 2529 2530 ext4_groupinfo_caches[cache_index] = cachep; 2531 2532 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2533 if (!cachep) { 2534 printk(KERN_EMERG 2535 "EXT4-fs: no memory for groupinfo slab cache\n"); 2536 return -ENOMEM; 2537 } 2538 2539 return 0; 2540 } 2541 2542 int ext4_mb_init(struct super_block *sb) 2543 { 2544 struct ext4_sb_info *sbi = EXT4_SB(sb); 2545 unsigned i, j; 2546 unsigned offset; 2547 unsigned max; 2548 int ret; 2549 2550 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2551 2552 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2553 if (sbi->s_mb_offsets == NULL) { 2554 ret = -ENOMEM; 2555 goto out; 2556 } 2557 2558 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 2559 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2560 if (sbi->s_mb_maxs == NULL) { 2561 ret = -ENOMEM; 2562 goto out; 2563 } 2564 2565 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 2566 if (ret < 0) 2567 goto out; 2568 2569 /* order 0 is regular bitmap */ 2570 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2571 sbi->s_mb_offsets[0] = 0; 2572 2573 i = 1; 2574 offset = 0; 2575 max = sb->s_blocksize << 2; 2576 do { 2577 sbi->s_mb_offsets[i] = offset; 2578 sbi->s_mb_maxs[i] = max; 2579 offset += 1 << (sb->s_blocksize_bits - i); 2580 max = max >> 1; 2581 i++; 2582 } while (i <= sb->s_blocksize_bits + 1); 2583 2584 spin_lock_init(&sbi->s_md_lock); 2585 spin_lock_init(&sbi->s_bal_lock); 2586 2587 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2588 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2589 sbi->s_mb_stats = MB_DEFAULT_STATS; 2590 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2591 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2592 /* 2593 
* The default group preallocation is 512, which for 4k block 2594 * sizes translates to 2 megabytes. However for bigalloc file 2595 * systems, this is probably too big (i.e, if the cluster size 2596 * is 1 megabyte, then group preallocation size becomes half a 2597 * gigabyte!). As a default, we will keep a two megabyte 2598 * group pralloc size for cluster sizes up to 64k, and after 2599 * that, we will force a minimum group preallocation size of 2600 * 32 clusters. This translates to 8 megs when the cluster 2601 * size is 256k, and 32 megs when the cluster size is 1 meg, 2602 * which seems reasonable as a default. 2603 */ 2604 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 2605 sbi->s_cluster_bits, 32); 2606 /* 2607 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 2608 * to the lowest multiple of s_stripe which is bigger than 2609 * the s_mb_group_prealloc as determined above. We want 2610 * the preallocation size to be an exact multiple of the 2611 * RAID stripe size so that preallocations don't fragment 2612 * the stripes. 2613 */ 2614 if (sbi->s_stripe > 1) { 2615 sbi->s_mb_group_prealloc = roundup( 2616 sbi->s_mb_group_prealloc, sbi->s_stripe); 2617 } 2618 2619 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2620 if (sbi->s_locality_groups == NULL) { 2621 ret = -ENOMEM; 2622 goto out; 2623 } 2624 for_each_possible_cpu(i) { 2625 struct ext4_locality_group *lg; 2626 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2627 mutex_init(&lg->lg_mutex); 2628 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2629 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2630 spin_lock_init(&lg->lg_prealloc_lock); 2631 } 2632 2633 /* init file for buddy data */ 2634 ret = ext4_mb_init_backend(sb); 2635 if (ret != 0) 2636 goto out_free_locality_groups; 2637 2638 if (sbi->s_proc) 2639 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc, 2640 &ext4_mb_seq_groups_fops, sb); 2641 2642 return 0; 2643 2644 out_free_locality_groups: 2645 free_percpu(sbi->s_locality_groups); 2646 sbi->s_locality_groups = NULL; 2647 out: 2648 kfree(sbi->s_mb_offsets); 2649 sbi->s_mb_offsets = NULL; 2650 kfree(sbi->s_mb_maxs); 2651 sbi->s_mb_maxs = NULL; 2652 return ret; 2653 } 2654 2655 /* need to called with the ext4 group lock held */ 2656 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2657 { 2658 struct ext4_prealloc_space *pa; 2659 struct list_head *cur, *tmp; 2660 int count = 0; 2661 2662 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2663 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2664 list_del(&pa->pa_group_list); 2665 count++; 2666 kmem_cache_free(ext4_pspace_cachep, pa); 2667 } 2668 if (count) 2669 mb_debug(1, "mballoc: %u PAs left\n", count); 2670 2671 } 2672 2673 int ext4_mb_release(struct super_block *sb) 2674 { 2675 ext4_group_t ngroups = ext4_get_groups_count(sb); 2676 ext4_group_t i; 2677 int num_meta_group_infos; 2678 struct ext4_group_info *grinfo; 2679 struct ext4_sb_info *sbi = EXT4_SB(sb); 2680 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2681 2682 if (sbi->s_proc) 2683 remove_proc_entry("mb_groups", sbi->s_proc); 2684 2685 if (sbi->s_group_info) { 2686 for (i = 0; i < ngroups; i++) { 2687 grinfo = ext4_get_group_info(sb, i); 2688 #ifdef DOUBLE_CHECK 2689 kfree(grinfo->bb_bitmap); 2690 #endif 2691 ext4_lock_group(sb, i); 2692 ext4_mb_cleanup_pa(grinfo); 2693 ext4_unlock_group(sb, i); 2694 kmem_cache_free(cachep, grinfo); 2695 } 2696 num_meta_group_infos = (ngroups + 2697 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2698 
EXT4_DESC_PER_BLOCK_BITS(sb); 2699 for (i = 0; i < num_meta_group_infos; i++) 2700 kfree(sbi->s_group_info[i]); 2701 ext4_kvfree(sbi->s_group_info); 2702 } 2703 kfree(sbi->s_mb_offsets); 2704 kfree(sbi->s_mb_maxs); 2705 if (sbi->s_buddy_cache) 2706 iput(sbi->s_buddy_cache); 2707 if (sbi->s_mb_stats) { 2708 ext4_msg(sb, KERN_INFO, 2709 "mballoc: %u blocks %u reqs (%u success)", 2710 atomic_read(&sbi->s_bal_allocated), 2711 atomic_read(&sbi->s_bal_reqs), 2712 atomic_read(&sbi->s_bal_success)); 2713 ext4_msg(sb, KERN_INFO, 2714 "mballoc: %u extents scanned, %u goal hits, " 2715 "%u 2^N hits, %u breaks, %u lost", 2716 atomic_read(&sbi->s_bal_ex_scanned), 2717 atomic_read(&sbi->s_bal_goals), 2718 atomic_read(&sbi->s_bal_2orders), 2719 atomic_read(&sbi->s_bal_breaks), 2720 atomic_read(&sbi->s_mb_lost_chunks)); 2721 ext4_msg(sb, KERN_INFO, 2722 "mballoc: %lu generated and it took %Lu", 2723 sbi->s_mb_buddies_generated, 2724 sbi->s_mb_generation_time); 2725 ext4_msg(sb, KERN_INFO, 2726 "mballoc: %u preallocated, %u discarded", 2727 atomic_read(&sbi->s_mb_preallocated), 2728 atomic_read(&sbi->s_mb_discarded)); 2729 } 2730 2731 free_percpu(sbi->s_locality_groups); 2732 2733 return 0; 2734 } 2735 2736 static inline int ext4_issue_discard(struct super_block *sb, 2737 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 2738 { 2739 ext4_fsblk_t discard_block; 2740 2741 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 2742 ext4_group_first_block_no(sb, block_group)); 2743 count = EXT4_C2B(EXT4_SB(sb), count); 2744 trace_ext4_discard_blocks(sb, 2745 (unsigned long long) discard_block, count); 2746 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 2747 } 2748 2749 /* 2750 * This function is called by the jbd2 layer once the commit has finished, 2751 * so we know we can free the blocks that were released with that commit. 2752 */ 2753 static void ext4_free_data_callback(struct super_block *sb, 2754 struct ext4_journal_cb_entry *jce, 2755 int rc) 2756 { 2757 struct ext4_free_data *entry = (struct ext4_free_data *)jce; 2758 struct ext4_buddy e4b; 2759 struct ext4_group_info *db; 2760 int err, count = 0, count2 = 0; 2761 2762 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2763 entry->efd_count, entry->efd_group, entry); 2764 2765 if (test_opt(sb, DISCARD)) { 2766 err = ext4_issue_discard(sb, entry->efd_group, 2767 entry->efd_start_cluster, 2768 entry->efd_count); 2769 if (err && err != -EOPNOTSUPP) 2770 ext4_msg(sb, KERN_WARNING, "discard request in" 2771 " group:%d block:%d count:%d failed" 2772 " with %d", entry->efd_group, 2773 entry->efd_start_cluster, 2774 entry->efd_count, err); 2775 } 2776 2777 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 2778 /* we expect to find existing buddy because it's pinned */ 2779 BUG_ON(err != 0); 2780 2781 2782 db = e4b.bd_info; 2783 /* there are blocks to put in buddy to make them really free */ 2784 count += entry->efd_count; 2785 count2++; 2786 ext4_lock_group(sb, entry->efd_group); 2787 /* Take it out of per group rb tree */ 2788 rb_erase(&entry->efd_node, &(db->bb_free_root)); 2789 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 2790 2791 /* 2792 * Clear the trimmed flag for the group so that the next 2793 * ext4_trim_fs can trim it. 2794 * If the volume is mounted with -o discard, online discard 2795 * is supported and the free blocks will be trimmed online. 
2796 */ 2797 if (!test_opt(sb, DISCARD)) 2798 EXT4_MB_GRP_CLEAR_TRIMMED(db); 2799 2800 if (!db->bb_free_root.rb_node) { 2801 /* No more items in the per group rb tree 2802 * balance refcounts from ext4_mb_free_metadata() 2803 */ 2804 page_cache_release(e4b.bd_buddy_page); 2805 page_cache_release(e4b.bd_bitmap_page); 2806 } 2807 ext4_unlock_group(sb, entry->efd_group); 2808 kmem_cache_free(ext4_free_data_cachep, entry); 2809 ext4_mb_unload_buddy(&e4b); 2810 2811 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2812 } 2813 2814 int __init ext4_init_mballoc(void) 2815 { 2816 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 2817 SLAB_RECLAIM_ACCOUNT); 2818 if (ext4_pspace_cachep == NULL) 2819 return -ENOMEM; 2820 2821 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 2822 SLAB_RECLAIM_ACCOUNT); 2823 if (ext4_ac_cachep == NULL) { 2824 kmem_cache_destroy(ext4_pspace_cachep); 2825 return -ENOMEM; 2826 } 2827 2828 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 2829 SLAB_RECLAIM_ACCOUNT); 2830 if (ext4_free_data_cachep == NULL) { 2831 kmem_cache_destroy(ext4_pspace_cachep); 2832 kmem_cache_destroy(ext4_ac_cachep); 2833 return -ENOMEM; 2834 } 2835 return 0; 2836 } 2837 2838 void ext4_exit_mballoc(void) 2839 { 2840 /* 2841 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2842 * before destroying the slab cache. 2843 */ 2844 rcu_barrier(); 2845 kmem_cache_destroy(ext4_pspace_cachep); 2846 kmem_cache_destroy(ext4_ac_cachep); 2847 kmem_cache_destroy(ext4_free_data_cachep); 2848 ext4_groupinfo_destroy_slabs(); 2849 } 2850 2851 2852 /* 2853 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 2854 * Returns 0 if success or error code 2855 */ 2856 static noinline_for_stack int 2857 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2858 handle_t *handle, unsigned int reserv_clstrs) 2859 { 2860 struct buffer_head *bitmap_bh = NULL; 2861 struct ext4_group_desc *gdp; 2862 struct buffer_head *gdp_bh; 2863 struct ext4_sb_info *sbi; 2864 struct super_block *sb; 2865 ext4_fsblk_t block; 2866 int err, len; 2867 2868 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 2869 BUG_ON(ac->ac_b_ex.fe_len <= 0); 2870 2871 sb = ac->ac_sb; 2872 sbi = EXT4_SB(sb); 2873 2874 err = -EIO; 2875 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 2876 if (!bitmap_bh) 2877 goto out_err; 2878 2879 BUFFER_TRACE(bitmap_bh, "getting write access"); 2880 err = ext4_journal_get_write_access(handle, bitmap_bh); 2881 if (err) 2882 goto out_err; 2883 2884 err = -EIO; 2885 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 2886 if (!gdp) 2887 goto out_err; 2888 2889 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 2890 ext4_free_group_clusters(sb, gdp)); 2891 2892 BUFFER_TRACE(gdp_bh, "get_write_access"); 2893 err = ext4_journal_get_write_access(handle, gdp_bh); 2894 if (err) 2895 goto out_err; 2896 2897 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 2898 2899 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 2900 if (!ext4_data_block_valid(sbi, block, len)) { 2901 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2902 "fs metadata", block, block+len); 2903 /* File system mounted not to panic on error 2904 * Fix the bitmap and repeat the block allocation 2905 * We leak some of the blocks here. 
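			 * Marking the range as in-use in the on-disk bitmap below
			 * at least keeps it from being handed out again, and the
			 * -EAGAIN set here lets the caller retry the allocation
			 * elsewhere.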
2906 */ 2907 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2908 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2909 ac->ac_b_ex.fe_len); 2910 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2911 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2912 if (!err) 2913 err = -EAGAIN; 2914 goto out_err; 2915 } 2916 2917 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2918 #ifdef AGGRESSIVE_CHECK 2919 { 2920 int i; 2921 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 2922 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 2923 bitmap_bh->b_data)); 2924 } 2925 } 2926 #endif 2927 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2928 ac->ac_b_ex.fe_len); 2929 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2930 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2931 ext4_free_group_clusters_set(sb, gdp, 2932 ext4_free_clusters_after_init(sb, 2933 ac->ac_b_ex.fe_group, gdp)); 2934 } 2935 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2936 ext4_free_group_clusters_set(sb, gdp, len); 2937 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 2938 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 2939 2940 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2941 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 2942 /* 2943 * Now reduce the dirty block count also. Should not go negative 2944 */ 2945 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 2946 /* release all the reserved blocks if non delalloc */ 2947 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 2948 reserv_clstrs); 2949 2950 if (sbi->s_log_groups_per_flex) { 2951 ext4_group_t flex_group = ext4_flex_group(sbi, 2952 ac->ac_b_ex.fe_group); 2953 atomic64_sub(ac->ac_b_ex.fe_len, 2954 &sbi->s_flex_groups[flex_group].free_clusters); 2955 } 2956 2957 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2958 if (err) 2959 goto out_err; 2960 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 2961 2962 out_err: 2963 brelse(bitmap_bh); 2964 return err; 2965 } 2966 2967 /* 2968 * here we normalize request for locality group 2969 * Group request are normalized to s_mb_group_prealloc, which goes to 2970 * s_strip if we set the same via mount option. 2971 * s_mb_group_prealloc can be configured via 2972 * /sys/fs/ext4/<partition>/mb_group_prealloc 2973 * 2974 * XXX: should we try to preallocate more than the group has now? 
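 * In practice the helper below simply overwrites the goal length
 * (ac->ac_g_ex.fe_len) with s_mb_group_prealloc, independent of how
 * much the original request asked for.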
2975 */ 2976 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 2977 { 2978 struct super_block *sb = ac->ac_sb; 2979 struct ext4_locality_group *lg = ac->ac_lg; 2980 2981 BUG_ON(lg == NULL); 2982 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 2983 mb_debug(1, "#%u: goal %u blocks for locality group\n", 2984 current->pid, ac->ac_g_ex.fe_len); 2985 } 2986 2987 /* 2988 * Normalization means making request better in terms of 2989 * size and alignment 2990 */ 2991 static noinline_for_stack void 2992 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 2993 struct ext4_allocation_request *ar) 2994 { 2995 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2996 int bsbits, max; 2997 ext4_lblk_t end; 2998 loff_t size, start_off; 2999 loff_t orig_size __maybe_unused; 3000 ext4_lblk_t start; 3001 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3002 struct ext4_prealloc_space *pa; 3003 3004 /* do normalize only data requests, metadata requests 3005 do not need preallocation */ 3006 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3007 return; 3008 3009 /* sometime caller may want exact blocks */ 3010 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3011 return; 3012 3013 /* caller may indicate that preallocation isn't 3014 * required (it's a tail, for example) */ 3015 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 3016 return; 3017 3018 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 3019 ext4_mb_normalize_group_request(ac); 3020 return ; 3021 } 3022 3023 bsbits = ac->ac_sb->s_blocksize_bits; 3024 3025 /* first, let's learn actual file size 3026 * given current request is allocated */ 3027 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 3028 size = size << bsbits; 3029 if (size < i_size_read(ac->ac_inode)) 3030 size = i_size_read(ac->ac_inode); 3031 orig_size = size; 3032 3033 /* max size of free chunks */ 3034 max = 2 << bsbits; 3035 3036 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 3037 (req <= (size) || max <= (chunk_size)) 3038 3039 /* first, try to predict filesize */ 3040 /* XXX: should this table be tunable? 
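	 * For now the table is fixed.  Illustrative examples (the sizes
	 * compared here are byte values): a request that would leave the
	 * file at about 100KiB is rounded up to a 128KiB goal, while one
	 * that would leave it at about 3MiB becomes a 2MiB goal whose
	 * logical start is rounded down to a 2MiB boundary via start_off.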
*/ 3041 start_off = 0; 3042 if (size <= 16 * 1024) { 3043 size = 16 * 1024; 3044 } else if (size <= 32 * 1024) { 3045 size = 32 * 1024; 3046 } else if (size <= 64 * 1024) { 3047 size = 64 * 1024; 3048 } else if (size <= 128 * 1024) { 3049 size = 128 * 1024; 3050 } else if (size <= 256 * 1024) { 3051 size = 256 * 1024; 3052 } else if (size <= 512 * 1024) { 3053 size = 512 * 1024; 3054 } else if (size <= 1024 * 1024) { 3055 size = 1024 * 1024; 3056 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 3057 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3058 (21 - bsbits)) << 21; 3059 size = 2 * 1024 * 1024; 3060 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 3061 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3062 (22 - bsbits)) << 22; 3063 size = 4 * 1024 * 1024; 3064 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 3065 (8<<20)>>bsbits, max, 8 * 1024)) { 3066 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3067 (23 - bsbits)) << 23; 3068 size = 8 * 1024 * 1024; 3069 } else { 3070 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits; 3071 size = ac->ac_o_ex.fe_len << bsbits; 3072 } 3073 size = size >> bsbits; 3074 start = start_off >> bsbits; 3075 3076 /* don't cover already allocated blocks in selected range */ 3077 if (ar->pleft && start <= ar->lleft) { 3078 size -= ar->lleft + 1 - start; 3079 start = ar->lleft + 1; 3080 } 3081 if (ar->pright && start + size - 1 >= ar->lright) 3082 size -= start + size - ar->lright; 3083 3084 end = start + size; 3085 3086 /* check we don't cross already preallocated blocks */ 3087 rcu_read_lock(); 3088 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3089 ext4_lblk_t pa_end; 3090 3091 if (pa->pa_deleted) 3092 continue; 3093 spin_lock(&pa->pa_lock); 3094 if (pa->pa_deleted) { 3095 spin_unlock(&pa->pa_lock); 3096 continue; 3097 } 3098 3099 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3100 pa->pa_len); 3101 3102 /* PA must not overlap original request */ 3103 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 3104 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 3105 3106 /* skip PAs this normalized request doesn't overlap with */ 3107 if (pa->pa_lstart >= end || pa_end <= start) { 3108 spin_unlock(&pa->pa_lock); 3109 continue; 3110 } 3111 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 3112 3113 /* adjust start or end to be adjacent to this pa */ 3114 if (pa_end <= ac->ac_o_ex.fe_logical) { 3115 BUG_ON(pa_end < start); 3116 start = pa_end; 3117 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 3118 BUG_ON(pa->pa_lstart > end); 3119 end = pa->pa_lstart; 3120 } 3121 spin_unlock(&pa->pa_lock); 3122 } 3123 rcu_read_unlock(); 3124 size = end - start; 3125 3126 /* XXX: extra loop to check we really don't overlap preallocations */ 3127 rcu_read_lock(); 3128 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3129 ext4_lblk_t pa_end; 3130 3131 spin_lock(&pa->pa_lock); 3132 if (pa->pa_deleted == 0) { 3133 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3134 pa->pa_len); 3135 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 3136 } 3137 spin_unlock(&pa->pa_lock); 3138 } 3139 rcu_read_unlock(); 3140 3141 if (start + size <= ac->ac_o_ex.fe_logical && 3142 start > ac->ac_o_ex.fe_logical) { 3143 ext4_msg(ac->ac_sb, KERN_ERR, 3144 "start %lu, size %lu, fe_logical %lu", 3145 (unsigned long) start, (unsigned long) size, 3146 (unsigned long) ac->ac_o_ex.fe_logical); 3147 } 3148 BUG_ON(start + size <= ac->ac_o_ex.fe_logical && 3149 start > ac->ac_o_ex.fe_logical); 3150 BUG_ON(size <= 0 || size > 
EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3151 3152 /* now prepare goal request */ 3153 3154 /* XXX: is it better to align blocks WRT to logical 3155 * placement or satisfy big request as is */ 3156 ac->ac_g_ex.fe_logical = start; 3157 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 3158 3159 /* define goal start in order to merge */ 3160 if (ar->pright && (ar->lright == (start + size))) { 3161 /* merge to the right */ 3162 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 3163 &ac->ac_f_ex.fe_group, 3164 &ac->ac_f_ex.fe_start); 3165 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3166 } 3167 if (ar->pleft && (ar->lleft + 1 == start)) { 3168 /* merge to the left */ 3169 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 3170 &ac->ac_f_ex.fe_group, 3171 &ac->ac_f_ex.fe_start); 3172 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3173 } 3174 3175 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, 3176 (unsigned) orig_size, (unsigned) start); 3177 } 3178 3179 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3180 { 3181 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3182 3183 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3184 atomic_inc(&sbi->s_bal_reqs); 3185 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3186 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3187 atomic_inc(&sbi->s_bal_success); 3188 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3189 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3190 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3191 atomic_inc(&sbi->s_bal_goals); 3192 if (ac->ac_found > sbi->s_mb_max_to_scan) 3193 atomic_inc(&sbi->s_bal_breaks); 3194 } 3195 3196 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3197 trace_ext4_mballoc_alloc(ac); 3198 else 3199 trace_ext4_mballoc_prealloc(ac); 3200 } 3201 3202 /* 3203 * Called on failure; free up any blocks from the inode PA for this 3204 * context. We don't need this for MB_GROUP_PA because we only change 3205 * pa_free in ext4_mb_release_context(), but on failure, we've already 3206 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 
3207 */ 3208 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3209 { 3210 struct ext4_prealloc_space *pa = ac->ac_pa; 3211 3212 if (pa && pa->pa_type == MB_INODE_PA) 3213 pa->pa_free += ac->ac_b_ex.fe_len; 3214 } 3215 3216 /* 3217 * use blocks preallocated to inode 3218 */ 3219 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3220 struct ext4_prealloc_space *pa) 3221 { 3222 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3223 ext4_fsblk_t start; 3224 ext4_fsblk_t end; 3225 int len; 3226 3227 /* found preallocated blocks, use them */ 3228 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3229 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 3230 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 3231 len = EXT4_NUM_B2C(sbi, end - start); 3232 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3233 &ac->ac_b_ex.fe_start); 3234 ac->ac_b_ex.fe_len = len; 3235 ac->ac_status = AC_STATUS_FOUND; 3236 ac->ac_pa = pa; 3237 3238 BUG_ON(start < pa->pa_pstart); 3239 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 3240 BUG_ON(pa->pa_free < len); 3241 pa->pa_free -= len; 3242 3243 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); 3244 } 3245 3246 /* 3247 * use blocks preallocated to locality group 3248 */ 3249 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3250 struct ext4_prealloc_space *pa) 3251 { 3252 unsigned int len = ac->ac_o_ex.fe_len; 3253 3254 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3255 &ac->ac_b_ex.fe_group, 3256 &ac->ac_b_ex.fe_start); 3257 ac->ac_b_ex.fe_len = len; 3258 ac->ac_status = AC_STATUS_FOUND; 3259 ac->ac_pa = pa; 3260 3261 /* we don't correct pa_pstart or pa_plen here to avoid 3262 * possible race when the group is being loaded concurrently 3263 * instead we correct pa later, after blocks are marked 3264 * in on-disk bitmap -- see ext4_mb_release_context() 3265 * Other CPUs are prevented from allocating from this pa by lg_mutex 3266 */ 3267 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3268 } 3269 3270 /* 3271 * Return the prealloc space that have minimal distance 3272 * from the goal block. @cpa is the prealloc 3273 * space that is having currently known minimal distance 3274 * from the goal block. 
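 * Distances are compared on physical block numbers; the PA whose
 * pa_pstart is currently closest to the goal keeps an elevated
 * reference count, and the reference held on the previous candidate
 * is dropped.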
3275 */ 3276 static struct ext4_prealloc_space * 3277 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3278 struct ext4_prealloc_space *pa, 3279 struct ext4_prealloc_space *cpa) 3280 { 3281 ext4_fsblk_t cur_distance, new_distance; 3282 3283 if (cpa == NULL) { 3284 atomic_inc(&pa->pa_count); 3285 return pa; 3286 } 3287 cur_distance = abs(goal_block - cpa->pa_pstart); 3288 new_distance = abs(goal_block - pa->pa_pstart); 3289 3290 if (cur_distance <= new_distance) 3291 return cpa; 3292 3293 /* drop the previous reference */ 3294 atomic_dec(&cpa->pa_count); 3295 atomic_inc(&pa->pa_count); 3296 return pa; 3297 } 3298 3299 /* 3300 * search goal blocks in preallocated space 3301 */ 3302 static noinline_for_stack int 3303 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3304 { 3305 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3306 int order, i; 3307 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3308 struct ext4_locality_group *lg; 3309 struct ext4_prealloc_space *pa, *cpa = NULL; 3310 ext4_fsblk_t goal_block; 3311 3312 /* only data can be preallocated */ 3313 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3314 return 0; 3315 3316 /* first, try per-file preallocation */ 3317 rcu_read_lock(); 3318 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3319 3320 /* all fields in this condition don't change, 3321 * so we can skip locking for them */ 3322 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3323 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 3324 EXT4_C2B(sbi, pa->pa_len))) 3325 continue; 3326 3327 /* non-extent files can't have physical blocks past 2^32 */ 3328 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3329 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 3330 EXT4_MAX_BLOCK_FILE_PHYS)) 3331 continue; 3332 3333 /* found preallocated blocks, use them */ 3334 spin_lock(&pa->pa_lock); 3335 if (pa->pa_deleted == 0 && pa->pa_free) { 3336 atomic_inc(&pa->pa_count); 3337 ext4_mb_use_inode_pa(ac, pa); 3338 spin_unlock(&pa->pa_lock); 3339 ac->ac_criteria = 10; 3340 rcu_read_unlock(); 3341 return 1; 3342 } 3343 spin_unlock(&pa->pa_lock); 3344 } 3345 rcu_read_unlock(); 3346 3347 /* can we use group allocation? */ 3348 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3349 return 0; 3350 3351 /* inode may have no locality group for some reason */ 3352 lg = ac->ac_lg; 3353 if (lg == NULL) 3354 return 0; 3355 order = fls(ac->ac_o_ex.fe_len) - 1; 3356 if (order > PREALLOC_TB_SIZE - 1) 3357 /* The max size of hash table is PREALLOC_TB_SIZE */ 3358 order = PREALLOC_TB_SIZE - 1; 3359 3360 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3361 /* 3362 * search for the prealloc space that is having 3363 * minimal distance from the goal block. 3364 */ 3365 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3366 rcu_read_lock(); 3367 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3368 pa_inode_list) { 3369 spin_lock(&pa->pa_lock); 3370 if (pa->pa_deleted == 0 && 3371 pa->pa_free >= ac->ac_o_ex.fe_len) { 3372 3373 cpa = ext4_mb_check_group_pa(goal_block, 3374 pa, cpa); 3375 } 3376 spin_unlock(&pa->pa_lock); 3377 } 3378 rcu_read_unlock(); 3379 } 3380 if (cpa) { 3381 ext4_mb_use_group_pa(ac, cpa); 3382 ac->ac_criteria = 20; 3383 return 1; 3384 } 3385 return 0; 3386 } 3387 3388 /* 3389 * the function goes through all block freed in the group 3390 * but not yet committed and marks them used in in-core bitmap. 
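 * (these are the ext4_free_data entries parked in the group's
 * bb_free_root rb-tree, i.e. blocks freed by a transaction that has
 * not committed yet).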
3391 * buddy must be generated from this bitmap 3392 * Need to be called with the ext4 group lock held 3393 */ 3394 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 3395 ext4_group_t group) 3396 { 3397 struct rb_node *n; 3398 struct ext4_group_info *grp; 3399 struct ext4_free_data *entry; 3400 3401 grp = ext4_get_group_info(sb, group); 3402 n = rb_first(&(grp->bb_free_root)); 3403 3404 while (n) { 3405 entry = rb_entry(n, struct ext4_free_data, efd_node); 3406 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 3407 n = rb_next(n); 3408 } 3409 return; 3410 } 3411 3412 /* 3413 * the function goes through all preallocation in this group and marks them 3414 * used in in-core bitmap. buddy must be generated from this bitmap 3415 * Need to be called with ext4 group lock held 3416 */ 3417 static noinline_for_stack 3418 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 3419 ext4_group_t group) 3420 { 3421 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3422 struct ext4_prealloc_space *pa; 3423 struct list_head *cur; 3424 ext4_group_t groupnr; 3425 ext4_grpblk_t start; 3426 int preallocated = 0; 3427 int len; 3428 3429 /* all form of preallocation discards first load group, 3430 * so the only competing code is preallocation use. 3431 * we don't need any locking here 3432 * notice we do NOT ignore preallocations with pa_deleted 3433 * otherwise we could leave used blocks available for 3434 * allocation in buddy when concurrent ext4_mb_put_pa() 3435 * is dropping preallocation 3436 */ 3437 list_for_each(cur, &grp->bb_prealloc_list) { 3438 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3439 spin_lock(&pa->pa_lock); 3440 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3441 &groupnr, &start); 3442 len = pa->pa_len; 3443 spin_unlock(&pa->pa_lock); 3444 if (unlikely(len == 0)) 3445 continue; 3446 BUG_ON(groupnr != group); 3447 ext4_set_bits(bitmap, start, len); 3448 preallocated += len; 3449 } 3450 mb_debug(1, "prellocated %u for group %u\n", preallocated, group); 3451 } 3452 3453 static void ext4_mb_pa_callback(struct rcu_head *head) 3454 { 3455 struct ext4_prealloc_space *pa; 3456 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3457 3458 BUG_ON(atomic_read(&pa->pa_count)); 3459 BUG_ON(pa->pa_deleted == 0); 3460 kmem_cache_free(ext4_pspace_cachep, pa); 3461 } 3462 3463 /* 3464 * drops a reference to preallocated space descriptor 3465 * if this was the last reference and the space is consumed 3466 */ 3467 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3468 struct super_block *sb, struct ext4_prealloc_space *pa) 3469 { 3470 ext4_group_t grp; 3471 ext4_fsblk_t grp_blk; 3472 3473 /* in this short window concurrent discard can set pa_deleted */ 3474 spin_lock(&pa->pa_lock); 3475 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 3476 spin_unlock(&pa->pa_lock); 3477 return; 3478 } 3479 3480 if (pa->pa_deleted == 1) { 3481 spin_unlock(&pa->pa_lock); 3482 return; 3483 } 3484 3485 pa->pa_deleted = 1; 3486 spin_unlock(&pa->pa_lock); 3487 3488 grp_blk = pa->pa_pstart; 3489 /* 3490 * If doing group-based preallocation, pa_pstart may be in the 3491 * next group when pa is used up 3492 */ 3493 if (pa->pa_type == MB_GROUP_PA) 3494 grp_blk--; 3495 3496 grp = ext4_get_group_number(sb, grp_blk); 3497 3498 /* 3499 * possible race: 3500 * 3501 * P1 (buddy init) P2 (regular allocation) 3502 * find block B in PA 3503 * copy on-disk bitmap to buddy 3504 * mark B in on-disk bitmap 3505 * 
drop PA from group 3506 * mark all PAs in buddy 3507 * 3508 * thus, P1 initializes buddy with B available. to prevent this 3509 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 3510 * against that pair 3511 */ 3512 ext4_lock_group(sb, grp); 3513 list_del(&pa->pa_group_list); 3514 ext4_unlock_group(sb, grp); 3515 3516 spin_lock(pa->pa_obj_lock); 3517 list_del_rcu(&pa->pa_inode_list); 3518 spin_unlock(pa->pa_obj_lock); 3519 3520 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3521 } 3522 3523 /* 3524 * creates new preallocated space for given inode 3525 */ 3526 static noinline_for_stack int 3527 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 3528 { 3529 struct super_block *sb = ac->ac_sb; 3530 struct ext4_sb_info *sbi = EXT4_SB(sb); 3531 struct ext4_prealloc_space *pa; 3532 struct ext4_group_info *grp; 3533 struct ext4_inode_info *ei; 3534 3535 /* preallocate only when found space is larger then requested */ 3536 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3537 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3538 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3539 3540 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3541 if (pa == NULL) 3542 return -ENOMEM; 3543 3544 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 3545 int winl; 3546 int wins; 3547 int win; 3548 int offs; 3549 3550 /* we can't allocate as much as normalizer wants. 3551 * so, found space must get proper lstart 3552 * to cover original request */ 3553 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3554 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 3555 3556 /* we're limited by original request in that 3557 * logical block must be covered any way 3558 * winl is window we can move our chunk within */ 3559 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 3560 3561 /* also, we should cover whole original request */ 3562 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 3563 3564 /* the smallest one defines real window */ 3565 win = min(winl, wins); 3566 3567 offs = ac->ac_o_ex.fe_logical % 3568 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3569 if (offs && offs < win) 3570 win = offs; 3571 3572 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 3573 EXT4_NUM_B2C(sbi, win); 3574 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 3575 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 3576 } 3577 3578 /* preallocation can change ac_b_ex, thus we store actually 3579 * allocated blocks for history */ 3580 ac->ac_f_ex = ac->ac_b_ex; 3581 3582 pa->pa_lstart = ac->ac_b_ex.fe_logical; 3583 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3584 pa->pa_len = ac->ac_b_ex.fe_len; 3585 pa->pa_free = pa->pa_len; 3586 atomic_set(&pa->pa_count, 1); 3587 spin_lock_init(&pa->pa_lock); 3588 INIT_LIST_HEAD(&pa->pa_inode_list); 3589 INIT_LIST_HEAD(&pa->pa_group_list); 3590 pa->pa_deleted = 0; 3591 pa->pa_type = MB_INODE_PA; 3592 3593 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, 3594 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3595 trace_ext4_mb_new_inode_pa(ac, pa); 3596 3597 ext4_mb_use_inode_pa(ac, pa); 3598 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 3599 3600 ei = EXT4_I(ac->ac_inode); 3601 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3602 3603 pa->pa_obj_lock = &ei->i_prealloc_lock; 3604 pa->pa_inode = ac->ac_inode; 3605 3606 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3607 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3608 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3609 3610 spin_lock(pa->pa_obj_lock); 3611 list_add_rcu(&pa->pa_inode_list, 
&ei->i_prealloc_list); 3612 spin_unlock(pa->pa_obj_lock); 3613 3614 return 0; 3615 } 3616 3617 /* 3618 * creates new preallocated space for locality group inodes belongs to 3619 */ 3620 static noinline_for_stack int 3621 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 3622 { 3623 struct super_block *sb = ac->ac_sb; 3624 struct ext4_locality_group *lg; 3625 struct ext4_prealloc_space *pa; 3626 struct ext4_group_info *grp; 3627 3628 /* preallocate only when found space is larger then requested */ 3629 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3630 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3631 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3632 3633 BUG_ON(ext4_pspace_cachep == NULL); 3634 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3635 if (pa == NULL) 3636 return -ENOMEM; 3637 3638 /* preallocation can change ac_b_ex, thus we store actually 3639 * allocated blocks for history */ 3640 ac->ac_f_ex = ac->ac_b_ex; 3641 3642 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3643 pa->pa_lstart = pa->pa_pstart; 3644 pa->pa_len = ac->ac_b_ex.fe_len; 3645 pa->pa_free = pa->pa_len; 3646 atomic_set(&pa->pa_count, 1); 3647 spin_lock_init(&pa->pa_lock); 3648 INIT_LIST_HEAD(&pa->pa_inode_list); 3649 INIT_LIST_HEAD(&pa->pa_group_list); 3650 pa->pa_deleted = 0; 3651 pa->pa_type = MB_GROUP_PA; 3652 3653 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, 3654 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3655 trace_ext4_mb_new_group_pa(ac, pa); 3656 3657 ext4_mb_use_group_pa(ac, pa); 3658 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3659 3660 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3661 lg = ac->ac_lg; 3662 BUG_ON(lg == NULL); 3663 3664 pa->pa_obj_lock = &lg->lg_prealloc_lock; 3665 pa->pa_inode = NULL; 3666 3667 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3668 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3669 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3670 3671 /* 3672 * We will later add the new pa to the right bucket 3673 * after updating the pa_free in ext4_mb_release_context 3674 */ 3675 return 0; 3676 } 3677 3678 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 3679 { 3680 int err; 3681 3682 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 3683 err = ext4_mb_new_group_pa(ac); 3684 else 3685 err = ext4_mb_new_inode_pa(ac); 3686 return err; 3687 } 3688 3689 /* 3690 * finds all unused blocks in on-disk bitmap, frees them in 3691 * in-core bitmap and buddy. 3692 * @pa must be unlinked from inode and group lists, so that 3693 * nobody else can find/use it. 3694 * the caller MUST hold group/inode locks. 
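 * The loop below walks the on-disk bitmap across the PA's range;
 * bits that are still zero correspond to blocks the PA never handed
 * out, and each such run is returned to the buddy via mb_free_blocks().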
3695 * TODO: optimize the case when there are no in-core structures yet 3696 */ 3697 static noinline_for_stack int 3698 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 3699 struct ext4_prealloc_space *pa) 3700 { 3701 struct super_block *sb = e4b->bd_sb; 3702 struct ext4_sb_info *sbi = EXT4_SB(sb); 3703 unsigned int end; 3704 unsigned int next; 3705 ext4_group_t group; 3706 ext4_grpblk_t bit; 3707 unsigned long long grp_blk_start; 3708 int err = 0; 3709 int free = 0; 3710 3711 BUG_ON(pa->pa_deleted == 0); 3712 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3713 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 3714 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3715 end = bit + pa->pa_len; 3716 3717 while (bit < end) { 3718 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3719 if (bit >= end) 3720 break; 3721 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3722 mb_debug(1, " free preallocated %u/%u in group %u\n", 3723 (unsigned) ext4_group_first_block_no(sb, group) + bit, 3724 (unsigned) next - bit, (unsigned) group); 3725 free += next - bit; 3726 3727 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3728 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 3729 EXT4_C2B(sbi, bit)), 3730 next - bit); 3731 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3732 bit = next + 1; 3733 } 3734 if (free != pa->pa_free) { 3735 ext4_msg(e4b->bd_sb, KERN_CRIT, 3736 "pa %p: logic %lu, phys. %lu, len %lu", 3737 pa, (unsigned long) pa->pa_lstart, 3738 (unsigned long) pa->pa_pstart, 3739 (unsigned long) pa->pa_len); 3740 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 3741 free, pa->pa_free); 3742 /* 3743 * pa is already deleted so we use the value obtained 3744 * from the bitmap and continue. 
3745 */ 3746 } 3747 atomic_add(free, &sbi->s_mb_discarded); 3748 3749 return err; 3750 } 3751 3752 static noinline_for_stack int 3753 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3754 struct ext4_prealloc_space *pa) 3755 { 3756 struct super_block *sb = e4b->bd_sb; 3757 ext4_group_t group; 3758 ext4_grpblk_t bit; 3759 3760 trace_ext4_mb_release_group_pa(sb, pa); 3761 BUG_ON(pa->pa_deleted == 0); 3762 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3763 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3764 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3765 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3766 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 3767 3768 return 0; 3769 } 3770 3771 /* 3772 * releases all preallocations in given group 3773 * 3774 * first, we need to decide discard policy: 3775 * - when do we discard 3776 * 1) ENOSPC 3777 * - how many do we discard 3778 * 1) how many requested 3779 */ 3780 static noinline_for_stack int 3781 ext4_mb_discard_group_preallocations(struct super_block *sb, 3782 ext4_group_t group, int needed) 3783 { 3784 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3785 struct buffer_head *bitmap_bh = NULL; 3786 struct ext4_prealloc_space *pa, *tmp; 3787 struct list_head list; 3788 struct ext4_buddy e4b; 3789 int err; 3790 int busy = 0; 3791 int free = 0; 3792 3793 mb_debug(1, "discard preallocation for group %u\n", group); 3794 3795 if (list_empty(&grp->bb_prealloc_list)) 3796 return 0; 3797 3798 bitmap_bh = ext4_read_block_bitmap(sb, group); 3799 if (bitmap_bh == NULL) { 3800 ext4_error(sb, "Error reading block bitmap for %u", group); 3801 return 0; 3802 } 3803 3804 err = ext4_mb_load_buddy(sb, group, &e4b); 3805 if (err) { 3806 ext4_error(sb, "Error loading buddy information for %u", group); 3807 put_bh(bitmap_bh); 3808 return 0; 3809 } 3810 3811 if (needed == 0) 3812 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 3813 3814 INIT_LIST_HEAD(&list); 3815 repeat: 3816 ext4_lock_group(sb, group); 3817 list_for_each_entry_safe(pa, tmp, 3818 &grp->bb_prealloc_list, pa_group_list) { 3819 spin_lock(&pa->pa_lock); 3820 if (atomic_read(&pa->pa_count)) { 3821 spin_unlock(&pa->pa_lock); 3822 busy = 1; 3823 continue; 3824 } 3825 if (pa->pa_deleted) { 3826 spin_unlock(&pa->pa_lock); 3827 continue; 3828 } 3829 3830 /* seems this one can be freed ... */ 3831 pa->pa_deleted = 1; 3832 3833 /* we can trust pa_free ... */ 3834 free += pa->pa_free; 3835 3836 spin_unlock(&pa->pa_lock); 3837 3838 list_del(&pa->pa_group_list); 3839 list_add(&pa->u.pa_tmp_list, &list); 3840 } 3841 3842 /* if we still need more blocks and some PAs were used, try again */ 3843 if (free < needed && busy) { 3844 busy = 0; 3845 ext4_unlock_group(sb, group); 3846 cond_resched(); 3847 goto repeat; 3848 } 3849 3850 /* found anything to free? 
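	 * (the list can only be empty here if every PA on the group list
	 * was either still referenced or already being deleted, i.e.
	 * nothing we were allowed to reclaim)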
*/ 3851 if (list_empty(&list)) { 3852 BUG_ON(free != 0); 3853 goto out; 3854 } 3855 3856 /* now free all selected PAs */ 3857 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3858 3859 /* remove from object (inode or locality group) */ 3860 spin_lock(pa->pa_obj_lock); 3861 list_del_rcu(&pa->pa_inode_list); 3862 spin_unlock(pa->pa_obj_lock); 3863 3864 if (pa->pa_type == MB_GROUP_PA) 3865 ext4_mb_release_group_pa(&e4b, pa); 3866 else 3867 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3868 3869 list_del(&pa->u.pa_tmp_list); 3870 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3871 } 3872 3873 out: 3874 ext4_unlock_group(sb, group); 3875 ext4_mb_unload_buddy(&e4b); 3876 put_bh(bitmap_bh); 3877 return free; 3878 } 3879 3880 /* 3881 * releases all non-used preallocated blocks for given inode 3882 * 3883 * It's important to discard preallocations under i_data_sem 3884 * We don't want another block to be served from the prealloc 3885 * space when we are discarding the inode prealloc space. 3886 * 3887 * FIXME!! Make sure it is valid at all the call sites 3888 */ 3889 void ext4_discard_preallocations(struct inode *inode) 3890 { 3891 struct ext4_inode_info *ei = EXT4_I(inode); 3892 struct super_block *sb = inode->i_sb; 3893 struct buffer_head *bitmap_bh = NULL; 3894 struct ext4_prealloc_space *pa, *tmp; 3895 ext4_group_t group = 0; 3896 struct list_head list; 3897 struct ext4_buddy e4b; 3898 int err; 3899 3900 if (!S_ISREG(inode->i_mode)) { 3901 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 3902 return; 3903 } 3904 3905 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); 3906 trace_ext4_discard_preallocations(inode); 3907 3908 INIT_LIST_HEAD(&list); 3909 3910 repeat: 3911 /* first, collect all pa's in the inode */ 3912 spin_lock(&ei->i_prealloc_lock); 3913 while (!list_empty(&ei->i_prealloc_list)) { 3914 pa = list_entry(ei->i_prealloc_list.next, 3915 struct ext4_prealloc_space, pa_inode_list); 3916 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 3917 spin_lock(&pa->pa_lock); 3918 if (atomic_read(&pa->pa_count)) { 3919 /* this shouldn't happen often - nobody should 3920 * use preallocation while we're discarding it */ 3921 spin_unlock(&pa->pa_lock); 3922 spin_unlock(&ei->i_prealloc_lock); 3923 ext4_msg(sb, KERN_ERR, 3924 "uh-oh! used pa while discarding"); 3925 WARN_ON(1); 3926 schedule_timeout_uninterruptible(HZ); 3927 goto repeat; 3928 3929 } 3930 if (pa->pa_deleted == 0) { 3931 pa->pa_deleted = 1; 3932 spin_unlock(&pa->pa_lock); 3933 list_del_rcu(&pa->pa_inode_list); 3934 list_add(&pa->u.pa_tmp_list, &list); 3935 continue; 3936 } 3937 3938 /* someone is deleting pa right now */ 3939 spin_unlock(&pa->pa_lock); 3940 spin_unlock(&ei->i_prealloc_lock); 3941 3942 /* we have to wait here because pa_deleted 3943 * doesn't mean pa is already unlinked from 3944 * the list. 
as we might be called from 3945 * ->clear_inode() the inode will get freed 3946 * and concurrent thread which is unlinking 3947 * pa from inode's list may access already 3948 * freed memory, bad-bad-bad */ 3949 3950 /* XXX: if this happens too often, we can 3951 * add a flag to force wait only in case 3952 * of ->clear_inode(), but not in case of 3953 * regular truncate */ 3954 schedule_timeout_uninterruptible(HZ); 3955 goto repeat; 3956 } 3957 spin_unlock(&ei->i_prealloc_lock); 3958 3959 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3960 BUG_ON(pa->pa_type != MB_INODE_PA); 3961 group = ext4_get_group_number(sb, pa->pa_pstart); 3962 3963 err = ext4_mb_load_buddy(sb, group, &e4b); 3964 if (err) { 3965 ext4_error(sb, "Error loading buddy information for %u", 3966 group); 3967 continue; 3968 } 3969 3970 bitmap_bh = ext4_read_block_bitmap(sb, group); 3971 if (bitmap_bh == NULL) { 3972 ext4_error(sb, "Error reading block bitmap for %u", 3973 group); 3974 ext4_mb_unload_buddy(&e4b); 3975 continue; 3976 } 3977 3978 ext4_lock_group(sb, group); 3979 list_del(&pa->pa_group_list); 3980 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3981 ext4_unlock_group(sb, group); 3982 3983 ext4_mb_unload_buddy(&e4b); 3984 put_bh(bitmap_bh); 3985 3986 list_del(&pa->u.pa_tmp_list); 3987 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3988 } 3989 } 3990 3991 #ifdef CONFIG_EXT4_DEBUG 3992 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 3993 { 3994 struct super_block *sb = ac->ac_sb; 3995 ext4_group_t ngroups, i; 3996 3997 if (!ext4_mballoc_debug || 3998 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) 3999 return; 4000 4001 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" 4002 " Allocation context details:"); 4003 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", 4004 ac->ac_status, ac->ac_flags); 4005 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " 4006 "goal %lu/%lu/%lu@%lu, " 4007 "best %lu/%lu/%lu@%lu cr %d", 4008 (unsigned long)ac->ac_o_ex.fe_group, 4009 (unsigned long)ac->ac_o_ex.fe_start, 4010 (unsigned long)ac->ac_o_ex.fe_len, 4011 (unsigned long)ac->ac_o_ex.fe_logical, 4012 (unsigned long)ac->ac_g_ex.fe_group, 4013 (unsigned long)ac->ac_g_ex.fe_start, 4014 (unsigned long)ac->ac_g_ex.fe_len, 4015 (unsigned long)ac->ac_g_ex.fe_logical, 4016 (unsigned long)ac->ac_b_ex.fe_group, 4017 (unsigned long)ac->ac_b_ex.fe_start, 4018 (unsigned long)ac->ac_b_ex.fe_len, 4019 (unsigned long)ac->ac_b_ex.fe_logical, 4020 (int)ac->ac_criteria); 4021 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found); 4022 ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); 4023 ngroups = ext4_get_groups_count(sb); 4024 for (i = 0; i < ngroups; i++) { 4025 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 4026 struct ext4_prealloc_space *pa; 4027 ext4_grpblk_t start; 4028 struct list_head *cur; 4029 ext4_lock_group(sb, i); 4030 list_for_each(cur, &grp->bb_prealloc_list) { 4031 pa = list_entry(cur, struct ext4_prealloc_space, 4032 pa_group_list); 4033 spin_lock(&pa->pa_lock); 4034 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4035 NULL, &start); 4036 spin_unlock(&pa->pa_lock); 4037 printk(KERN_ERR "PA:%u:%d:%u \n", i, 4038 start, pa->pa_len); 4039 } 4040 ext4_unlock_group(sb, i); 4041 4042 if (grp->bb_free == 0) 4043 continue; 4044 printk(KERN_ERR "%u: %d/%d \n", 4045 i, grp->bb_free, grp->bb_fragments); 4046 } 4047 printk(KERN_ERR "\n"); 4048 } 4049 #else 4050 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4051 { 4052 return; 4053 } 4054 #endif 4055 4056 /* 4057 * We use 
locality group preallocation for small files. The size of the
4058 * file is determined by the current size or the resulting size after
4059 * allocation, whichever is larger
4060 *
4061 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4062 */
4063 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4064 {
4065 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4066 int bsbits = ac->ac_sb->s_blocksize_bits;
4067 loff_t size, isize;
4068
4069 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4070 return;
4071
4072 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4073 return;
4074
4075 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4076 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4077 >> bsbits;
4078
4079 if ((size == isize) &&
4080 !ext4_fs_is_busy(sbi) &&
4081 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4082 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4083 return;
4084 }
4085
4086 if (sbi->s_mb_group_prealloc <= 0) {
4087 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4088 return;
4089 }
4090
4091 /* don't use group allocation for large files */
4092 size = max(size, isize);
4093 if (size > sbi->s_mb_stream_request) {
4094 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4095 return;
4096 }
4097
4098 BUG_ON(ac->ac_lg != NULL);
4099 /*
4100 * locality group prealloc space is per cpu. The reason for having
4101 * a per cpu locality group is to reduce the contention between block
4102 * requests from multiple CPUs.
4103 */
4104 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
4105
4106 /* we're going to use group allocation */
4107 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4108
4109 /* serialize all allocations in the group */
4110 mutex_lock(&ac->ac_lg->lg_mutex);
4111 }
4112
4113 static noinline_for_stack int
4114 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4115 struct ext4_allocation_request *ar)
4116 {
4117 struct super_block *sb = ar->inode->i_sb;
4118 struct ext4_sb_info *sbi = EXT4_SB(sb);
4119 struct ext4_super_block *es = sbi->s_es;
4120 ext4_group_t group;
4121 unsigned int len;
4122 ext4_fsblk_t goal;
4123 ext4_grpblk_t block;
4124
4125 /* we can't allocate > group size */
4126 len = ar->len;
4127
4128 /* just a dirty hack to filter too big requests */
4129 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4130 len = EXT4_CLUSTERS_PER_GROUP(sb);
4131
4132 /* start searching from the goal */
4133 goal = ar->goal;
4134 if (goal < le32_to_cpu(es->s_first_data_block) ||
4135 goal >= ext4_blocks_count(es))
4136 goal = le32_to_cpu(es->s_first_data_block);
4137 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4138
4139 /* set up allocation goals */
4140 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4141 ac->ac_status = AC_STATUS_CONTINUE;
4142 ac->ac_sb = sb;
4143 ac->ac_inode = ar->inode;
4144 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4145 ac->ac_o_ex.fe_group = group;
4146 ac->ac_o_ex.fe_start = block;
4147 ac->ac_o_ex.fe_len = len;
4148 ac->ac_g_ex = ac->ac_o_ex;
4149 ac->ac_flags = ar->flags;
4150
4151 /* we have to define context: will we work with a file or a
4152 * locality group? this is a policy, actually */
4153 ext4_mb_group_or_file(ac);
4154
4155 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4156 "left: %u/%u, right %u/%u to %swritable\n",
4157 (unsigned) ar->len, (unsigned) ar->logical,
4158 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4159 (unsigned) ar->lleft, (unsigned) ar->pleft,
4160 (unsigned) ar->lright, (unsigned) ar->pright,
4161 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4162 return 0;
4163
4164 }
4165
4166 static noinline_for_stack void
4167 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4168 struct ext4_locality_group *lg,
4169 int order, int total_entries)
4170 {
4171 ext4_group_t group = 0;
4172 struct ext4_buddy e4b;
4173 struct list_head discard_list;
4174 struct ext4_prealloc_space *pa, *tmp;
4175
4176 mb_debug(1, "discard locality group preallocation\n");
4177
4178 INIT_LIST_HEAD(&discard_list);
4179
4180 spin_lock(&lg->lg_prealloc_lock);
4181 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4182 pa_inode_list) {
4183 spin_lock(&pa->pa_lock);
4184 if (atomic_read(&pa->pa_count)) {
4185 /*
4186 * This is the pa that we just used
4187 * for block allocation. So don't
4188 * free it
4189 */
4190 spin_unlock(&pa->pa_lock);
4191 continue;
4192 }
4193 if (pa->pa_deleted) {
4194 spin_unlock(&pa->pa_lock);
4195 continue;
4196 }
4197 /* only lg prealloc space */
4198 BUG_ON(pa->pa_type != MB_GROUP_PA);
4199
4200 /* seems this one can be freed ... */
4201 pa->pa_deleted = 1;
4202 spin_unlock(&pa->pa_lock);
4203
4204 list_del_rcu(&pa->pa_inode_list);
4205 list_add(&pa->u.pa_tmp_list, &discard_list);
4206
4207 total_entries--;
4208 if (total_entries <= 5) {
4209 /*
4210 * we want to keep only 5 entries
4211 * allowing it to grow to 8. This
4212 * makes sure we don't call discard
4213 * soon for this list.
4214 */
4215 break;
4216 }
4217 }
4218 spin_unlock(&lg->lg_prealloc_lock);
4219
4220 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4221
4222 group = ext4_get_group_number(sb, pa->pa_pstart);
4223 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4224 ext4_error(sb, "Error loading buddy information for %u",
4225 group);
4226 continue;
4227 }
4228 ext4_lock_group(sb, group);
4229 list_del(&pa->pa_group_list);
4230 ext4_mb_release_group_pa(&e4b, pa);
4231 ext4_unlock_group(sb, group);
4232
4233 ext4_mb_unload_buddy(&e4b);
4234 list_del(&pa->u.pa_tmp_list);
4235 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4236 }
4237 }
4238
4239 /*
4240 * We have incremented pa_count. So it cannot be freed at this
4241 * point. Also we hold lg_mutex. So no parallel allocation is
4242 * possible from this lg. That means pa_free cannot be updated.
4243 *
4244 * A parallel ext4_mb_discard_group_preallocations is possible,
4245 * which can cause the lg_prealloc_list to be updated.
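 *
 * In other words, the order bucket computed from pa_free below stays
 * valid for the duration of this call: pa_free cannot change while we
 * hold lg_mutex, and the pa cannot disappear because we own a reference.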
4246 */ 4247 4248 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4249 { 4250 int order, added = 0, lg_prealloc_count = 1; 4251 struct super_block *sb = ac->ac_sb; 4252 struct ext4_locality_group *lg = ac->ac_lg; 4253 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4254 4255 order = fls(pa->pa_free) - 1; 4256 if (order > PREALLOC_TB_SIZE - 1) 4257 /* The max size of hash table is PREALLOC_TB_SIZE */ 4258 order = PREALLOC_TB_SIZE - 1; 4259 /* Add the prealloc space to lg */ 4260 spin_lock(&lg->lg_prealloc_lock); 4261 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4262 pa_inode_list) { 4263 spin_lock(&tmp_pa->pa_lock); 4264 if (tmp_pa->pa_deleted) { 4265 spin_unlock(&tmp_pa->pa_lock); 4266 continue; 4267 } 4268 if (!added && pa->pa_free < tmp_pa->pa_free) { 4269 /* Add to the tail of the previous entry */ 4270 list_add_tail_rcu(&pa->pa_inode_list, 4271 &tmp_pa->pa_inode_list); 4272 added = 1; 4273 /* 4274 * we want to count the total 4275 * number of entries in the list 4276 */ 4277 } 4278 spin_unlock(&tmp_pa->pa_lock); 4279 lg_prealloc_count++; 4280 } 4281 if (!added) 4282 list_add_tail_rcu(&pa->pa_inode_list, 4283 &lg->lg_prealloc_list[order]); 4284 spin_unlock(&lg->lg_prealloc_lock); 4285 4286 /* Now trim the list to be not more than 8 elements */ 4287 if (lg_prealloc_count > 8) { 4288 ext4_mb_discard_lg_preallocations(sb, lg, 4289 order, lg_prealloc_count); 4290 return; 4291 } 4292 return ; 4293 } 4294 4295 /* 4296 * release all resource we used in allocation 4297 */ 4298 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 4299 { 4300 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4301 struct ext4_prealloc_space *pa = ac->ac_pa; 4302 if (pa) { 4303 if (pa->pa_type == MB_GROUP_PA) { 4304 /* see comment in ext4_mb_use_group_pa() */ 4305 spin_lock(&pa->pa_lock); 4306 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4307 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4308 pa->pa_free -= ac->ac_b_ex.fe_len; 4309 pa->pa_len -= ac->ac_b_ex.fe_len; 4310 spin_unlock(&pa->pa_lock); 4311 } 4312 } 4313 if (pa) { 4314 /* 4315 * We want to add the pa to the right bucket. 4316 * Remove it from the list and while adding 4317 * make sure the list to which we are adding 4318 * doesn't grow big. 
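 *
 * Only a group PA that still has free clusters is re-bucketed via
 * ext4_mb_add_n_trim() below; in either case the reference this
 * allocation context holds on the pa is then dropped through
 * ext4_mb_put_pa().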
4319 */ 4320 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { 4321 spin_lock(pa->pa_obj_lock); 4322 list_del_rcu(&pa->pa_inode_list); 4323 spin_unlock(pa->pa_obj_lock); 4324 ext4_mb_add_n_trim(ac); 4325 } 4326 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4327 } 4328 if (ac->ac_bitmap_page) 4329 page_cache_release(ac->ac_bitmap_page); 4330 if (ac->ac_buddy_page) 4331 page_cache_release(ac->ac_buddy_page); 4332 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4333 mutex_unlock(&ac->ac_lg->lg_mutex); 4334 ext4_mb_collect_stats(ac); 4335 return 0; 4336 } 4337 4338 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 4339 { 4340 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4341 int ret; 4342 int freed = 0; 4343 4344 trace_ext4_mb_discard_preallocations(sb, needed); 4345 for (i = 0; i < ngroups && needed > 0; i++) { 4346 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4347 freed += ret; 4348 needed -= ret; 4349 } 4350 4351 return freed; 4352 } 4353 4354 /* 4355 * Main entry point into mballoc to allocate blocks 4356 * it tries to use preallocation first, then falls back 4357 * to usual allocation 4358 */ 4359 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4360 struct ext4_allocation_request *ar, int *errp) 4361 { 4362 int freed; 4363 struct ext4_allocation_context *ac = NULL; 4364 struct ext4_sb_info *sbi; 4365 struct super_block *sb; 4366 ext4_fsblk_t block = 0; 4367 unsigned int inquota = 0; 4368 unsigned int reserv_clstrs = 0; 4369 4370 might_sleep(); 4371 sb = ar->inode->i_sb; 4372 sbi = EXT4_SB(sb); 4373 4374 trace_ext4_request_blocks(ar); 4375 4376 /* Allow to use superuser reservation for quota file */ 4377 if (IS_NOQUOTA(ar->inode)) 4378 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 4379 4380 /* 4381 * For delayed allocation, we could skip the ENOSPC and 4382 * EDQUOT check, as blocks and quotas have been already 4383 * reserved when data being copied into pagecache. 4384 */ 4385 if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) 4386 ar->flags |= EXT4_MB_DELALLOC_RESERVED; 4387 else { 4388 /* Without delayed allocation we need to verify 4389 * there is enough free blocks to do block allocation 4390 * and verify allocation doesn't exceed the quota limits. 
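 *
 * For example (illustrative numbers): a 64-cluster request made while
 * the filesystem is nearly full retries the claim at 32, 16, 8, 4, 2
 * and 1 clusters, calling cond_resched() in between so others can free
 * space, before finally failing with -ENOSPC once ar->len reaches zero.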
4391 */ 4392 while (ar->len && 4393 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 4394 4395 /* let others to free the space */ 4396 cond_resched(); 4397 ar->len = ar->len >> 1; 4398 } 4399 if (!ar->len) { 4400 *errp = -ENOSPC; 4401 return 0; 4402 } 4403 reserv_clstrs = ar->len; 4404 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 4405 dquot_alloc_block_nofail(ar->inode, 4406 EXT4_C2B(sbi, ar->len)); 4407 } else { 4408 while (ar->len && 4409 dquot_alloc_block(ar->inode, 4410 EXT4_C2B(sbi, ar->len))) { 4411 4412 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4413 ar->len--; 4414 } 4415 } 4416 inquota = ar->len; 4417 if (ar->len == 0) { 4418 *errp = -EDQUOT; 4419 goto out; 4420 } 4421 } 4422 4423 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 4424 if (!ac) { 4425 ar->len = 0; 4426 *errp = -ENOMEM; 4427 goto out; 4428 } 4429 4430 *errp = ext4_mb_initialize_context(ac, ar); 4431 if (*errp) { 4432 ar->len = 0; 4433 goto out; 4434 } 4435 4436 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 4437 if (!ext4_mb_use_preallocated(ac)) { 4438 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 4439 ext4_mb_normalize_request(ac, ar); 4440 repeat: 4441 /* allocate space in core */ 4442 *errp = ext4_mb_regular_allocator(ac); 4443 if (*errp) 4444 goto discard_and_exit; 4445 4446 /* as we've just preallocated more space than 4447 * user requested originally, we store allocated 4448 * space in a special descriptor */ 4449 if (ac->ac_status == AC_STATUS_FOUND && 4450 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 4451 *errp = ext4_mb_new_preallocation(ac); 4452 if (*errp) { 4453 discard_and_exit: 4454 ext4_discard_allocated_blocks(ac); 4455 goto errout; 4456 } 4457 } 4458 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4459 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 4460 if (*errp == -EAGAIN) { 4461 /* 4462 * drop the reference that we took 4463 * in ext4_mb_use_best_found 4464 */ 4465 ext4_mb_release_context(ac); 4466 ac->ac_b_ex.fe_group = 0; 4467 ac->ac_b_ex.fe_start = 0; 4468 ac->ac_b_ex.fe_len = 0; 4469 ac->ac_status = AC_STATUS_CONTINUE; 4470 goto repeat; 4471 } else if (*errp) { 4472 ext4_discard_allocated_blocks(ac); 4473 goto errout; 4474 } else { 4475 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4476 ar->len = ac->ac_b_ex.fe_len; 4477 } 4478 } else { 4479 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 4480 if (freed) 4481 goto repeat; 4482 *errp = -ENOSPC; 4483 } 4484 4485 errout: 4486 if (*errp) { 4487 ac->ac_b_ex.fe_len = 0; 4488 ar->len = 0; 4489 ext4_mb_show_ac(ac); 4490 } 4491 ext4_mb_release_context(ac); 4492 out: 4493 if (ac) 4494 kmem_cache_free(ext4_ac_cachep, ac); 4495 if (inquota && ar->len < inquota) 4496 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 4497 if (!ar->len) { 4498 if (!ext4_test_inode_state(ar->inode, 4499 EXT4_STATE_DELALLOC_RESERVED)) 4500 /* release all the reserved blocks if non delalloc */ 4501 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4502 reserv_clstrs); 4503 } 4504 4505 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 4506 4507 return block; 4508 } 4509 4510 /* 4511 * We can merge two free data extents only if the physical blocks 4512 * are contiguous, AND the extents were freed by the same transaction, 4513 * AND the blocks are associated with the same group. 
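 *
 * For example (hypothetical numbers): an entry covering clusters 100
 * through 107 of group 5, freed in transaction 42, can merge with an
 * entry starting at cluster 108 of the same group and transaction, but
 * not with one freed in transaction 43 or one belonging to another group.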
4514 */
4515 static int can_merge(struct ext4_free_data *entry1,
4516 struct ext4_free_data *entry2)
4517 {
4518 if ((entry1->efd_tid == entry2->efd_tid) &&
4519 (entry1->efd_group == entry2->efd_group) &&
4520 ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4521 return 1;
4522 return 0;
4523 }
4524
4525 static noinline_for_stack int
4526 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4527 struct ext4_free_data *new_entry)
4528 {
4529 ext4_group_t group = e4b->bd_group;
4530 ext4_grpblk_t cluster;
4531 struct ext4_free_data *entry;
4532 struct ext4_group_info *db = e4b->bd_info;
4533 struct super_block *sb = e4b->bd_sb;
4534 struct ext4_sb_info *sbi = EXT4_SB(sb);
4535 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4536 struct rb_node *parent = NULL, *new_node;
4537
4538 BUG_ON(!ext4_handle_valid(handle));
4539 BUG_ON(e4b->bd_bitmap_page == NULL);
4540 BUG_ON(e4b->bd_buddy_page == NULL);
4541
4542 new_node = &new_entry->efd_node;
4543 cluster = new_entry->efd_start_cluster;
4544
4545 if (!*n) {
4546 /* first free block extent. We need to
4547 * protect buddy cache from being freed,
4548 * otherwise we'll refresh it from
4549 * on-disk bitmap and lose not-yet-available
4550 * blocks */
4551 page_cache_get(e4b->bd_buddy_page);
4552 page_cache_get(e4b->bd_bitmap_page);
4553 }
4554 while (*n) {
4555 parent = *n;
4556 entry = rb_entry(parent, struct ext4_free_data, efd_node);
4557 if (cluster < entry->efd_start_cluster)
4558 n = &(*n)->rb_left;
4559 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4560 n = &(*n)->rb_right;
4561 else {
4562 ext4_grp_locked_error(sb, group, 0,
4563 ext4_group_first_block_no(sb, group) +
4564 EXT4_C2B(sbi, cluster),
4565 "Block already on to-be-freed list");
4566 return 0;
4567 }
4568 }
4569
4570 rb_link_node(new_node, parent, n);
4571 rb_insert_color(new_node, &db->bb_free_root);
4572
4573 /* Now try to see if the extent can be merged to the left and right */
4574 node = rb_prev(new_node);
4575 if (node) {
4576 entry = rb_entry(node, struct ext4_free_data, efd_node);
4577 if (can_merge(entry, new_entry) &&
4578 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4579 new_entry->efd_start_cluster = entry->efd_start_cluster;
4580 new_entry->efd_count += entry->efd_count;
4581 rb_erase(node, &(db->bb_free_root));
4582 kmem_cache_free(ext4_free_data_cachep, entry);
4583 }
4584 }
4585
4586 node = rb_next(new_node);
4587 if (node) {
4588 entry = rb_entry(node, struct ext4_free_data, efd_node);
4589 if (can_merge(new_entry, entry) &&
4590 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4591 new_entry->efd_count += entry->efd_count;
4592 rb_erase(node, &(db->bb_free_root));
4593 kmem_cache_free(ext4_free_data_cachep, entry);
4594 }
4595 }
4596 /* Add the extent to transaction's private list */
4597 ext4_journal_callback_add(handle, ext4_free_data_callback,
4598 &new_entry->efd_jce);
4599 return 0;
4600 }
4601
4602 /**
4603 * ext4_free_blocks() -- Free given blocks and update quota
4604 * @handle: handle for this transaction
4605 * @inode: inode
4606 * @block: start physical block to free
4607 * @count: number of blocks to free
4608 * @flags: flags used by ext4_free_blocks
4609 */
4610 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4611 struct buffer_head *bh, ext4_fsblk_t block,
4612 unsigned long count, int flags)
4613 {
4614 struct buffer_head *bitmap_bh = NULL;
4615 struct super_block *sb = inode->i_sb;
4616 struct ext4_group_desc *gdp;
4617 unsigned int overflow;
4618
ext4_grpblk_t bit; 4619 struct buffer_head *gd_bh; 4620 ext4_group_t block_group; 4621 struct ext4_sb_info *sbi; 4622 struct ext4_inode_info *ei = EXT4_I(inode); 4623 struct ext4_buddy e4b; 4624 unsigned int count_clusters; 4625 int err = 0; 4626 int ret; 4627 4628 might_sleep(); 4629 if (bh) { 4630 if (block) 4631 BUG_ON(block != bh->b_blocknr); 4632 else 4633 block = bh->b_blocknr; 4634 } 4635 4636 sbi = EXT4_SB(sb); 4637 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 4638 !ext4_data_block_valid(sbi, block, count)) { 4639 ext4_error(sb, "Freeing blocks not in datazone - " 4640 "block = %llu, count = %lu", block, count); 4641 goto error_return; 4642 } 4643 4644 ext4_debug("freeing block %llu\n", block); 4645 trace_ext4_free_blocks(inode, block, count, flags); 4646 4647 if (flags & EXT4_FREE_BLOCKS_FORGET) { 4648 struct buffer_head *tbh = bh; 4649 int i; 4650 4651 BUG_ON(bh && (count > 1)); 4652 4653 for (i = 0; i < count; i++) { 4654 cond_resched(); 4655 if (!bh) 4656 tbh = sb_find_get_block(inode->i_sb, 4657 block + i); 4658 if (!tbh) 4659 continue; 4660 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4661 inode, tbh, block + i); 4662 } 4663 } 4664 4665 /* 4666 * We need to make sure we don't reuse the freed block until 4667 * after the transaction is committed, which we can do by 4668 * treating the block as metadata, below. We make an 4669 * exception if the inode is to be written in writeback mode 4670 * since writeback mode has weak data consistency guarantees. 4671 */ 4672 if (!ext4_should_writeback_data(inode)) 4673 flags |= EXT4_FREE_BLOCKS_METADATA; 4674 4675 /* 4676 * If the extent to be freed does not begin on a cluster 4677 * boundary, we need to deal with partial clusters at the 4678 * beginning and end of the extent. Normally we will free 4679 * blocks at the beginning or the end unless we are explicitly 4680 * requested to avoid doing so. 4681 */ 4682 overflow = EXT4_PBLK_COFF(sbi, block); 4683 if (overflow) { 4684 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 4685 overflow = sbi->s_cluster_ratio - overflow; 4686 block += overflow; 4687 if (count > overflow) 4688 count -= overflow; 4689 else 4690 return; 4691 } else { 4692 block -= overflow; 4693 count += overflow; 4694 } 4695 } 4696 overflow = EXT4_LBLK_COFF(sbi, count); 4697 if (overflow) { 4698 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 4699 if (count > overflow) 4700 count -= overflow; 4701 else 4702 return; 4703 } else 4704 count += sbi->s_cluster_ratio - overflow; 4705 } 4706 4707 do_more: 4708 overflow = 0; 4709 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4710 4711 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 4712 ext4_get_group_info(sb, block_group)))) 4713 return; 4714 4715 /* 4716 * Check to see if we are freeing blocks across a group 4717 * boundary. 
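 *
 * For example (illustrative numbers, assuming one block per cluster):
 * with 32768 blocks per group, freeing 100 blocks that start 40 blocks
 * before the end of a group gives overflow = 60; the 40 blocks in this
 * group are freed first and the code then jumps back to do_more to free
 * the remaining 60 blocks in the next group.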
4718 */ 4719 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4720 overflow = EXT4_C2B(sbi, bit) + count - 4721 EXT4_BLOCKS_PER_GROUP(sb); 4722 count -= overflow; 4723 } 4724 count_clusters = EXT4_NUM_B2C(sbi, count); 4725 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4726 if (!bitmap_bh) { 4727 err = -EIO; 4728 goto error_return; 4729 } 4730 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 4731 if (!gdp) { 4732 err = -EIO; 4733 goto error_return; 4734 } 4735 4736 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 4737 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 4738 in_range(block, ext4_inode_table(sb, gdp), 4739 EXT4_SB(sb)->s_itb_per_group) || 4740 in_range(block + count - 1, ext4_inode_table(sb, gdp), 4741 EXT4_SB(sb)->s_itb_per_group)) { 4742 4743 ext4_error(sb, "Freeing blocks in system zone - " 4744 "Block = %llu, count = %lu", block, count); 4745 /* err = 0. ext4_std_error should be a no op */ 4746 goto error_return; 4747 } 4748 4749 BUFFER_TRACE(bitmap_bh, "getting write access"); 4750 err = ext4_journal_get_write_access(handle, bitmap_bh); 4751 if (err) 4752 goto error_return; 4753 4754 /* 4755 * We are about to modify some metadata. Call the journal APIs 4756 * to unshare ->b_data if a currently-committing transaction is 4757 * using it 4758 */ 4759 BUFFER_TRACE(gd_bh, "get_write_access"); 4760 err = ext4_journal_get_write_access(handle, gd_bh); 4761 if (err) 4762 goto error_return; 4763 #ifdef AGGRESSIVE_CHECK 4764 { 4765 int i; 4766 for (i = 0; i < count_clusters; i++) 4767 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4768 } 4769 #endif 4770 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 4771 4772 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4773 if (err) 4774 goto error_return; 4775 4776 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { 4777 struct ext4_free_data *new_entry; 4778 /* 4779 * blocks being freed are metadata. these blocks shouldn't 4780 * be used until this transaction is committed 4781 */ 4782 retry: 4783 new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); 4784 if (!new_entry) { 4785 /* 4786 * We use a retry loop because 4787 * ext4_free_blocks() is not allowed to fail. 4788 */ 4789 cond_resched(); 4790 congestion_wait(BLK_RW_ASYNC, HZ/50); 4791 goto retry; 4792 } 4793 new_entry->efd_start_cluster = bit; 4794 new_entry->efd_group = block_group; 4795 new_entry->efd_count = count_clusters; 4796 new_entry->efd_tid = handle->h_transaction->t_tid; 4797 4798 ext4_lock_group(sb, block_group); 4799 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4800 ext4_mb_free_metadata(handle, &e4b, new_entry); 4801 } else { 4802 /* need to update group_info->bb_free and bitmap 4803 * with group lock held. 
generate_buddy look at 4804 * them with group lock_held 4805 */ 4806 if (test_opt(sb, DISCARD)) { 4807 err = ext4_issue_discard(sb, block_group, bit, count); 4808 if (err && err != -EOPNOTSUPP) 4809 ext4_msg(sb, KERN_WARNING, "discard request in" 4810 " group:%d block:%d count:%lu failed" 4811 " with %d", block_group, bit, count, 4812 err); 4813 } else 4814 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 4815 4816 ext4_lock_group(sb, block_group); 4817 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4818 mb_free_blocks(inode, &e4b, bit, count_clusters); 4819 } 4820 4821 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 4822 ext4_free_group_clusters_set(sb, gdp, ret); 4823 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); 4824 ext4_group_desc_csum_set(sb, block_group, gdp); 4825 ext4_unlock_group(sb, block_group); 4826 4827 if (sbi->s_log_groups_per_flex) { 4828 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 4829 atomic64_add(count_clusters, 4830 &sbi->s_flex_groups[flex_group].free_clusters); 4831 } 4832 4833 if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) { 4834 percpu_counter_add(&sbi->s_dirtyclusters_counter, 4835 count_clusters); 4836 spin_lock(&ei->i_block_reservation_lock); 4837 if (flags & EXT4_FREE_BLOCKS_METADATA) 4838 ei->i_reserved_meta_blocks += count_clusters; 4839 else 4840 ei->i_reserved_data_blocks += count_clusters; 4841 spin_unlock(&ei->i_block_reservation_lock); 4842 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 4843 dquot_reclaim_block(inode, 4844 EXT4_C2B(sbi, count_clusters)); 4845 } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 4846 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 4847 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); 4848 4849 ext4_mb_unload_buddy(&e4b); 4850 4851 /* We dirtied the bitmap block */ 4852 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 4853 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4854 4855 /* And the group descriptor block */ 4856 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 4857 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 4858 if (!err) 4859 err = ret; 4860 4861 if (overflow && !err) { 4862 block += count; 4863 count = overflow; 4864 put_bh(bitmap_bh); 4865 goto do_more; 4866 } 4867 error_return: 4868 brelse(bitmap_bh); 4869 ext4_std_error(sb, err); 4870 return; 4871 } 4872 4873 /** 4874 * ext4_group_add_blocks() -- Add given blocks to an existing group 4875 * @handle: handle to this transaction 4876 * @sb: super block 4877 * @block: start physical block to add to the block group 4878 * @count: number of blocks to free 4879 * 4880 * This marks the blocks as free in the bitmap and buddy. 4881 */ 4882 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 4883 ext4_fsblk_t block, unsigned long count) 4884 { 4885 struct buffer_head *bitmap_bh = NULL; 4886 struct buffer_head *gd_bh; 4887 ext4_group_t block_group; 4888 ext4_grpblk_t bit; 4889 unsigned int i; 4890 struct ext4_group_desc *desc; 4891 struct ext4_sb_info *sbi = EXT4_SB(sb); 4892 struct ext4_buddy e4b; 4893 int err = 0, ret, blk_free_count; 4894 ext4_grpblk_t blocks_freed; 4895 4896 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 4897 4898 if (count == 0) 4899 return 0; 4900 4901 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4902 /* 4903 * Check to see if we are freeing blocks across a group 4904 * boundary. 
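 * Unlike ext4_free_blocks() above, there is no do_more retry here: a
 * range that crosses the group boundary is rejected with -EINVAL.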
4905 */
4906 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4907 ext4_warning(sb, "too many blocks added to group %u\n",
4908 block_group);
4909 err = -EINVAL;
4910 goto error_return;
4911 }
4912
4913 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4914 if (!bitmap_bh) {
4915 err = -EIO;
4916 goto error_return;
4917 }
4918
4919 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4920 if (!desc) {
4921 err = -EIO;
4922 goto error_return;
4923 }
4924
4925 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4926 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4927 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4928 in_range(block + count - 1, ext4_inode_table(sb, desc),
4929 sbi->s_itb_per_group)) {
4930 ext4_error(sb, "Adding blocks in system zones - "
4931 "Block = %llu, count = %lu",
4932 block, count);
4933 err = -EINVAL;
4934 goto error_return;
4935 }
4936
4937 BUFFER_TRACE(bitmap_bh, "getting write access");
4938 err = ext4_journal_get_write_access(handle, bitmap_bh);
4939 if (err)
4940 goto error_return;
4941
4942 /*
4943 * We are about to modify some metadata. Call the journal APIs
4944 * to unshare ->b_data if a currently-committing transaction is
4945 * using it
4946 */
4947 BUFFER_TRACE(gd_bh, "get_write_access");
4948 err = ext4_journal_get_write_access(handle, gd_bh);
4949 if (err)
4950 goto error_return;
4951
4952 for (i = 0, blocks_freed = 0; i < count; i++) {
4953 BUFFER_TRACE(bitmap_bh, "clear bit");
4954 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4955 ext4_error(sb, "bit already cleared for block %llu",
4956 (ext4_fsblk_t)(block + i));
4957 BUFFER_TRACE(bitmap_bh, "bit already cleared");
4958 } else {
4959 blocks_freed++;
4960 }
4961 }
4962
4963 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4964 if (err)
4965 goto error_return;
4966
4967 /*
4968 * need to update group_info->bb_free and bitmap
4969 * with group lock held. generate_buddy looks at
4970 * them with the group lock held
4971 */
4972 ext4_lock_group(sb, block_group);
4973 mb_clear_bits(bitmap_bh->b_data, bit, count);
4974 mb_free_blocks(NULL, &e4b, bit, count);
4975 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4976 ext4_free_group_clusters_set(sb, desc, blk_free_count);
4977 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
4978 ext4_group_desc_csum_set(sb, block_group, desc);
4979 ext4_unlock_group(sb, block_group);
4980 percpu_counter_add(&sbi->s_freeclusters_counter,
4981 EXT4_NUM_B2C(sbi, blocks_freed));
4982
4983 if (sbi->s_log_groups_per_flex) {
4984 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4985 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
4986 &sbi->s_flex_groups[flex_group].free_clusters);
4987 }
4988
4989 ext4_mb_unload_buddy(&e4b);
4990
4991 /* We dirtied the bitmap block */
4992 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4993 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4994
4995 /* And the group descriptor block */
4996 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4997 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4998 if (!err)
4999 err = ret;
5000
5001 error_return:
5002 brelse(bitmap_bh);
5003 ext4_std_error(sb, err);
5004 return err;
5005 }
5006
5007 /**
5008 * ext4_trim_extent -- function to TRIM one single free extent in the group
5009 * @sb: super block for the file system
5010 * @start: starting block of the free extent in the alloc. group
5011 * @count: number of blocks to TRIM
5012 * @group: alloc. group we are working with
5013 * @e4b: ext4 buddy for the group
5014 *
5015 * Trim "count" blocks starting at "start" in the "group". To ensure that no
5016 * one will allocate those blocks, mark them as used in the buddy bitmap.
5017 * This must be called under the group lock.
5018 */
5019 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5020 ext4_group_t group, struct ext4_buddy *e4b)
5021 __releases(bitlock)
5022 __acquires(bitlock)
5023 {
5024 struct ext4_free_extent ex;
5025 int ret = 0;
5026
5027 trace_ext4_trim_extent(sb, group, start, count);
5028
5029 assert_spin_locked(ext4_group_lock_ptr(sb, group));
5030
5031 ex.fe_start = start;
5032 ex.fe_group = group;
5033 ex.fe_len = count;
5034
5035 /*
5036 * Mark blocks used, so no one can reuse them while
5037 * being trimmed.
5038 */
5039 mb_mark_used(e4b, &ex);
5040 ext4_unlock_group(sb, group);
5041 ret = ext4_issue_discard(sb, group, start, count);
5042 ext4_lock_group(sb, group);
5043 mb_free_blocks(NULL, e4b, start, ex.fe_len);
5044 return ret;
5045 }
5046
5047 /**
5048 * ext4_trim_all_free -- function to trim all free space in alloc. group
5049 * @sb: super block for file system
5050 * @group: group to be trimmed
5051 * @start: first group block to examine
5052 * @max: last group block to examine
5053 * @minblocks: minimum extent block count
5054 *
5055 * ext4_trim_all_free walks through the group's buddy bitmap searching for
5056 * free extents. When a free extent of at least @minblocks blocks is found,
5057 * ext4_trim_extent is called to TRIM it.
5058 *
5059 * ext4_trim_extent first marks the extent as used in the group buddy
5060 * bitmap, so that no one can allocate it while it is being trimmed, then
5061 * issues a TRIM command for it, and finally frees the extent again in
5062 * the group buddy bitmap. This is repeated until the whole group has
5063 * been scanned.
5064 */
5065 static ext4_grpblk_t
5066 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5067 ext4_grpblk_t start, ext4_grpblk_t max,
5068 ext4_grpblk_t minblocks)
5069 {
5070 void *bitmap;
5071 ext4_grpblk_t next, count = 0, free_count = 0;
5072 struct ext4_buddy e4b;
5073 int ret = 0;
5074
5075 trace_ext4_trim_all_free(sb, group, start, max);
5076
5077 ret = ext4_mb_load_buddy(sb, group, &e4b);
5078 if (ret) {
5079 ext4_error(sb, "Error in loading buddy "
5080 "information for %u", group);
5081 return ret;
5082 }
5083 bitmap = e4b.bd_bitmap;
5084
5085 ext4_lock_group(sb, group);
5086 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5087 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5088 goto out;
5089
5090 start = (e4b.bd_info->bb_first_free > start) ?
5091 e4b.bd_info->bb_first_free : start; 5092 5093 while (start <= max) { 5094 start = mb_find_next_zero_bit(bitmap, max + 1, start); 5095 if (start > max) 5096 break; 5097 next = mb_find_next_bit(bitmap, max + 1, start); 5098 5099 if ((next - start) >= minblocks) { 5100 ret = ext4_trim_extent(sb, start, 5101 next - start, group, &e4b); 5102 if (ret && ret != -EOPNOTSUPP) 5103 break; 5104 ret = 0; 5105 count += next - start; 5106 } 5107 free_count += next - start; 5108 start = next + 1; 5109 5110 if (fatal_signal_pending(current)) { 5111 count = -ERESTARTSYS; 5112 break; 5113 } 5114 5115 if (need_resched()) { 5116 ext4_unlock_group(sb, group); 5117 cond_resched(); 5118 ext4_lock_group(sb, group); 5119 } 5120 5121 if ((e4b.bd_info->bb_free - free_count) < minblocks) 5122 break; 5123 } 5124 5125 if (!ret) { 5126 ret = count; 5127 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 5128 } 5129 out: 5130 ext4_unlock_group(sb, group); 5131 ext4_mb_unload_buddy(&e4b); 5132 5133 ext4_debug("trimmed %d blocks in the group %d\n", 5134 count, group); 5135 5136 return ret; 5137 } 5138 5139 /** 5140 * ext4_trim_fs() -- trim ioctl handle function 5141 * @sb: superblock for filesystem 5142 * @range: fstrim_range structure 5143 * 5144 * start: First Byte to trim 5145 * len: number of Bytes to trim from start 5146 * minlen: minimum extent length in Bytes 5147 * ext4_trim_fs goes through all allocation groups containing Bytes from 5148 * start to start+len. For each such a group ext4_trim_all_free function 5149 * is invoked to trim all free space. 5150 */ 5151 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 5152 { 5153 struct ext4_group_info *grp; 5154 ext4_group_t group, first_group, last_group; 5155 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 5156 uint64_t start, end, minlen, trimmed = 0; 5157 ext4_fsblk_t first_data_blk = 5158 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 5159 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 5160 int ret = 0; 5161 5162 start = range->start >> sb->s_blocksize_bits; 5163 end = start + (range->len >> sb->s_blocksize_bits) - 1; 5164 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 5165 range->minlen >> sb->s_blocksize_bits); 5166 5167 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 5168 start >= max_blks || 5169 range->len < sb->s_blocksize) 5170 return -EINVAL; 5171 if (end >= max_blks) 5172 end = max_blks - 1; 5173 if (end <= first_data_blk) 5174 goto out; 5175 if (start < first_data_blk) 5176 start = first_data_blk; 5177 5178 /* Determine first and last group to examine based on start and end */ 5179 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 5180 &first_group, &first_cluster); 5181 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 5182 &last_group, &last_cluster); 5183 5184 /* end now represents the last cluster to discard in this group */ 5185 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 5186 5187 for (group = first_group; group <= last_group; group++) { 5188 grp = ext4_get_group_info(sb, group); 5189 /* We only do this if the grp has never been initialized */ 5190 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 5191 ret = ext4_mb_init_group(sb, group); 5192 if (ret) 5193 break; 5194 } 5195 5196 /* 5197 * For all the groups except the last one, last cluster will 5198 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 5199 * change it for the last group, note that last_cluster is 5200 * already computed earlier by ext4_get_group_no_and_offset() 5201 */ 5202 if (group == last_group) 5203 end = last_cluster; 5204 5205 if (grp->bb_free >= 
minlen) { 5206 cnt = ext4_trim_all_free(sb, group, first_cluster, 5207 end, minlen); 5208 if (cnt < 0) { 5209 ret = cnt; 5210 break; 5211 } 5212 trimmed += cnt; 5213 } 5214 5215 /* 5216 * For every group except the first one, we are sure 5217 * that the first cluster to discard will be cluster #0. 5218 */ 5219 first_cluster = 0; 5220 } 5221 5222 if (!ret) 5223 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); 5224 5225 out: 5226 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 5227 return ret; 5228 } 5229
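/*
 * Usage note (illustrative only, not part of this file's code): userspace
 * reaches ext4_trim_fs() through the FITRIM ioctl, roughly like this:
 *
 *	#include <linux/fs.h>		// struct fstrim_range, FITRIM
 *	#include <sys/ioctl.h>
 *
 *	struct fstrim_range range = {
 *		.start	= 0,		// first byte to trim
 *		.len	= ULLONG_MAX,	// trim to the end of the filesystem
 *		.minlen	= 0,		// no minimum extent length
 *	};
 *	ioctl(fd, FITRIM, &range);	// fd refers to any file on the fs
 *
 * On success range.len is rewritten with the number of bytes actually
 * trimmed, matching the range->len update at the end of ext4_trim_fs().
 */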