/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near
 * the specified goal block.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. If the logical file block falls within the range of the prealloc
 * space, we will consume that prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
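 *
 * For example (an illustrative sketch with made-up numbers, assuming
 * block-sized clusters; not a quote from the code): if a prealloc space
 * has pa_lstart = 100, pa_pstart = 5000 and pa_len = 16, then a request
 * for logical block 104 can be served from it, and the physical block
 * handed out follows from the obvious translation:
 *
 *	pblk = pa_pstart + (lblk - pa_lstart)	-> 5000 + (104 - 100) = 5004
 *
 * Logical block 120 lies outside [100, 116) and would not use this space.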
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks.  So it can have information regarding groups_per_page
 * which is blocks_per_page/2
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
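 *
 * As a worked example (illustrative numbers, not defaults from the code):
 * with -o stripe=24 and the default group prealloc of 512 blocks, the
 * multiples of 24 around 512 are 504 and 528, so the group prealloc
 * request is normalized to 528 blocks -- the smallest multiple of the
 * stripe size that exceeds 512.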
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both the prealloc spaces are populated as above. So for the first
 * request we will hit the buddy cache which will result in this prealloc
 * space getting filled. The prealloc space is then later used for the
 * subsequent request.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA		on-disk += N; PA -= N
 *  - discard locality group PA		buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
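 *
 * a short worked example of the bookkeeping above (illustrative numbers):
 * creating a new PA of N = 8 blocks marks 8 blocks used in the buddy
 * (buddy += 8; PA = 8). the inode then allocates 5 of them over time
 * (on-disk += 5; PA -= 5, so PA = 3). when the PA is discarded, the 3
 * blocks never written to the on-disk bitmap are the ones returned to
 * the buddy as free; the 5 already marked on disk stay allocated.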
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate
 *      data; given (3) we care that the PA-=N operation doesn't interfere
 *      with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - PA is referenced and while it is, no discard is possible
 *  - PA is referenced until the corresponding blocks are marked in the
 *    on-disk bitmap
 *  - PA changes only after on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness: no need to modify buddy
 * in this case, but we should take care of concurrent init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
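
/*
 * A quick illustration of the helper above (hypothetical addresses, not
 * from the code): on a 64-bit box, mb_correct_addr_and_bit(&bit, addr)
 * with addr = base + 3 and bit = 5 rounds addr down to the 8-byte
 * boundary base and compensates by adding the 3 skipped bytes as
 * 3 * 8 = 24 bits, so bit becomes 29. The (addr, bit) pair still names
 * the same bit, but addr is now aligned as ext4_test_bit and friends
 * require on architectures like powerpc.
 */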

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
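
/*
 * Rough shape of the buddy layout assumed above (a sketch, with sizes
 * for a 4k block size as an example): order 0 is the block bitmap
 * itself with 1 << (bd_blkbits + 3) = 32768 bits, and each higher order
 * halves the bit count, so s_mb_maxs[1] covers pairs of blocks,
 * s_mb_maxs[2] covers groups of four, and so on up to order
 * bd_blkbits + 1. s_mb_offsets[order] is the byte offset of that
 * order's bitmap within the buddy block.
 */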

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
				   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks starting from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
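
/*
 * A worked example of the loop above (illustrative numbers): marking the
 * free range first = 5, len = 7 proceeds as 1 block at 5 (5 is odd, so
 * max = 0), then 2 blocks at 6 (alignment allows max = 1, so min is
 * capped to 1), then 4 blocks at 8, incrementing bb_counters[0],
 * bb_counters[1] and bb_counters[2] respectively -- each chunk is as
 * large as both the alignment of first and the remaining len allow.
 */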

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider group descriptor
		 * corrupt and update bb_free using bitmap value
		 */
		grp->bb_free = free;
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
		ext4_set_bits(buddy, 0, count);
	}
	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
		sizeof(*e4b->bd_info->bb_counters) *
		(e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
		e4b->bd_bitmap, e4b->bd_group);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
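
/*
 * For example (assuming a 4k page and a 1k block size): blocks_per_page
 * is 4 and groups_per_page is 2, so page 0 of the buddy cache holds
 * [group 0 bitmap][group 0 buddy][group 1 bitmap][group 1 buddy], page 1
 * holds groups 2 and 3, and so on. With a 4k block size a page holds a
 * single block, so a group's bitmap and buddy land on consecutive pages.
 */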

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
			err = -ENOMEM;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
			err = -EIO;
			goto out;
		}
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure that parallel init_group
 * on the same buddy page doesn't happen while we hold the buddy page lock.
 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		page_cache_release(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		page_cache_release(e4b->bd_buddy_page);
	}
}
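
/*
 * The block-to-page arithmetic in ext4_mb_get_buddy_page_lock above, by
 * example (illustrative numbers): with 1k blocks and a 4k page,
 * blocks_per_page = 4, so group 5 uses blocks 10 and 11, which both live
 * on page 10 / 4 = 2 at offsets 2 and 3 -- one page lock covers both.
 * With 4k blocks, blocks_per_page = 1 and group 5's bitmap and buddy sit
 * on separate pages 10 and 11, so both pages must be locked.
 */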

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize the same. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		page_cache_release(page);
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = e4b->bd_buddy;
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

/* clear bits in given range
 * will return first found zero bit if any, -1 otherwise
 */
static int mb_test_and_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;
	int zero_bit = -1;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			if (*addr != (__u32)(-1) && zero_bit == -1)
				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
			*addr = 0;
			cur += 32;
			continue;
		}
		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
			zero_bit = cur;
		cur++;
	}

	return zero_bit;
}

void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

/*
 * _________________________________________________________________ */

static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
{
	if (mb_test_bit(*bit + side, bitmap)) {
		mb_clear_bit(*bit, bitmap);
		(*bit) -= side;
		return 1;
	}
	else {
		(*bit) += side;
		mb_set_bit(*bit, bitmap);
		return -1;
	}
}

static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
{
	int max;
	int order = 1;
	void *buddy = mb_find_buddy(e4b, order, &max);

	while (buddy) {
		void *buddy2;

		/* Bits in range [first; last] are known to be set since
		 * corresponding blocks were allocated. Bits in range
		 * (first; last) will stay set because they form buddies on
		 * upper layer. We just deal with borders if they don't
		 * align with upper layer and then go up.
		 * Releasing entire group is all about clearing
		 * single bit of highest order buddy.
		 */

		/* Example:
		 * ---------------------------------
		 * |   1   |   1   |   1   |   1   |
		 * ---------------------------------
		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
		 * ---------------------------------
		 *   0   1   2   3   4   5   6   7
		 *      \_____________________/
		 *
		 * Neither [1] nor [6] is aligned to above layer.
		 * Left neighbour [0] is free, so mark it busy,
		 * decrease bb_counters and extend range to
		 * [0; 6]
		 * Right neighbour [7] is busy. It can't be coalesced with [6],
		 * so mark [6] free, increase bb_counters and shrink range to
		 * [0; 5].
		 * Then shift range to [0; 2], go up and do the same.
		 */


		if (first & 1)
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
		if (!(last & 1))
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
		if (first > last)
			break;
		order++;

		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
			mb_clear_bits(buddy, first, last - first + 1);
			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
			break;
		}
		first >>= 1;
		last >>= 1;
		buddy = buddy2;
	}
}

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int left_is_free = 0;
	int right_is_free = 0;
	int block;
	int last = first + count - 1;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(last >= (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	/* Don't bother if the block group is corrupt. */
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return;

	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* access memory sequentially: check left neighbour,
	 * clear range and then check right neighbour
	 */
	if (first != 0)
		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);

	if (unlikely(block != -1)) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		ext4_fsblk_t blocknr;

		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
		blocknr += EXT4_C2B(EXT4_SB(sb), block);
		ext4_grp_locked_error(sb, e4b->bd_group,
				      inode ? inode->i_ino : 0,
				      blocknr,
				      "freeing already freed block "
				      "(bit %u); block bitmap corrupt.",
				      block);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   e4b->bd_info->bb_free);
		/* Mark the block group as corrupt. */
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
			&e4b->bd_info->bb_state);
		mb_regenerate_buddy(e4b);
		goto done;
	}

	/* let's maintain fragments counter */
	if (left_is_free && right_is_free)
		e4b->bd_info->bb_fragments--;
	else if (!left_is_free && !right_is_free)
		e4b->bd_info->bb_fragments++;

	/* buddy[0] == bd_bitmap is a special case, so handle
	 * it right away and let mb_buddy_mark_free stay free of
	 * zero order checks.
	 * Check if neighbours are to be coalesced,
	 * adjust bitmap bb_counters and borders appropriately.
	 */
	if (first & 1) {
		first += !left_is_free;
		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
	}
	if (!(last & 1)) {
		last -= !right_is_free;
		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
	}

	if (first <= last)
		mb_buddy_mark_free(e4b, first >> 1, last >> 1);

done:
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

static int mb_find_extent(struct ext4_buddy *e4b, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max, order;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);
	block = block >> order;

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}
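
/*
 * To illustrate mb_find_extent (hypothetical numbers): if block 6 sits
 * inside a free order-1 chunk [6, 7], the function first reports
 * fe_start = 6, fe_len = 2, then keeps gluing on the free chunk to the
 * right (say an order-2 chunk at [8, 11]) until the extent is at least
 * `needed` blocks long or the next neighbour is in use, ending up with
 * fe_start = 6, fe_len = 6 in this example.
 */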

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and a flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
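
/*
 * By way of example (illustrative numbers): with a goal length of 16,
 * a found extent of exactly 16 blocks is taken on the spot. Otherwise,
 * while the best so far is shorter than 16, any longer extent replaces
 * it (12 then 14 keeps 14); once the best is >= 16, a shorter extent
 * that still covers 16 (say 18 over 24) is preferred to cut waste.
 */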

static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
		ext4_mb_unload_buddy(e4b);
		return 0;
	}

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);
	ex.fe_logical = 0xDEADFA11; /* debug value */

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
ext4_mb_use_best_found(ac, e4b); 1841 } 1842 } else if (max >= ac->ac_g_ex.fe_len) { 1843 BUG_ON(ex.fe_len <= 0); 1844 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1845 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1846 ac->ac_found++; 1847 ac->ac_b_ex = ex; 1848 ext4_mb_use_best_found(ac, e4b); 1849 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 1850 /* Sometimes, the caller may want to merge even a small 1851 * number of blocks into an existing extent */ 1852 BUG_ON(ex.fe_len <= 0); 1853 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1854 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1855 ac->ac_found++; 1856 ac->ac_b_ex = ex; 1857 ext4_mb_use_best_found(ac, e4b); 1858 } 1859 ext4_unlock_group(ac->ac_sb, group); 1860 ext4_mb_unload_buddy(e4b); 1861 1862 return 0; 1863 } 1864 1865 /* 1866 * The routine scans buddy structures (not the bitmap!) from the given 1867 * order up to the max order and tries to find a big enough chunk to satisfy the request 1868 */ 1869 static noinline_for_stack 1870 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1871 struct ext4_buddy *e4b) 1872 { 1873 struct super_block *sb = ac->ac_sb; 1874 struct ext4_group_info *grp = e4b->bd_info; 1875 void *buddy; 1876 int i; 1877 int k; 1878 int max; 1879 1880 BUG_ON(ac->ac_2order <= 0); 1881 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { 1882 if (grp->bb_counters[i] == 0) 1883 continue; 1884 1885 buddy = mb_find_buddy(e4b, i, &max); 1886 BUG_ON(buddy == NULL); 1887 1888 k = mb_find_next_zero_bit(buddy, max, 0); 1889 BUG_ON(k >= max); 1890 1891 ac->ac_found++; 1892 1893 ac->ac_b_ex.fe_len = 1 << i; 1894 ac->ac_b_ex.fe_start = k << i; 1895 ac->ac_b_ex.fe_group = e4b->bd_group; 1896 1897 ext4_mb_use_best_found(ac, e4b); 1898 1899 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); 1900 1901 if (EXT4_SB(sb)->s_mb_stats) 1902 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 1903 1904 break; 1905 } 1906 } 1907 1908 /* 1909 * The routine scans the group and measures all found extents. 1910 * In order to optimize scanning, the caller must pass the number of 1911 * free blocks in the group, so the routine knows the upper limit. 1912 */ 1913 static noinline_for_stack 1914 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 1915 struct ext4_buddy *e4b) 1916 { 1917 struct super_block *sb = ac->ac_sb; 1918 void *bitmap = e4b->bd_bitmap; 1919 struct ext4_free_extent ex; 1920 int i; 1921 int free; 1922 1923 free = e4b->bd_info->bb_free; 1924 BUG_ON(free <= 0); 1925 1926 i = e4b->bd_info->bb_first_free; 1927 1928 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1929 i = mb_find_next_zero_bit(bitmap, 1930 EXT4_CLUSTERS_PER_GROUP(sb), i); 1931 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 1932 /* 1933 * If we have a corrupt bitmap, we won't find any 1934 * free blocks even though group info says 1935 * we have free blocks 1936 */ 1937 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1938 "%d free clusters as per " 1939 "group info. But bitmap says 0", 1940 free); 1941 break; 1942 } 1943 1944 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 1945 BUG_ON(ex.fe_len <= 0); 1946 if (free < ex.fe_len) { 1947 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1948 "%d free clusters as per " 1949 "group info. But got %d blocks", 1950 free, ex.fe_len); 1951 /* 1952 * The number of free blocks differs. This most 1953 * likely indicates that the bitmap is corrupt, so exit 1954 * without claiming the space.
1955 */ 1956 break; 1957 } 1958 ex.fe_logical = 0xDEADC0DE; /* debug value */ 1959 ext4_mb_measure_extent(ac, &ex, e4b); 1960 1961 i += ex.fe_len; 1962 free -= ex.fe_len; 1963 } 1964 1965 ext4_mb_check_limits(ac, e4b, 1); 1966 } 1967 1968 /* 1969 * This is a special case for storage devices like RAID5; 1970 * we try to find stripe-aligned chunks for stripe-size-multiple requests 1971 */ 1972 static noinline_for_stack 1973 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 1974 struct ext4_buddy *e4b) 1975 { 1976 struct super_block *sb = ac->ac_sb; 1977 struct ext4_sb_info *sbi = EXT4_SB(sb); 1978 void *bitmap = e4b->bd_bitmap; 1979 struct ext4_free_extent ex; 1980 ext4_fsblk_t first_group_block; 1981 ext4_fsblk_t a; 1982 ext4_grpblk_t i; 1983 int max; 1984 1985 BUG_ON(sbi->s_stripe == 0); 1986 1987 /* find first stripe-aligned block in group */ 1988 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 1989 1990 a = first_group_block + sbi->s_stripe - 1; 1991 do_div(a, sbi->s_stripe); 1992 i = (a * sbi->s_stripe) - first_group_block; 1993 1994 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 1995 if (!mb_test_bit(i, bitmap)) { 1996 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 1997 if (max >= sbi->s_stripe) { 1998 ac->ac_found++; 1999 ex.fe_logical = 0xDEADF00D; /* debug value */ 2000 ac->ac_b_ex = ex; 2001 ext4_mb_use_best_found(ac, e4b); 2002 break; 2003 } 2004 } 2005 i += sbi->s_stripe; 2006 } 2007 } 2008 2009 /* This is now called BEFORE we load the buddy bitmap. */ 2010 static int ext4_mb_good_group(struct ext4_allocation_context *ac, 2011 ext4_group_t group, int cr) 2012 { 2013 unsigned free, fragments; 2014 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2015 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2016 2017 BUG_ON(cr < 0 || cr >= 4); 2018 2019 free = grp->bb_free; 2020 if (free == 0) 2021 return 0; 2022 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2023 return 0; 2024 2025 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2026 return 0; 2027 2028 /* We only do this if the grp has never been initialized */ 2029 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2030 int ret = ext4_mb_init_group(ac->ac_sb, group); 2031 if (ret) 2032 return 0; 2033 } 2034 2035 fragments = grp->bb_fragments; 2036 if (fragments == 0) 2037 return 0; 2038 2039 switch (cr) { 2040 case 0: 2041 BUG_ON(ac->ac_2order == 0); 2042 2043 /* Avoid using the first bg of a flexgroup for data files */ 2044 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2045 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2046 ((group % flex_size) == 0)) 2047 return 0; 2048 2049 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || 2050 (free / fragments) >= ac->ac_g_ex.fe_len) 2051 return 1; 2052 2053 if (grp->bb_largest_free_order < ac->ac_2order) 2054 return 0; 2055 2056 return 1; 2057 case 1: 2058 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2059 return 1; 2060 break; 2061 case 2: 2062 if (free >= ac->ac_g_ex.fe_len) 2063 return 1; 2064 break; 2065 case 3: 2066 return 1; 2067 default: 2068 BUG(); 2069 } 2070 2071 return 0; 2072 } 2073 2074 static noinline_for_stack int 2075 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2076 { 2077 ext4_group_t ngroups, group, i; 2078 int cr; 2079 int err = 0; 2080 struct ext4_sb_info *sbi; 2081 struct super_block *sb; 2082 struct ext4_buddy e4b; 2083 2084 sb = ac->ac_sb; 2085 sbi = EXT4_SB(sb); 2086 ngroups = ext4_get_groups_count(sb); 2087 /* non-extent files are limited to low blocks/groups */ 2088 if (!(ext4_test_inode_flag(ac->ac_inode,
EXT4_INODE_EXTENTS))) 2089 ngroups = sbi->s_blockfile_groups; 2090 2091 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2092 2093 /* first, try the goal */ 2094 err = ext4_mb_find_by_goal(ac, &e4b); 2095 if (err || ac->ac_status == AC_STATUS_FOUND) 2096 goto out; 2097 2098 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2099 goto out; 2100 2101 /* 2102 * ac->ac_2order is set only if the fe_len is a power of 2; 2103 * if ac_2order is set we also set the criteria to 0 so that we 2104 * try exact allocation using the buddy. 2105 */ 2106 i = fls(ac->ac_g_ex.fe_len); 2107 ac->ac_2order = 0; 2108 /* 2109 * We search using buddy data only if the order of the request 2110 * is greater than or equal to sbi->s_mb_order2_reqs. 2111 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2112 */ 2113 if (i >= sbi->s_mb_order2_reqs) { 2114 /* 2115 * This should tell if fe_len is exactly a power of 2 2116 */ 2117 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2118 ac->ac_2order = i - 1; 2119 } 2120 2121 /* if stream allocation is enabled, use global goal */ 2122 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2123 /* TBD: may be a hot spot */ 2124 spin_lock(&sbi->s_md_lock); 2125 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2126 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2127 spin_unlock(&sbi->s_md_lock); 2128 } 2129 2130 /* Let's just scan groups to find more or less suitable blocks */ 2131 cr = ac->ac_2order ? 0 : 1; 2132 /* 2133 * cr == 0 try to get exact allocation, 2134 * cr == 3 try to get anything 2135 */ 2136 repeat: 2137 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2138 ac->ac_criteria = cr; 2139 /* 2140 * searching for the right group start 2141 * from the goal value specified 2142 */ 2143 group = ac->ac_g_ex.fe_group; 2144 2145 for (i = 0; i < ngroups; group++, i++) { 2146 cond_resched(); 2147 /* 2148 * Artificially restricted ngroups for non-extent 2149 * files makes group > ngroups possible on first loop. 2150 */ 2151 if (group >= ngroups) 2152 group = 0; 2153 2154 /* This now checks without needing the buddy page */ 2155 if (!ext4_mb_good_group(ac, group, cr)) 2156 continue; 2157 2158 err = ext4_mb_load_buddy(sb, group, &e4b); 2159 if (err) 2160 goto out; 2161 2162 ext4_lock_group(sb, group); 2163 2164 /* 2165 * We need to check again after locking the 2166 * block group 2167 */ 2168 if (!ext4_mb_good_group(ac, group, cr)) { 2169 ext4_unlock_group(sb, group); 2170 ext4_mb_unload_buddy(&e4b); 2171 continue; 2172 } 2173 2174 ac->ac_groups_scanned++; 2175 if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2) 2176 ext4_mb_simple_scan_group(ac, &e4b); 2177 else if (cr == 1 && sbi->s_stripe && 2178 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2179 ext4_mb_scan_aligned(ac, &e4b); 2180 else 2181 ext4_mb_complex_scan_group(ac, &e4b); 2182 2183 ext4_unlock_group(sb, group); 2184 ext4_mb_unload_buddy(&e4b); 2185 2186 if (ac->ac_status != AC_STATUS_CONTINUE) 2187 break; 2188 } 2189 } 2190 2191 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2192 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2193 /* 2194 * We've been searching too long. Let's try to allocate 2195 * the best chunk we've found so far 2196 */ 2197 2198 ext4_mb_try_best_found(ac, &e4b); 2199 if (ac->ac_status != AC_STATUS_FOUND) { 2200 /* 2201 * Someone luckier has already allocated it.
2202 * The only thing we can do is just take first 2203 * found block(s) 2204 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); 2205 */ 2206 ac->ac_b_ex.fe_group = 0; 2207 ac->ac_b_ex.fe_start = 0; 2208 ac->ac_b_ex.fe_len = 0; 2209 ac->ac_status = AC_STATUS_CONTINUE; 2210 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2211 cr = 3; 2212 atomic_inc(&sbi->s_mb_lost_chunks); 2213 goto repeat; 2214 } 2215 } 2216 out: 2217 return err; 2218 } 2219 2220 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2221 { 2222 struct super_block *sb = seq->private; 2223 ext4_group_t group; 2224 2225 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2226 return NULL; 2227 group = *pos + 1; 2228 return (void *) ((unsigned long) group); 2229 } 2230 2231 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2232 { 2233 struct super_block *sb = seq->private; 2234 ext4_group_t group; 2235 2236 ++*pos; 2237 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2238 return NULL; 2239 group = *pos + 1; 2240 return (void *) ((unsigned long) group); 2241 } 2242 2243 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2244 { 2245 struct super_block *sb = seq->private; 2246 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2247 int i; 2248 int err, buddy_loaded = 0; 2249 struct ext4_buddy e4b; 2250 struct ext4_group_info *grinfo; 2251 struct sg { 2252 struct ext4_group_info info; 2253 ext4_grpblk_t counters[16]; 2254 } sg; 2255 2256 group--; 2257 if (group == 0) 2258 seq_printf(seq, "#%-5s: %-5s %-5s %-5s " 2259 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s " 2260 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n", 2261 "group", "free", "frags", "first", 2262 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6", 2263 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13"); 2264 2265 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2266 sizeof(struct ext4_group_info); 2267 grinfo = ext4_get_group_info(sb, group); 2268 /* Load the group info in memory only if not already loaded. */ 2269 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2270 err = ext4_mb_load_buddy(sb, group, &e4b); 2271 if (err) { 2272 seq_printf(seq, "#%-5u: I/O error\n", group); 2273 return 0; 2274 } 2275 buddy_loaded = 1; 2276 } 2277 2278 memcpy(&sg, ext4_get_group_info(sb, group), i); 2279 2280 if (buddy_loaded) 2281 ext4_mb_unload_buddy(&e4b); 2282 2283 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2284 sg.info.bb_fragments, sg.info.bb_first_free); 2285 for (i = 0; i <= 13; i++) 2286 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 
2287 sg.info.bb_counters[i] : 0); 2288 seq_printf(seq, " ]\n"); 2289 2290 return 0; 2291 } 2292 2293 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2294 { 2295 } 2296 2297 static const struct seq_operations ext4_mb_seq_groups_ops = { 2298 .start = ext4_mb_seq_groups_start, 2299 .next = ext4_mb_seq_groups_next, 2300 .stop = ext4_mb_seq_groups_stop, 2301 .show = ext4_mb_seq_groups_show, 2302 }; 2303 2304 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) 2305 { 2306 struct super_block *sb = PDE_DATA(inode); 2307 int rc; 2308 2309 rc = seq_open(file, &ext4_mb_seq_groups_ops); 2310 if (rc == 0) { 2311 struct seq_file *m = file->private_data; 2312 m->private = sb; 2313 } 2314 return rc; 2315 2316 } 2317 2318 static const struct file_operations ext4_mb_seq_groups_fops = { 2319 .owner = THIS_MODULE, 2320 .open = ext4_mb_seq_groups_open, 2321 .read = seq_read, 2322 .llseek = seq_lseek, 2323 .release = seq_release, 2324 }; 2325 2326 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 2327 { 2328 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2329 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 2330 2331 BUG_ON(!cachep); 2332 return cachep; 2333 } 2334 2335 /* 2336 * Allocate the top-level s_group_info array for the specified number 2337 * of groups 2338 */ 2339 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 2340 { 2341 struct ext4_sb_info *sbi = EXT4_SB(sb); 2342 unsigned size; 2343 struct ext4_group_info ***new_groupinfo; 2344 2345 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 2346 EXT4_DESC_PER_BLOCK_BITS(sb); 2347 if (size <= sbi->s_group_info_size) 2348 return 0; 2349 2350 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2351 new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); 2352 if (!new_groupinfo) { 2353 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2354 return -ENOMEM; 2355 } 2356 if (sbi->s_group_info) { 2357 memcpy(new_groupinfo, sbi->s_group_info, 2358 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2359 ext4_kvfree(sbi->s_group_info); 2360 } 2361 sbi->s_group_info = new_groupinfo; 2362 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2363 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 2364 sbi->s_group_info_size); 2365 return 0; 2366 } 2367 2368 /* Create and initialize ext4_group_info data for the given group. */ 2369 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2370 struct ext4_group_desc *desc) 2371 { 2372 int i; 2373 int metalen = 0; 2374 struct ext4_sb_info *sbi = EXT4_SB(sb); 2375 struct ext4_group_info **meta_group_info; 2376 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2377 2378 /* 2379 * First check if this group is the first of a reserved block. 
* If it's true, we have to allocate a new table of pointers 2381 * to ext4_group_info structures 2382 */ 2383 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2384 metalen = sizeof(*meta_group_info) << 2385 EXT4_DESC_PER_BLOCK_BITS(sb); 2386 meta_group_info = kmalloc(metalen, GFP_KERNEL); 2387 if (meta_group_info == NULL) { 2388 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2389 "for a buddy group"); 2390 goto exit_meta_group_info; 2391 } 2392 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = 2393 meta_group_info; 2394 } 2395 2396 meta_group_info = 2397 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2398 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2399 2400 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL); 2401 if (meta_group_info[i] == NULL) { 2402 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2403 goto exit_group_info; 2404 } 2405 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2406 &(meta_group_info[i]->bb_state)); 2407 2408 /* 2409 * initialize bb_free to be able to skip 2410 * empty groups without initialization 2411 */ 2412 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2413 meta_group_info[i]->bb_free = 2414 ext4_free_clusters_after_init(sb, group, desc); 2415 } else { 2416 meta_group_info[i]->bb_free = 2417 ext4_free_group_clusters(sb, desc); 2418 } 2419 2420 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2421 init_rwsem(&meta_group_info[i]->alloc_sem); 2422 meta_group_info[i]->bb_free_root = RB_ROOT; 2423 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2424 2425 #ifdef DOUBLE_CHECK 2426 { 2427 struct buffer_head *bh; 2428 meta_group_info[i]->bb_bitmap = 2429 kmalloc(sb->s_blocksize, GFP_KERNEL); 2430 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2431 bh = ext4_read_block_bitmap(sb, group); 2432 BUG_ON(bh == NULL); 2433 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, 2434 sb->s_blocksize); 2435 put_bh(bh); 2436 } 2437 #endif 2438 2439 return 0; 2440 2441 exit_group_info: 2442 /* If a meta_group_info table has been allocated, release it now */ 2443 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2444 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); 2445 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; 2446 } 2447 exit_meta_group_info: 2448 return -ENOMEM; 2449 } /* ext4_mb_add_groupinfo */ 2450 2451 static int ext4_mb_init_backend(struct super_block *sb) 2452 { 2453 ext4_group_t ngroups = ext4_get_groups_count(sb); 2454 ext4_group_t i; 2455 struct ext4_sb_info *sbi = EXT4_SB(sb); 2456 int err; 2457 struct ext4_group_desc *desc; 2458 struct kmem_cache *cachep; 2459 2460 err = ext4_mb_alloc_groupinfo(sb, ngroups); 2461 if (err) 2462 return err; 2463 2464 sbi->s_buddy_cache = new_inode(sb); 2465 if (sbi->s_buddy_cache == NULL) { 2466 ext4_msg(sb, KERN_ERR, "can't get new inode"); 2467 goto err_freesgi; 2468 } 2469 /* To avoid potentially colliding with a valid on-disk inode number, 2470 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 2471 * not in the inode hash, so it should never be found by iget(), but 2472 * this will avoid confusion if it ever shows up during debugging.
*/ 2473 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 2474 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2475 for (i = 0; i < ngroups; i++) { 2476 desc = ext4_get_group_desc(sb, i, NULL); 2477 if (desc == NULL) { 2478 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 2479 goto err_freebuddy; 2480 } 2481 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2482 goto err_freebuddy; 2483 } 2484 2485 return 0; 2486 2487 err_freebuddy: 2488 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2489 while (i-- > 0) 2490 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 2491 i = sbi->s_group_info_size; 2492 while (i-- > 0) 2493 kfree(sbi->s_group_info[i]); 2494 iput(sbi->s_buddy_cache); 2495 err_freesgi: 2496 ext4_kvfree(sbi->s_group_info); 2497 return -ENOMEM; 2498 } 2499 2500 static void ext4_groupinfo_destroy_slabs(void) 2501 { 2502 int i; 2503 2504 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 2505 if (ext4_groupinfo_caches[i]) 2506 kmem_cache_destroy(ext4_groupinfo_caches[i]); 2507 ext4_groupinfo_caches[i] = NULL; 2508 } 2509 } 2510 2511 static int ext4_groupinfo_create_slab(size_t size) 2512 { 2513 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 2514 int slab_size; 2515 int blocksize_bits = order_base_2(size); 2516 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2517 struct kmem_cache *cachep; 2518 2519 if (cache_index >= NR_GRPINFO_CACHES) 2520 return -EINVAL; 2521 2522 if (unlikely(cache_index < 0)) 2523 cache_index = 0; 2524 2525 mutex_lock(&ext4_grpinfo_slab_create_mutex); 2526 if (ext4_groupinfo_caches[cache_index]) { 2527 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2528 return 0; /* Already created */ 2529 } 2530 2531 slab_size = offsetof(struct ext4_group_info, 2532 bb_counters[blocksize_bits + 2]); 2533 2534 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 2535 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 2536 NULL); 2537 2538 ext4_groupinfo_caches[cache_index] = cachep; 2539 2540 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2541 if (!cachep) { 2542 printk(KERN_EMERG 2543 "EXT4-fs: no memory for groupinfo slab cache\n"); 2544 return -ENOMEM; 2545 } 2546 2547 return 0; 2548 } 2549 2550 int ext4_mb_init(struct super_block *sb) 2551 { 2552 struct ext4_sb_info *sbi = EXT4_SB(sb); 2553 unsigned i, j; 2554 unsigned offset; 2555 unsigned max; 2556 int ret; 2557 2558 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2559 2560 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2561 if (sbi->s_mb_offsets == NULL) { 2562 ret = -ENOMEM; 2563 goto out; 2564 } 2565 2566 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 2567 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2568 if (sbi->s_mb_maxs == NULL) { 2569 ret = -ENOMEM; 2570 goto out; 2571 } 2572 2573 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 2574 if (ret < 0) 2575 goto out; 2576 2577 /* order 0 is regular bitmap */ 2578 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2579 sbi->s_mb_offsets[0] = 0; 2580 2581 i = 1; 2582 offset = 0; 2583 max = sb->s_blocksize << 2; 2584 do { 2585 sbi->s_mb_offsets[i] = offset; 2586 sbi->s_mb_maxs[i] = max; 2587 offset += 1 << (sb->s_blocksize_bits - i); 2588 max = max >> 1; 2589 i++; 2590 } while (i <= sb->s_blocksize_bits + 1); 2591 2592 spin_lock_init(&sbi->s_md_lock); 2593 spin_lock_init(&sbi->s_bal_lock); 2594 2595 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2596 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2597 sbi->s_mb_stats = MB_DEFAULT_STATS; 2598 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2599 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2600 /* 2601 
* The default group preallocation is 512, which for 4k block 2602 * sizes translates to 2 megabytes. However, for bigalloc file 2603 * systems, this is probably too big (i.e., if the cluster size 2604 * is 1 megabyte, then group preallocation size becomes half a 2605 * gigabyte!). As a default, we will keep a two megabyte 2606 * group prealloc size for cluster sizes up to 64k, and after 2607 * that, we will force a minimum group preallocation size of 2608 * 32 clusters. This translates to 8 megs when the cluster 2609 * size is 256k, and 32 megs when the cluster size is 1 meg, 2610 * which seems reasonable as a default. 2611 */ 2612 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 2613 sbi->s_cluster_bits, 32); 2614 /* 2615 * If there is an s_stripe > 1, then we set the s_mb_group_prealloc 2616 * to the lowest multiple of s_stripe which is bigger than 2617 * the s_mb_group_prealloc as determined above. We want 2618 * the preallocation size to be an exact multiple of the 2619 * RAID stripe size so that preallocations don't fragment 2620 * the stripes. 2621 */ 2622 if (sbi->s_stripe > 1) { 2623 sbi->s_mb_group_prealloc = roundup( 2624 sbi->s_mb_group_prealloc, sbi->s_stripe); 2625 } 2626 2627 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2628 if (sbi->s_locality_groups == NULL) { 2629 ret = -ENOMEM; 2630 goto out; 2631 } 2632 for_each_possible_cpu(i) { 2633 struct ext4_locality_group *lg; 2634 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2635 mutex_init(&lg->lg_mutex); 2636 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2637 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2638 spin_lock_init(&lg->lg_prealloc_lock); 2639 } 2640 2641 /* init file for buddy data */ 2642 ret = ext4_mb_init_backend(sb); 2643 if (ret != 0) 2644 goto out_free_locality_groups; 2645 2646 if (sbi->s_proc) 2647 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc, 2648 &ext4_mb_seq_groups_fops, sb); 2649 2650 return 0; 2651 2652 out_free_locality_groups: 2653 free_percpu(sbi->s_locality_groups); 2654 sbi->s_locality_groups = NULL; 2655 out: 2656 kfree(sbi->s_mb_offsets); 2657 sbi->s_mb_offsets = NULL; 2658 kfree(sbi->s_mb_maxs); 2659 sbi->s_mb_maxs = NULL; 2660 return ret; 2661 } 2662 2663 /* needs to be called with the ext4 group lock held */ 2664 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2665 { 2666 struct ext4_prealloc_space *pa; 2667 struct list_head *cur, *tmp; 2668 int count = 0; 2669 2670 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2671 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2672 list_del(&pa->pa_group_list); 2673 count++; 2674 kmem_cache_free(ext4_pspace_cachep, pa); 2675 } 2676 if (count) 2677 mb_debug(1, "mballoc: %u PAs left\n", count); 2678 2679 } 2680 2681 int ext4_mb_release(struct super_block *sb) 2682 { 2683 ext4_group_t ngroups = ext4_get_groups_count(sb); 2684 ext4_group_t i; 2685 int num_meta_group_infos; 2686 struct ext4_group_info *grinfo; 2687 struct ext4_sb_info *sbi = EXT4_SB(sb); 2688 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2689 2690 if (sbi->s_proc) 2691 remove_proc_entry("mb_groups", sbi->s_proc); 2692 2693 if (sbi->s_group_info) { 2694 for (i = 0; i < ngroups; i++) { 2695 grinfo = ext4_get_group_info(sb, i); 2696 #ifdef DOUBLE_CHECK 2697 kfree(grinfo->bb_bitmap); 2698 #endif 2699 ext4_lock_group(sb, i); 2700 ext4_mb_cleanup_pa(grinfo); 2701 ext4_unlock_group(sb, i); 2702 kmem_cache_free(cachep, grinfo); 2703 } 2704 num_meta_group_infos = (ngroups + 2705 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2706
EXT4_DESC_PER_BLOCK_BITS(sb); 2707 for (i = 0; i < num_meta_group_infos; i++) 2708 kfree(sbi->s_group_info[i]); 2709 ext4_kvfree(sbi->s_group_info); 2710 } 2711 kfree(sbi->s_mb_offsets); 2712 kfree(sbi->s_mb_maxs); 2713 if (sbi->s_buddy_cache) 2714 iput(sbi->s_buddy_cache); 2715 if (sbi->s_mb_stats) { 2716 ext4_msg(sb, KERN_INFO, 2717 "mballoc: %u blocks %u reqs (%u success)", 2718 atomic_read(&sbi->s_bal_allocated), 2719 atomic_read(&sbi->s_bal_reqs), 2720 atomic_read(&sbi->s_bal_success)); 2721 ext4_msg(sb, KERN_INFO, 2722 "mballoc: %u extents scanned, %u goal hits, " 2723 "%u 2^N hits, %u breaks, %u lost", 2724 atomic_read(&sbi->s_bal_ex_scanned), 2725 atomic_read(&sbi->s_bal_goals), 2726 atomic_read(&sbi->s_bal_2orders), 2727 atomic_read(&sbi->s_bal_breaks), 2728 atomic_read(&sbi->s_mb_lost_chunks)); 2729 ext4_msg(sb, KERN_INFO, 2730 "mballoc: %lu generated and it took %Lu", 2731 sbi->s_mb_buddies_generated, 2732 sbi->s_mb_generation_time); 2733 ext4_msg(sb, KERN_INFO, 2734 "mballoc: %u preallocated, %u discarded", 2735 atomic_read(&sbi->s_mb_preallocated), 2736 atomic_read(&sbi->s_mb_discarded)); 2737 } 2738 2739 free_percpu(sbi->s_locality_groups); 2740 2741 return 0; 2742 } 2743 2744 static inline int ext4_issue_discard(struct super_block *sb, 2745 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 2746 { 2747 ext4_fsblk_t discard_block; 2748 2749 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 2750 ext4_group_first_block_no(sb, block_group)); 2751 count = EXT4_C2B(EXT4_SB(sb), count); 2752 trace_ext4_discard_blocks(sb, 2753 (unsigned long long) discard_block, count); 2754 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 2755 } 2756 2757 /* 2758 * This function is called by the jbd2 layer once the commit has finished, 2759 * so we know we can free the blocks that were released with that commit. 2760 */ 2761 static void ext4_free_data_callback(struct super_block *sb, 2762 struct ext4_journal_cb_entry *jce, 2763 int rc) 2764 { 2765 struct ext4_free_data *entry = (struct ext4_free_data *)jce; 2766 struct ext4_buddy e4b; 2767 struct ext4_group_info *db; 2768 int err, count = 0, count2 = 0; 2769 2770 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2771 entry->efd_count, entry->efd_group, entry); 2772 2773 if (test_opt(sb, DISCARD)) { 2774 err = ext4_issue_discard(sb, entry->efd_group, 2775 entry->efd_start_cluster, 2776 entry->efd_count); 2777 if (err && err != -EOPNOTSUPP) 2778 ext4_msg(sb, KERN_WARNING, "discard request in" 2779 " group:%d block:%d count:%d failed" 2780 " with %d", entry->efd_group, 2781 entry->efd_start_cluster, 2782 entry->efd_count, err); 2783 } 2784 2785 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 2786 /* we expect to find existing buddy because it's pinned */ 2787 BUG_ON(err != 0); 2788 2789 2790 db = e4b.bd_info; 2791 /* there are blocks to put in buddy to make them really free */ 2792 count += entry->efd_count; 2793 count2++; 2794 ext4_lock_group(sb, entry->efd_group); 2795 /* Take it out of per group rb tree */ 2796 rb_erase(&entry->efd_node, &(db->bb_free_root)); 2797 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 2798 2799 /* 2800 * Clear the trimmed flag for the group so that the next 2801 * ext4_trim_fs can trim it. 2802 * If the volume is mounted with -o discard, online discard 2803 * is supported and the free blocks will be trimmed online. 
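 *
 * (With -o discard the flag is deliberately left alone here, since the
 * blocks have just been discarded above and re-trimming them would be
 * redundant.)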
2804 */ 2805 if (!test_opt(sb, DISCARD)) 2806 EXT4_MB_GRP_CLEAR_TRIMMED(db); 2807 2808 if (!db->bb_free_root.rb_node) { 2809 /* No more items in the per-group rb tree; 2810 * balance refcounts from ext4_mb_free_metadata() 2811 */ 2812 page_cache_release(e4b.bd_buddy_page); 2813 page_cache_release(e4b.bd_bitmap_page); 2814 } 2815 ext4_unlock_group(sb, entry->efd_group); 2816 kmem_cache_free(ext4_free_data_cachep, entry); 2817 ext4_mb_unload_buddy(&e4b); 2818 2819 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2820 } 2821 2822 int __init ext4_init_mballoc(void) 2823 { 2824 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 2825 SLAB_RECLAIM_ACCOUNT); 2826 if (ext4_pspace_cachep == NULL) 2827 return -ENOMEM; 2828 2829 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 2830 SLAB_RECLAIM_ACCOUNT); 2831 if (ext4_ac_cachep == NULL) { 2832 kmem_cache_destroy(ext4_pspace_cachep); 2833 return -ENOMEM; 2834 } 2835 2836 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 2837 SLAB_RECLAIM_ACCOUNT); 2838 if (ext4_free_data_cachep == NULL) { 2839 kmem_cache_destroy(ext4_pspace_cachep); 2840 kmem_cache_destroy(ext4_ac_cachep); 2841 return -ENOMEM; 2842 } 2843 return 0; 2844 } 2845 2846 void ext4_exit_mballoc(void) 2847 { 2848 /* 2849 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2850 * before destroying the slab cache. 2851 */ 2852 rcu_barrier(); 2853 kmem_cache_destroy(ext4_pspace_cachep); 2854 kmem_cache_destroy(ext4_ac_cachep); 2855 kmem_cache_destroy(ext4_free_data_cachep); 2856 ext4_groupinfo_destroy_slabs(); 2857 } 2858 2859 2860 /* 2861 * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps. 2862 * Returns 0 on success or an error code 2863 */ 2864 static noinline_for_stack int 2865 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2866 handle_t *handle, unsigned int reserv_clstrs) 2867 { 2868 struct buffer_head *bitmap_bh = NULL; 2869 struct ext4_group_desc *gdp; 2870 struct buffer_head *gdp_bh; 2871 struct ext4_sb_info *sbi; 2872 struct super_block *sb; 2873 ext4_fsblk_t block; 2874 int err, len; 2875 2876 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 2877 BUG_ON(ac->ac_b_ex.fe_len <= 0); 2878 2879 sb = ac->ac_sb; 2880 sbi = EXT4_SB(sb); 2881 2882 err = -EIO; 2883 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 2884 if (!bitmap_bh) 2885 goto out_err; 2886 2887 BUFFER_TRACE(bitmap_bh, "getting write access"); 2888 err = ext4_journal_get_write_access(handle, bitmap_bh); 2889 if (err) 2890 goto out_err; 2891 2892 err = -EIO; 2893 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 2894 if (!gdp) 2895 goto out_err; 2896 2897 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 2898 ext4_free_group_clusters(sb, gdp)); 2899 2900 BUFFER_TRACE(gdp_bh, "get_write_access"); 2901 err = ext4_journal_get_write_access(handle, gdp_bh); 2902 if (err) 2903 goto out_err; 2904 2905 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 2906 2907 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 2908 if (!ext4_data_block_valid(sbi, block, len)) { 2909 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2910 "fs metadata", block, block+len); 2911 /* The file system is mounted not to panic on error, 2912 * so fix the bitmap and repeat the block allocation. 2913 * We leak some of the blocks here.
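 *
 * (The overlapping clusters are only marked in-use in the bitmap, not
 * attached to any inode; -EAGAIN is returned below so the caller can
 * retry the allocation elsewhere.)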
2914 */ 2915 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2916 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2917 ac->ac_b_ex.fe_len); 2918 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2919 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2920 if (!err) 2921 err = -EAGAIN; 2922 goto out_err; 2923 } 2924 2925 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2926 #ifdef AGGRESSIVE_CHECK 2927 { 2928 int i; 2929 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 2930 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 2931 bitmap_bh->b_data)); 2932 } 2933 } 2934 #endif 2935 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2936 ac->ac_b_ex.fe_len); 2937 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2938 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2939 ext4_free_group_clusters_set(sb, gdp, 2940 ext4_free_clusters_after_init(sb, 2941 ac->ac_b_ex.fe_group, gdp)); 2942 } 2943 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2944 ext4_free_group_clusters_set(sb, gdp, len); 2945 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 2946 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 2947 2948 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2949 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 2950 /* 2951 * Now reduce the dirty block count, too; it should not go negative 2952 */ 2953 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 2954 /* release all the reserved blocks if non-delalloc */ 2955 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 2956 reserv_clstrs); 2957 2958 if (sbi->s_log_groups_per_flex) { 2959 ext4_group_t flex_group = ext4_flex_group(sbi, 2960 ac->ac_b_ex.fe_group); 2961 atomic64_sub(ac->ac_b_ex.fe_len, 2962 &sbi->s_flex_groups[flex_group].free_clusters); 2963 } 2964 2965 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2966 if (err) 2967 goto out_err; 2968 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 2969 2970 out_err: 2971 brelse(bitmap_bh); 2972 return err; 2973 } 2974 2975 /* 2976 * here we normalize the request for a locality group. 2977 * Group requests are normalized to s_mb_group_prealloc, which is derived 2978 * from s_stripe if the stripe size is set via the mount option. 2979 * s_mb_group_prealloc can be configured via 2980 * /sys/fs/ext4/<partition>/mb_group_prealloc 2981 * 2982 * XXX: should we try to preallocate more than the group has now?
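 *
 * (Concretely, the goal length ac->ac_g_ex.fe_len is simply overwritten
 * with s_mb_group_prealloc below, so e.g. an 8-cluster request under
 * the default of 512 clusters becomes a 512-cluster goal; the surplus
 * ends up in the locality-group PA for later small allocations.)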
2983 */ 2984 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 2985 { 2986 struct super_block *sb = ac->ac_sb; 2987 struct ext4_locality_group *lg = ac->ac_lg; 2988 2989 BUG_ON(lg == NULL); 2990 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 2991 mb_debug(1, "#%u: goal %u blocks for locality group\n", 2992 current->pid, ac->ac_g_ex.fe_len); 2993 } 2994 2995 /* 2996 * Normalization means making the request better in terms of 2997 * size and alignment 2998 */ 2999 static noinline_for_stack void 3000 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 3001 struct ext4_allocation_request *ar) 3002 { 3003 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3004 int bsbits, max; 3005 ext4_lblk_t end; 3006 loff_t size, start_off; 3007 loff_t orig_size __maybe_unused; 3008 ext4_lblk_t start; 3009 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3010 struct ext4_prealloc_space *pa; 3011 3012 /* only normalize data requests; metadata requests 3013 do not need preallocation */ 3014 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3015 return; 3016 3017 /* sometimes the caller may want exact blocks */ 3018 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3019 return; 3020 3021 /* caller may indicate that preallocation isn't 3022 * required (it's a tail, for example) */ 3023 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 3024 return; 3025 3026 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 3027 ext4_mb_normalize_group_request(ac); 3028 return; 3029 } 3030 3031 bsbits = ac->ac_sb->s_blocksize_bits; 3032 3033 /* first, let's learn the actual file size 3034 * assuming the current request is allocated */ 3035 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 3036 size = size << bsbits; 3037 if (size < i_size_read(ac->ac_inode)) 3038 size = i_size_read(ac->ac_inode); 3039 orig_size = size; 3040 3041 /* max size of free chunks */ 3042 max = 2 << bsbits; 3043 3044 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 3045 (req <= (size) || max <= (chunk_size)) 3046 3047 /* first, try to predict filesize */ 3048 /* XXX: should this table be tunable?
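 *
 * (Worked example, illustrative numbers: a predicted size of 100k is
 * rounded up to a 128k goal and 600k to 1024k; sizes between 1 MB and
 * 4 MB map to a 2 MB chunk whose start is aligned on a 2 MB logical
 * boundary, and so on for the 4 MB and 8 MB buckets below.)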
*/ 3049 start_off = 0; 3050 if (size <= 16 * 1024) { 3051 size = 16 * 1024; 3052 } else if (size <= 32 * 1024) { 3053 size = 32 * 1024; 3054 } else if (size <= 64 * 1024) { 3055 size = 64 * 1024; 3056 } else if (size <= 128 * 1024) { 3057 size = 128 * 1024; 3058 } else if (size <= 256 * 1024) { 3059 size = 256 * 1024; 3060 } else if (size <= 512 * 1024) { 3061 size = 512 * 1024; 3062 } else if (size <= 1024 * 1024) { 3063 size = 1024 * 1024; 3064 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 3065 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3066 (21 - bsbits)) << 21; 3067 size = 2 * 1024 * 1024; 3068 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 3069 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3070 (22 - bsbits)) << 22; 3071 size = 4 * 1024 * 1024; 3072 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 3073 (8<<20)>>bsbits, max, 8 * 1024)) { 3074 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3075 (23 - bsbits)) << 23; 3076 size = 8 * 1024 * 1024; 3077 } else { 3078 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits; 3079 size = ac->ac_o_ex.fe_len << bsbits; 3080 } 3081 size = size >> bsbits; 3082 start = start_off >> bsbits; 3083 3084 /* don't cover already allocated blocks in selected range */ 3085 if (ar->pleft && start <= ar->lleft) { 3086 size -= ar->lleft + 1 - start; 3087 start = ar->lleft + 1; 3088 } 3089 if (ar->pright && start + size - 1 >= ar->lright) 3090 size -= start + size - ar->lright; 3091 3092 end = start + size; 3093 3094 /* check we don't cross already preallocated blocks */ 3095 rcu_read_lock(); 3096 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3097 ext4_lblk_t pa_end; 3098 3099 if (pa->pa_deleted) 3100 continue; 3101 spin_lock(&pa->pa_lock); 3102 if (pa->pa_deleted) { 3103 spin_unlock(&pa->pa_lock); 3104 continue; 3105 } 3106 3107 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3108 pa->pa_len); 3109 3110 /* PA must not overlap original request */ 3111 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 3112 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 3113 3114 /* skip PAs this normalized request doesn't overlap with */ 3115 if (pa->pa_lstart >= end || pa_end <= start) { 3116 spin_unlock(&pa->pa_lock); 3117 continue; 3118 } 3119 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 3120 3121 /* adjust start or end to be adjacent to this pa */ 3122 if (pa_end <= ac->ac_o_ex.fe_logical) { 3123 BUG_ON(pa_end < start); 3124 start = pa_end; 3125 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 3126 BUG_ON(pa->pa_lstart > end); 3127 end = pa->pa_lstart; 3128 } 3129 spin_unlock(&pa->pa_lock); 3130 } 3131 rcu_read_unlock(); 3132 size = end - start; 3133 3134 /* XXX: extra loop to check we really don't overlap preallocations */ 3135 rcu_read_lock(); 3136 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3137 ext4_lblk_t pa_end; 3138 3139 spin_lock(&pa->pa_lock); 3140 if (pa->pa_deleted == 0) { 3141 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3142 pa->pa_len); 3143 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 3144 } 3145 spin_unlock(&pa->pa_lock); 3146 } 3147 rcu_read_unlock(); 3148 3149 if (start + size <= ac->ac_o_ex.fe_logical && 3150 start > ac->ac_o_ex.fe_logical) { 3151 ext4_msg(ac->ac_sb, KERN_ERR, 3152 "start %lu, size %lu, fe_logical %lu", 3153 (unsigned long) start, (unsigned long) size, 3154 (unsigned long) ac->ac_o_ex.fe_logical); 3155 } 3156 BUG_ON(start + size <= ac->ac_o_ex.fe_logical && 3157 start > ac->ac_o_ex.fe_logical); 3158 BUG_ON(size <= 0 || size > 
EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3159 3160 /* now prepare goal request */ 3161 3162 /* XXX: is it better to align blocks WRT logical 3163 * placement or to satisfy a big request as is */ 3164 ac->ac_g_ex.fe_logical = start; 3165 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 3166 3167 /* define goal start in order to merge */ 3168 if (ar->pright && (ar->lright == (start + size))) { 3169 /* merge to the right */ 3170 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 3171 &ac->ac_f_ex.fe_group, 3172 &ac->ac_f_ex.fe_start); 3173 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3174 } 3175 if (ar->pleft && (ar->lleft + 1 == start)) { 3176 /* merge to the left */ 3177 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 3178 &ac->ac_f_ex.fe_group, 3179 &ac->ac_f_ex.fe_start); 3180 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3181 } 3182 3183 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, 3184 (unsigned) orig_size, (unsigned) start); 3185 } 3186 3187 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3188 { 3189 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3190 3191 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3192 atomic_inc(&sbi->s_bal_reqs); 3193 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3194 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3195 atomic_inc(&sbi->s_bal_success); 3196 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3197 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3198 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3199 atomic_inc(&sbi->s_bal_goals); 3200 if (ac->ac_found > sbi->s_mb_max_to_scan) 3201 atomic_inc(&sbi->s_bal_breaks); 3202 } 3203 3204 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3205 trace_ext4_mballoc_alloc(ac); 3206 else 3207 trace_ext4_mballoc_prealloc(ac); 3208 } 3209 3210 /* 3211 * Called on failure; free up any blocks from the inode PA for this 3212 * context. We don't need this for MB_GROUP_PA because we only change 3213 * pa_free in ext4_mb_release_context(), but on failure, we've already 3214 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
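 *
 * (E.g. if 16 clusters had been taken out of an inode PA and the
 * allocation subsequently failed, those 16 are simply credited back
 * to pa_free below.)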
3215 */ 3216 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3217 { 3218 struct ext4_prealloc_space *pa = ac->ac_pa; 3219 3220 if (pa && pa->pa_type == MB_INODE_PA) 3221 pa->pa_free += ac->ac_b_ex.fe_len; 3222 } 3223 3224 /* 3225 * use blocks preallocated to inode 3226 */ 3227 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3228 struct ext4_prealloc_space *pa) 3229 { 3230 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3231 ext4_fsblk_t start; 3232 ext4_fsblk_t end; 3233 int len; 3234 3235 /* found preallocated blocks, use them */ 3236 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3237 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 3238 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 3239 len = EXT4_NUM_B2C(sbi, end - start); 3240 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3241 &ac->ac_b_ex.fe_start); 3242 ac->ac_b_ex.fe_len = len; 3243 ac->ac_status = AC_STATUS_FOUND; 3244 ac->ac_pa = pa; 3245 3246 BUG_ON(start < pa->pa_pstart); 3247 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 3248 BUG_ON(pa->pa_free < len); 3249 pa->pa_free -= len; 3250 3251 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); 3252 } 3253 3254 /* 3255 * use blocks preallocated to locality group 3256 */ 3257 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3258 struct ext4_prealloc_space *pa) 3259 { 3260 unsigned int len = ac->ac_o_ex.fe_len; 3261 3262 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3263 &ac->ac_b_ex.fe_group, 3264 &ac->ac_b_ex.fe_start); 3265 ac->ac_b_ex.fe_len = len; 3266 ac->ac_status = AC_STATUS_FOUND; 3267 ac->ac_pa = pa; 3268 3269 /* we don't correct pa_pstart or pa_len here to avoid a 3270 * possible race when the group is being loaded concurrently; 3271 * instead we correct the pa later, after blocks are marked 3272 * in the on-disk bitmap -- see ext4_mb_release_context(). 3273 * Other CPUs are prevented from allocating from this pa by lg_mutex 3274 */ 3275 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3276 } 3277 3278 /* 3279 * Return the prealloc space that has the minimal distance 3280 * from the goal block. @cpa is the prealloc 3281 * space with the currently known minimal distance 3282 * from the goal block.
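 *
 * (E.g. with a goal block of 5000, a current @cpa starting at 4000 is
 * 1000 blocks away and loses to a @pa starting at 5400, which is only
 * 400 away; the pa_count references are swapped accordingly below.)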
3283 */ 3284 static struct ext4_prealloc_space * 3285 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3286 struct ext4_prealloc_space *pa, 3287 struct ext4_prealloc_space *cpa) 3288 { 3289 ext4_fsblk_t cur_distance, new_distance; 3290 3291 if (cpa == NULL) { 3292 atomic_inc(&pa->pa_count); 3293 return pa; 3294 } 3295 cur_distance = abs(goal_block - cpa->pa_pstart); 3296 new_distance = abs(goal_block - pa->pa_pstart); 3297 3298 if (cur_distance <= new_distance) 3299 return cpa; 3300 3301 /* drop the previous reference */ 3302 atomic_dec(&cpa->pa_count); 3303 atomic_inc(&pa->pa_count); 3304 return pa; 3305 } 3306 3307 /* 3308 * search for goal blocks in preallocated space 3309 */ 3310 static noinline_for_stack int 3311 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3312 { 3313 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3314 int order, i; 3315 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3316 struct ext4_locality_group *lg; 3317 struct ext4_prealloc_space *pa, *cpa = NULL; 3318 ext4_fsblk_t goal_block; 3319 3320 /* only data can be preallocated */ 3321 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3322 return 0; 3323 3324 /* first, try per-file preallocation */ 3325 rcu_read_lock(); 3326 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3327 3328 /* all fields in this condition don't change, 3329 * so we can skip locking for them */ 3330 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3331 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 3332 EXT4_C2B(sbi, pa->pa_len))) 3333 continue; 3334 3335 /* non-extent files can't have physical blocks past 2^32 */ 3336 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3337 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 3338 EXT4_MAX_BLOCK_FILE_PHYS)) 3339 continue; 3340 3341 /* found preallocated blocks, use them */ 3342 spin_lock(&pa->pa_lock); 3343 if (pa->pa_deleted == 0 && pa->pa_free) { 3344 atomic_inc(&pa->pa_count); 3345 ext4_mb_use_inode_pa(ac, pa); 3346 spin_unlock(&pa->pa_lock); 3347 ac->ac_criteria = 10; 3348 rcu_read_unlock(); 3349 return 1; 3350 } 3351 spin_unlock(&pa->pa_lock); 3352 } 3353 rcu_read_unlock(); 3354 3355 /* can we use group allocation? */ 3356 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3357 return 0; 3358 3359 /* inode may have no locality group for some reason */ 3360 lg = ac->ac_lg; 3361 if (lg == NULL) 3362 return 0; 3363 order = fls(ac->ac_o_ex.fe_len) - 1; 3364 if (order > PREALLOC_TB_SIZE - 1) 3365 /* The max size of hash table is PREALLOC_TB_SIZE */ 3366 order = PREALLOC_TB_SIZE - 1; 3367 3368 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3369 /* 3370 * search for the prealloc space that has the 3371 * minimal distance from the goal block. 3372 */ 3373 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3374 rcu_read_lock(); 3375 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3376 pa_inode_list) { 3377 spin_lock(&pa->pa_lock); 3378 if (pa->pa_deleted == 0 && 3379 pa->pa_free >= ac->ac_o_ex.fe_len) { 3380 3381 cpa = ext4_mb_check_group_pa(goal_block, 3382 pa, cpa); 3383 } 3384 spin_unlock(&pa->pa_lock); 3385 } 3386 rcu_read_unlock(); 3387 } 3388 if (cpa) { 3389 ext4_mb_use_group_pa(ac, cpa); 3390 ac->ac_criteria = 20; 3391 return 1; 3392 } 3393 return 0; 3394 } 3395 3396 /* 3397 * the function goes through all blocks freed in the group 3398 * but not yet committed and marks them used in the in-core bitmap.
* the buddy must be generated from this bitmap. 3400 * Needs to be called with the ext4 group lock held 3401 */ 3402 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 3403 ext4_group_t group) 3404 { 3405 struct rb_node *n; 3406 struct ext4_group_info *grp; 3407 struct ext4_free_data *entry; 3408 3409 grp = ext4_get_group_info(sb, group); 3410 n = rb_first(&(grp->bb_free_root)); 3411 3412 while (n) { 3413 entry = rb_entry(n, struct ext4_free_data, efd_node); 3414 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 3415 n = rb_next(n); 3416 } 3417 return; 3418 } 3419 3420 /* 3421 * the function goes through all preallocations in this group and marks them 3422 * used in the in-core bitmap. The buddy must be generated from this bitmap. 3423 * Needs to be called with the ext4 group lock held 3424 */ 3425 static noinline_for_stack 3426 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 3427 ext4_group_t group) 3428 { 3429 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3430 struct ext4_prealloc_space *pa; 3431 struct list_head *cur; 3432 ext4_group_t groupnr; 3433 ext4_grpblk_t start; 3434 int preallocated = 0; 3435 int len; 3436 3437 /* every form of preallocation discard loads the group first, 3438 * so the only competing code is preallocation use. 3439 * We don't need any locking here. 3440 * Notice we do NOT ignore preallocations with pa_deleted set; 3441 * otherwise we could leave used blocks available for 3442 * allocation in the buddy when a concurrent ext4_mb_put_pa() 3443 * is dropping the preallocation 3444 */ 3445 list_for_each(cur, &grp->bb_prealloc_list) { 3446 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3447 spin_lock(&pa->pa_lock); 3448 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3449 &groupnr, &start); 3450 len = pa->pa_len; 3451 spin_unlock(&pa->pa_lock); 3452 if (unlikely(len == 0)) 3453 continue; 3454 BUG_ON(groupnr != group); 3455 ext4_set_bits(bitmap, start, len); 3456 preallocated += len; 3457 } 3458 mb_debug(1, "preallocated %u for group %u\n", preallocated, group); 3459 } 3460 3461 static void ext4_mb_pa_callback(struct rcu_head *head) 3462 { 3463 struct ext4_prealloc_space *pa; 3464 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3465 3466 BUG_ON(atomic_read(&pa->pa_count)); 3467 BUG_ON(pa->pa_deleted == 0); 3468 kmem_cache_free(ext4_pspace_cachep, pa); 3469 } 3470 3471 /* 3472 * drops a reference to the preallocated space descriptor 3473 * if this was the last reference and the space is consumed 3474 */ 3475 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3476 struct super_block *sb, struct ext4_prealloc_space *pa) 3477 { 3478 ext4_group_t grp; 3479 ext4_fsblk_t grp_blk; 3480 3481 /* in this short window concurrent discard can set pa_deleted */ 3482 spin_lock(&pa->pa_lock); 3483 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 3484 spin_unlock(&pa->pa_lock); 3485 return; 3486 } 3487 3488 if (pa->pa_deleted == 1) { 3489 spin_unlock(&pa->pa_lock); 3490 return; 3491 } 3492 3493 pa->pa_deleted = 1; 3494 spin_unlock(&pa->pa_lock); 3495 3496 grp_blk = pa->pa_pstart; 3497 /* 3498 * If doing group-based preallocation, pa_pstart may be in the 3499 * next group when pa is used up 3500 */ 3501 if (pa->pa_type == MB_GROUP_PA) 3502 grp_blk--; 3503 3504 grp = ext4_get_group_number(sb, grp_blk); 3505 3506 /* 3507 * possible race: 3508 * 3509 * P1 (buddy init) P2 (regular allocation) 3510 * find block B in PA 3511 * copy on-disk bitmap to buddy 3512 * mark B in on-disk bitmap 3513 *
drop PA from group 3514 * mark all PAs in buddy 3515 * 3516 * thus, P1 initializes buddy with B available. To prevent this 3517 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 3518 * against that pair 3519 */ 3520 ext4_lock_group(sb, grp); 3521 list_del(&pa->pa_group_list); 3522 ext4_unlock_group(sb, grp); 3523 3524 spin_lock(pa->pa_obj_lock); 3525 list_del_rcu(&pa->pa_inode_list); 3526 spin_unlock(pa->pa_obj_lock); 3527 3528 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3529 } 3530 3531 /* 3532 * creates new preallocated space for given inode 3533 */ 3534 static noinline_for_stack int 3535 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 3536 { 3537 struct super_block *sb = ac->ac_sb; 3538 struct ext4_sb_info *sbi = EXT4_SB(sb); 3539 struct ext4_prealloc_space *pa; 3540 struct ext4_group_info *grp; 3541 struct ext4_inode_info *ei; 3542 3543 /* preallocate only when found space is larger than requested */ 3544 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3545 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3546 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3547 3548 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3549 if (pa == NULL) 3550 return -ENOMEM; 3551 3552 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 3553 int winl; 3554 int wins; 3555 int win; 3556 int offs; 3557 3558 /* we can't allocate as much as the normalizer wants, 3559 * so the found space must get a proper lstart 3560 * to cover the original request */ 3561 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3562 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 3563 3564 /* we're limited by the original request in that 3565 * the logical block must be covered anyway; 3566 * winl is the window we can move our chunk within */ 3567 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 3568 3569 /* also, we should cover the whole original request */ 3570 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 3571 3572 /* the smallest one defines the real window */ 3573 win = min(winl, wins); 3574 3575 offs = ac->ac_o_ex.fe_logical % 3576 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3577 if (offs && offs < win) 3578 win = offs; 3579 3580 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 3581 EXT4_NUM_B2C(sbi, win); 3582 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 3583 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 3584 } 3585 3586 /* preallocation can change ac_b_ex, thus we store the actually 3587 * allocated blocks for history */ 3588 ac->ac_f_ex = ac->ac_b_ex; 3589 3590 pa->pa_lstart = ac->ac_b_ex.fe_logical; 3591 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3592 pa->pa_len = ac->ac_b_ex.fe_len; 3593 pa->pa_free = pa->pa_len; 3594 atomic_set(&pa->pa_count, 1); 3595 spin_lock_init(&pa->pa_lock); 3596 INIT_LIST_HEAD(&pa->pa_inode_list); 3597 INIT_LIST_HEAD(&pa->pa_group_list); 3598 pa->pa_deleted = 0; 3599 pa->pa_type = MB_INODE_PA; 3600 3601 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, 3602 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3603 trace_ext4_mb_new_inode_pa(ac, pa); 3604 3605 ext4_mb_use_inode_pa(ac, pa); 3606 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 3607 3608 ei = EXT4_I(ac->ac_inode); 3609 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3610 3611 pa->pa_obj_lock = &ei->i_prealloc_lock; 3612 pa->pa_inode = ac->ac_inode; 3613 3614 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3615 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3616 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3617 3618 spin_lock(pa->pa_obj_lock); 3619 list_add_rcu(&pa->pa_inode_list,
&ei->i_prealloc_list); 3620 spin_unlock(pa->pa_obj_lock); 3621 3622 return 0; 3623 } 3624 3625 /* 3626 * creates new preallocated space for the locality group the inode belongs to 3627 */ 3628 static noinline_for_stack int 3629 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 3630 { 3631 struct super_block *sb = ac->ac_sb; 3632 struct ext4_locality_group *lg; 3633 struct ext4_prealloc_space *pa; 3634 struct ext4_group_info *grp; 3635 3636 /* preallocate only when found space is larger than requested */ 3637 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3638 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3639 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3640 3641 BUG_ON(ext4_pspace_cachep == NULL); 3642 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3643 if (pa == NULL) 3644 return -ENOMEM; 3645 3646 /* preallocation can change ac_b_ex, thus we store the actually 3647 * allocated blocks for history */ 3648 ac->ac_f_ex = ac->ac_b_ex; 3649 3650 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3651 pa->pa_lstart = pa->pa_pstart; 3652 pa->pa_len = ac->ac_b_ex.fe_len; 3653 pa->pa_free = pa->pa_len; 3654 atomic_set(&pa->pa_count, 1); 3655 spin_lock_init(&pa->pa_lock); 3656 INIT_LIST_HEAD(&pa->pa_inode_list); 3657 INIT_LIST_HEAD(&pa->pa_group_list); 3658 pa->pa_deleted = 0; 3659 pa->pa_type = MB_GROUP_PA; 3660 3661 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, 3662 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3663 trace_ext4_mb_new_group_pa(ac, pa); 3664 3665 ext4_mb_use_group_pa(ac, pa); 3666 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3667 3668 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3669 lg = ac->ac_lg; 3670 BUG_ON(lg == NULL); 3671 3672 pa->pa_obj_lock = &lg->lg_prealloc_lock; 3673 pa->pa_inode = NULL; 3674 3675 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3676 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3677 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3678 3679 /* 3680 * We will later add the new pa to the right bucket 3681 * after updating the pa_free in ext4_mb_release_context 3682 */ 3683 return 0; 3684 } 3685 3686 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 3687 { 3688 int err; 3689 3690 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 3691 err = ext4_mb_new_group_pa(ac); 3692 else 3693 err = ext4_mb_new_inode_pa(ac); 3694 return err; 3695 } 3696 3697 /* 3698 * finds all unused blocks in the on-disk bitmap, frees them in 3699 * the in-core bitmap and buddy. 3700 * @pa must be unlinked from inode and group lists, so that 3701 * nobody else can find/use it. 3702 * the caller MUST hold group/inode locks.
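 *
 * (E.g. a deleted pa covering bits 100..115 of the group bitmap, of
 * which bits 104..107 were actually written: the bitmap walk below
 * frees 100..103 and 108..115 back into the buddy, 12 clusters total.)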
3703 * TODO: optimize the case when there are no in-core structures yet 3704 */ 3705 static noinline_for_stack int 3706 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 3707 struct ext4_prealloc_space *pa) 3708 { 3709 struct super_block *sb = e4b->bd_sb; 3710 struct ext4_sb_info *sbi = EXT4_SB(sb); 3711 unsigned int end; 3712 unsigned int next; 3713 ext4_group_t group; 3714 ext4_grpblk_t bit; 3715 unsigned long long grp_blk_start; 3716 int err = 0; 3717 int free = 0; 3718 3719 BUG_ON(pa->pa_deleted == 0); 3720 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3721 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 3722 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3723 end = bit + pa->pa_len; 3724 3725 while (bit < end) { 3726 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3727 if (bit >= end) 3728 break; 3729 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3730 mb_debug(1, " free preallocated %u/%u in group %u\n", 3731 (unsigned) ext4_group_first_block_no(sb, group) + bit, 3732 (unsigned) next - bit, (unsigned) group); 3733 free += next - bit; 3734 3735 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3736 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 3737 EXT4_C2B(sbi, bit)), 3738 next - bit); 3739 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3740 bit = next + 1; 3741 } 3742 if (free != pa->pa_free) { 3743 ext4_msg(e4b->bd_sb, KERN_CRIT, 3744 "pa %p: logic %lu, phys. %lu, len %lu", 3745 pa, (unsigned long) pa->pa_lstart, 3746 (unsigned long) pa->pa_pstart, 3747 (unsigned long) pa->pa_len); 3748 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 3749 free, pa->pa_free); 3750 /* 3751 * pa is already deleted so we use the value obtained 3752 * from the bitmap and continue. 
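* (e.g. free = 12 found in the bitmap vs. pa_free = 10 recorded in the
* PA; the bitmap-derived count is what gets added to the s_mb_discarded
* statistics below)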
3753 */ 3754 } 3755 atomic_add(free, &sbi->s_mb_discarded); 3756 3757 return err; 3758 } 3759 3760 static noinline_for_stack int 3761 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3762 struct ext4_prealloc_space *pa) 3763 { 3764 struct super_block *sb = e4b->bd_sb; 3765 ext4_group_t group; 3766 ext4_grpblk_t bit; 3767 3768 trace_ext4_mb_release_group_pa(sb, pa); 3769 BUG_ON(pa->pa_deleted == 0); 3770 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3771 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3772 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3773 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3774 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 3775 3776 return 0; 3777 } 3778 3779 /* 3780 * releases all preallocations in given group 3781 * 3782 * first, we need to decide discard policy: 3783 * - when do we discard 3784 * 1) ENOSPC 3785 * - how many do we discard 3786 * 1) how many requested 3787 */ 3788 static noinline_for_stack int 3789 ext4_mb_discard_group_preallocations(struct super_block *sb, 3790 ext4_group_t group, int needed) 3791 { 3792 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3793 struct buffer_head *bitmap_bh = NULL; 3794 struct ext4_prealloc_space *pa, *tmp; 3795 struct list_head list; 3796 struct ext4_buddy e4b; 3797 int err; 3798 int busy = 0; 3799 int free = 0; 3800 3801 mb_debug(1, "discard preallocation for group %u\n", group); 3802 3803 if (list_empty(&grp->bb_prealloc_list)) 3804 return 0; 3805 3806 bitmap_bh = ext4_read_block_bitmap(sb, group); 3807 if (bitmap_bh == NULL) { 3808 ext4_error(sb, "Error reading block bitmap for %u", group); 3809 return 0; 3810 } 3811 3812 err = ext4_mb_load_buddy(sb, group, &e4b); 3813 if (err) { 3814 ext4_error(sb, "Error loading buddy information for %u", group); 3815 put_bh(bitmap_bh); 3816 return 0; 3817 } 3818 3819 if (needed == 0) 3820 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 3821 3822 INIT_LIST_HEAD(&list); 3823 repeat: 3824 ext4_lock_group(sb, group); 3825 list_for_each_entry_safe(pa, tmp, 3826 &grp->bb_prealloc_list, pa_group_list) { 3827 spin_lock(&pa->pa_lock); 3828 if (atomic_read(&pa->pa_count)) { 3829 spin_unlock(&pa->pa_lock); 3830 busy = 1; 3831 continue; 3832 } 3833 if (pa->pa_deleted) { 3834 spin_unlock(&pa->pa_lock); 3835 continue; 3836 } 3837 3838 /* seems this one can be freed ... */ 3839 pa->pa_deleted = 1; 3840 3841 /* we can trust pa_free ... */ 3842 free += pa->pa_free; 3843 3844 spin_unlock(&pa->pa_lock); 3845 3846 list_del(&pa->pa_group_list); 3847 list_add(&pa->u.pa_tmp_list, &list); 3848 } 3849 3850 /* if we still need more blocks and some PAs were used, try again */ 3851 if (free < needed && busy) { 3852 busy = 0; 3853 ext4_unlock_group(sb, group); 3854 cond_resched(); 3855 goto repeat; 3856 } 3857 3858 /* found anything to free? 
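* (an empty list means no PA was selected above, so free must still
* be zero, which the BUG_ON below asserts)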
*/ 3859 if (list_empty(&list)) { 3860 BUG_ON(free != 0); 3861 goto out; 3862 } 3863 3864 /* now free all selected PAs */ 3865 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3866 3867 /* remove from object (inode or locality group) */ 3868 spin_lock(pa->pa_obj_lock); 3869 list_del_rcu(&pa->pa_inode_list); 3870 spin_unlock(pa->pa_obj_lock); 3871 3872 if (pa->pa_type == MB_GROUP_PA) 3873 ext4_mb_release_group_pa(&e4b, pa); 3874 else 3875 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3876 3877 list_del(&pa->u.pa_tmp_list); 3878 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3879 } 3880 3881 out: 3882 ext4_unlock_group(sb, group); 3883 ext4_mb_unload_buddy(&e4b); 3884 put_bh(bitmap_bh); 3885 return free; 3886 } 3887 3888 /* 3889 * releases all non-used preallocated blocks for given inode 3890 * 3891 * It's important to discard preallocations under i_data_sem 3892 * We don't want another block to be served from the prealloc 3893 * space when we are discarding the inode prealloc space. 3894 * 3895 * FIXME!! Make sure it is valid at all the call sites 3896 */ 3897 void ext4_discard_preallocations(struct inode *inode) 3898 { 3899 struct ext4_inode_info *ei = EXT4_I(inode); 3900 struct super_block *sb = inode->i_sb; 3901 struct buffer_head *bitmap_bh = NULL; 3902 struct ext4_prealloc_space *pa, *tmp; 3903 ext4_group_t group = 0; 3904 struct list_head list; 3905 struct ext4_buddy e4b; 3906 int err; 3907 3908 if (!S_ISREG(inode->i_mode)) { 3909 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 3910 return; 3911 } 3912 3913 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); 3914 trace_ext4_discard_preallocations(inode); 3915 3916 INIT_LIST_HEAD(&list); 3917 3918 repeat: 3919 /* first, collect all pa's in the inode */ 3920 spin_lock(&ei->i_prealloc_lock); 3921 while (!list_empty(&ei->i_prealloc_list)) { 3922 pa = list_entry(ei->i_prealloc_list.next, 3923 struct ext4_prealloc_space, pa_inode_list); 3924 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 3925 spin_lock(&pa->pa_lock); 3926 if (atomic_read(&pa->pa_count)) { 3927 /* this shouldn't happen often - nobody should 3928 * use preallocation while we're discarding it */ 3929 spin_unlock(&pa->pa_lock); 3930 spin_unlock(&ei->i_prealloc_lock); 3931 ext4_msg(sb, KERN_ERR, 3932 "uh-oh! used pa while discarding"); 3933 WARN_ON(1); 3934 schedule_timeout_uninterruptible(HZ); 3935 goto repeat; 3936 3937 } 3938 if (pa->pa_deleted == 0) { 3939 pa->pa_deleted = 1; 3940 spin_unlock(&pa->pa_lock); 3941 list_del_rcu(&pa->pa_inode_list); 3942 list_add(&pa->u.pa_tmp_list, &list); 3943 continue; 3944 } 3945 3946 /* someone is deleting pa right now */ 3947 spin_unlock(&pa->pa_lock); 3948 spin_unlock(&ei->i_prealloc_lock); 3949 3950 /* we have to wait here because pa_deleted 3951 * doesn't mean pa is already unlinked from 3952 * the list. 
as we might be called from 3953 * ->clear_inode() the inode will get freed 3954 * and concurrent thread which is unlinking 3955 * pa from inode's list may access already 3956 * freed memory, bad-bad-bad */ 3957 3958 /* XXX: if this happens too often, we can 3959 * add a flag to force wait only in case 3960 * of ->clear_inode(), but not in case of 3961 * regular truncate */ 3962 schedule_timeout_uninterruptible(HZ); 3963 goto repeat; 3964 } 3965 spin_unlock(&ei->i_prealloc_lock); 3966 3967 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3968 BUG_ON(pa->pa_type != MB_INODE_PA); 3969 group = ext4_get_group_number(sb, pa->pa_pstart); 3970 3971 err = ext4_mb_load_buddy(sb, group, &e4b); 3972 if (err) { 3973 ext4_error(sb, "Error loading buddy information for %u", 3974 group); 3975 continue; 3976 } 3977 3978 bitmap_bh = ext4_read_block_bitmap(sb, group); 3979 if (bitmap_bh == NULL) { 3980 ext4_error(sb, "Error reading block bitmap for %u", 3981 group); 3982 ext4_mb_unload_buddy(&e4b); 3983 continue; 3984 } 3985 3986 ext4_lock_group(sb, group); 3987 list_del(&pa->pa_group_list); 3988 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3989 ext4_unlock_group(sb, group); 3990 3991 ext4_mb_unload_buddy(&e4b); 3992 put_bh(bitmap_bh); 3993 3994 list_del(&pa->u.pa_tmp_list); 3995 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3996 } 3997 } 3998 3999 #ifdef CONFIG_EXT4_DEBUG 4000 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4001 { 4002 struct super_block *sb = ac->ac_sb; 4003 ext4_group_t ngroups, i; 4004 4005 if (!ext4_mballoc_debug || 4006 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) 4007 return; 4008 4009 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" 4010 " Allocation context details:"); 4011 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", 4012 ac->ac_status, ac->ac_flags); 4013 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " 4014 "goal %lu/%lu/%lu@%lu, " 4015 "best %lu/%lu/%lu@%lu cr %d", 4016 (unsigned long)ac->ac_o_ex.fe_group, 4017 (unsigned long)ac->ac_o_ex.fe_start, 4018 (unsigned long)ac->ac_o_ex.fe_len, 4019 (unsigned long)ac->ac_o_ex.fe_logical, 4020 (unsigned long)ac->ac_g_ex.fe_group, 4021 (unsigned long)ac->ac_g_ex.fe_start, 4022 (unsigned long)ac->ac_g_ex.fe_len, 4023 (unsigned long)ac->ac_g_ex.fe_logical, 4024 (unsigned long)ac->ac_b_ex.fe_group, 4025 (unsigned long)ac->ac_b_ex.fe_start, 4026 (unsigned long)ac->ac_b_ex.fe_len, 4027 (unsigned long)ac->ac_b_ex.fe_logical, 4028 (int)ac->ac_criteria); 4029 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found); 4030 ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); 4031 ngroups = ext4_get_groups_count(sb); 4032 for (i = 0; i < ngroups; i++) { 4033 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 4034 struct ext4_prealloc_space *pa; 4035 ext4_grpblk_t start; 4036 struct list_head *cur; 4037 ext4_lock_group(sb, i); 4038 list_for_each(cur, &grp->bb_prealloc_list) { 4039 pa = list_entry(cur, struct ext4_prealloc_space, 4040 pa_group_list); 4041 spin_lock(&pa->pa_lock); 4042 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4043 NULL, &start); 4044 spin_unlock(&pa->pa_lock); 4045 printk(KERN_ERR "PA:%u:%d:%u \n", i, 4046 start, pa->pa_len); 4047 } 4048 ext4_unlock_group(sb, i); 4049 4050 if (grp->bb_free == 0) 4051 continue; 4052 printk(KERN_ERR "%u: %d/%d \n", 4053 i, grp->bb_free, grp->bb_fragments); 4054 } 4055 printk(KERN_ERR "\n"); 4056 } 4057 #else 4058 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4059 { 4060 return; 4061 } 4062 #endif 4063 4064 /* 4065 * We use 
locality group preallocation for small files. The size of the
4066 * file is determined by the current size or the resulting size after
4067 * allocation, whichever is larger
4068 *
4069 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4070 */
4071 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4072 {
4073 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4074 int bsbits = ac->ac_sb->s_blocksize_bits;
4075 loff_t size, isize;
4076
4077 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4078 return;
4079
4080 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4081 return;
4082
4083 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4084 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4085 >> bsbits;
4086
4087 if ((size == isize) &&
4088 !ext4_fs_is_busy(sbi) &&
4089 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4090 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4091 return;
4092 }
4093
4094 if (sbi->s_mb_group_prealloc <= 0) {
4095 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4096 return;
4097 }
4098
4099 /* don't use group allocation for large files */
4100 size = max(size, isize);
4101 if (size > sbi->s_mb_stream_request) {
4102 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4103 return;
4104 }
4105
4106 BUG_ON(ac->ac_lg != NULL);
4107 /*
4108 * locality group prealloc space is per cpu. The reason for having
4109 * per cpu locality groups is to reduce the contention between block
4110 * requests from multiple CPUs.
4111 */
4112 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
4113
4114 /* we're going to use group allocation */
4115 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4116
4117 /* serialize all allocations in the group */
4118 mutex_lock(&ac->ac_lg->lg_mutex);
4119 }
4120
4121 static noinline_for_stack int
4122 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4123 struct ext4_allocation_request *ar)
4124 {
4125 struct super_block *sb = ar->inode->i_sb;
4126 struct ext4_sb_info *sbi = EXT4_SB(sb);
4127 struct ext4_super_block *es = sbi->s_es;
4128 ext4_group_t group;
4129 unsigned int len;
4130 ext4_fsblk_t goal;
4131 ext4_grpblk_t block;
4132
4133 /* we can't allocate > group size */
4134 len = ar->len;
4135
4136 /* just a dirty hack to filter too big requests */
4137 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4138 len = EXT4_CLUSTERS_PER_GROUP(sb);
4139
4140 /* start searching from the goal */
4141 goal = ar->goal;
4142 if (goal < le32_to_cpu(es->s_first_data_block) ||
4143 goal >= ext4_blocks_count(es))
4144 goal = le32_to_cpu(es->s_first_data_block);
4145 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4146
4147 /* set up allocation goals */
4148 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4149 ac->ac_status = AC_STATUS_CONTINUE;
4150 ac->ac_sb = sb;
4151 ac->ac_inode = ar->inode;
4152 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4153 ac->ac_o_ex.fe_group = group;
4154 ac->ac_o_ex.fe_start = block;
4155 ac->ac_o_ex.fe_len = len;
4156 ac->ac_g_ex = ac->ac_o_ex;
4157 ac->ac_flags = ar->flags;
4158
4159 /* we have to define the context: will we work with a file or
4160 * locality group.
this is a policy, actually */
4161 ext4_mb_group_or_file(ac);
4162
4163 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4164 "left: %u/%u, right %u/%u to %swritable\n",
4165 (unsigned) ar->len, (unsigned) ar->logical,
4166 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4167 (unsigned) ar->lleft, (unsigned) ar->pleft,
4168 (unsigned) ar->lright, (unsigned) ar->pright,
4169 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4170 return 0;
4171
4172 }
4173
4174 static noinline_for_stack void
4175 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4176 struct ext4_locality_group *lg,
4177 int order, int total_entries)
4178 {
4179 ext4_group_t group = 0;
4180 struct ext4_buddy e4b;
4181 struct list_head discard_list;
4182 struct ext4_prealloc_space *pa, *tmp;
4183
4184 mb_debug(1, "discard locality group preallocation\n");
4185
4186 INIT_LIST_HEAD(&discard_list);
4187
4188 spin_lock(&lg->lg_prealloc_lock);
4189 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4190 pa_inode_list) {
4191 spin_lock(&pa->pa_lock);
4192 if (atomic_read(&pa->pa_count)) {
4193 /*
4194 * This is the pa that we just used
4195 * for block allocation. So don't
4196 * free that
4197 */
4198 spin_unlock(&pa->pa_lock);
4199 continue;
4200 }
4201 if (pa->pa_deleted) {
4202 spin_unlock(&pa->pa_lock);
4203 continue;
4204 }
4205 /* only lg prealloc space */
4206 BUG_ON(pa->pa_type != MB_GROUP_PA);
4207
4208 /* seems this one can be freed ... */
4209 pa->pa_deleted = 1;
4210 spin_unlock(&pa->pa_lock);
4211
4212 list_del_rcu(&pa->pa_inode_list);
4213 list_add(&pa->u.pa_tmp_list, &discard_list);
4214
4215 total_entries--;
4216 if (total_entries <= 5) {
4217 /*
4218 * we want to keep only 5 entries
4219 * allowing it to grow to 8. This
4220 * makes sure we don't call discard
4221 * soon for this list.
4222 */
4223 break;
4224 }
4225 }
4226 spin_unlock(&lg->lg_prealloc_lock);
4227
4228 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4229
4230 group = ext4_get_group_number(sb, pa->pa_pstart);
4231 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4232 ext4_error(sb, "Error loading buddy information for %u",
4233 group);
4234 continue;
4235 }
4236 ext4_lock_group(sb, group);
4237 list_del(&pa->pa_group_list);
4238 ext4_mb_release_group_pa(&e4b, pa);
4239 ext4_unlock_group(sb, group);
4240
4241 ext4_mb_unload_buddy(&e4b);
4242 list_del(&pa->u.pa_tmp_list);
4243 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4244 }
4245 }
4246
4247 /*
4248 * We have incremented pa_count. So it cannot be freed at this
4249 * point. Also we hold lg_mutex. So no parallel allocation is
4250 * possible from this lg. That means pa_free cannot be updated.
4251 *
4252 * A parallel ext4_mb_discard_group_preallocations is possible,
4253 * which can cause the lg_prealloc_list to be updated.
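* The list below is bucketed by the highest set bit of pa_free: e.g. a
* group PA with pa_free = 100 gives fls(100) - 1 = 6, so it is queued on
* lg_prealloc_list[6], clamped to at most PREALLOC_TB_SIZE - 1.
* (Illustrative numbers only.)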
4254 */
4255
4256 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4257 {
4258 int order, added = 0, lg_prealloc_count = 1;
4259 struct super_block *sb = ac->ac_sb;
4260 struct ext4_locality_group *lg = ac->ac_lg;
4261 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4262
4263 order = fls(pa->pa_free) - 1;
4264 if (order > PREALLOC_TB_SIZE - 1)
4265 /* The max size of hash table is PREALLOC_TB_SIZE */
4266 order = PREALLOC_TB_SIZE - 1;
4267 /* Add the prealloc space to lg */
4268 spin_lock(&lg->lg_prealloc_lock);
4269 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4270 pa_inode_list) {
4271 spin_lock(&tmp_pa->pa_lock);
4272 if (tmp_pa->pa_deleted) {
4273 spin_unlock(&tmp_pa->pa_lock);
4274 continue;
4275 }
4276 if (!added && pa->pa_free < tmp_pa->pa_free) {
4277 /* Add to the tail of the previous entry */
4278 list_add_tail_rcu(&pa->pa_inode_list,
4279 &tmp_pa->pa_inode_list);
4280 added = 1;
4281 /*
4282 * we want to count the total
4283 * number of entries in the list
4284 */
4285 }
4286 spin_unlock(&tmp_pa->pa_lock);
4287 lg_prealloc_count++;
4288 }
4289 if (!added)
4290 list_add_tail_rcu(&pa->pa_inode_list,
4291 &lg->lg_prealloc_list[order]);
4292 spin_unlock(&lg->lg_prealloc_lock);
4293
4294 /* Now trim the list to no more than 8 elements */
4295 if (lg_prealloc_count > 8) {
4296 ext4_mb_discard_lg_preallocations(sb, lg,
4297 order, lg_prealloc_count);
4298 return;
4299 }
4300 return;
4301 }
4302
4303 /*
4304 * release all resources we used in allocation
4305 */
4306 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4307 {
4308 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4309 struct ext4_prealloc_space *pa = ac->ac_pa;
4310 if (pa) {
4311 if (pa->pa_type == MB_GROUP_PA) {
4312 /* see comment in ext4_mb_use_group_pa() */
4313 spin_lock(&pa->pa_lock);
4314 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4315 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4316 pa->pa_free -= ac->ac_b_ex.fe_len;
4317 pa->pa_len -= ac->ac_b_ex.fe_len;
4318 spin_unlock(&pa->pa_lock);
4319 }
4320 }
4321 if (pa) {
4322 /*
4323 * We want to add the pa to the right bucket.
4324 * Remove it from the list and while adding
4325 * make sure the list to which we are adding
4326 * doesn't grow big.
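* (only a group PA that still has free clusters -- pa_free != 0 --
* is worth re-queueing; an exhausted one is simply dropped through
* ext4_mb_put_pa() below)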
4327 */
4328 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4329 spin_lock(pa->pa_obj_lock);
4330 list_del_rcu(&pa->pa_inode_list);
4331 spin_unlock(pa->pa_obj_lock);
4332 ext4_mb_add_n_trim(ac);
4333 }
4334 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4335 }
4336 if (ac->ac_bitmap_page)
4337 page_cache_release(ac->ac_bitmap_page);
4338 if (ac->ac_buddy_page)
4339 page_cache_release(ac->ac_buddy_page);
4340 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4341 mutex_unlock(&ac->ac_lg->lg_mutex);
4342 ext4_mb_collect_stats(ac);
4343 return 0;
4344 }
4345
4346 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4347 {
4348 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4349 int ret;
4350 int freed = 0;
4351
4352 trace_ext4_mb_discard_preallocations(sb, needed);
4353 for (i = 0; i < ngroups && needed > 0; i++) {
4354 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4355 freed += ret;
4356 needed -= ret;
4357 }
4358
4359 return freed;
4360 }
4361
4362 /*
4363 * Main entry point into mballoc to allocate blocks;
4364 * it tries to use preallocation first, then falls back
4365 * to the usual allocation
4366 */
4367 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4368 struct ext4_allocation_request *ar, int *errp)
4369 {
4370 int freed;
4371 struct ext4_allocation_context *ac = NULL;
4372 struct ext4_sb_info *sbi;
4373 struct super_block *sb;
4374 ext4_fsblk_t block = 0;
4375 unsigned int inquota = 0;
4376 unsigned int reserv_clstrs = 0;
4377
4378 might_sleep();
4379 sb = ar->inode->i_sb;
4380 sbi = EXT4_SB(sb);
4381
4382 trace_ext4_request_blocks(ar);
4383
4384 /* Allow using the superuser reservation for the quota file */
4385 if (IS_NOQUOTA(ar->inode))
4386 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4387
4388 /*
4389 * For delayed allocation, we could skip the ENOSPC and
4390 * EDQUOT check, as blocks and quotas have already been
4391 * reserved when the data was copied into the pagecache.
4392 */
4393 if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4394 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4395 else {
4396 /* Without delayed allocation we need to verify
4397 * there are enough free blocks to do the block allocation
4398 * and verify that the allocation doesn't exceed the quota limits.
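* e.g. (hypothetical numbers) a request for 256 clusters on a nearly
* full filesystem is retried with 128, 64, 32, ... until
* ext4_claim_free_clusters() succeeds, or fails with ENOSPC once
* ar->len reaches zero.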
4399 */
4400 while (ar->len &&
4401 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
4402
4403 /* let others free the space */
4404 cond_resched();
4405 ar->len = ar->len >> 1;
4406 }
4407 if (!ar->len) {
4408 *errp = -ENOSPC;
4409 return 0;
4410 }
4411 reserv_clstrs = ar->len;
4412 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4413 dquot_alloc_block_nofail(ar->inode,
4414 EXT4_C2B(sbi, ar->len));
4415 } else {
4416 while (ar->len &&
4417 dquot_alloc_block(ar->inode,
4418 EXT4_C2B(sbi, ar->len))) {
4419
4420 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4421 ar->len--;
4422 }
4423 }
4424 inquota = ar->len;
4425 if (ar->len == 0) {
4426 *errp = -EDQUOT;
4427 goto out;
4428 }
4429 }
4430
4431 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
4432 if (!ac) {
4433 ar->len = 0;
4434 *errp = -ENOMEM;
4435 goto out;
4436 }
4437
4438 *errp = ext4_mb_initialize_context(ac, ar);
4439 if (*errp) {
4440 ar->len = 0;
4441 goto out;
4442 }
4443
4444 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4445 if (!ext4_mb_use_preallocated(ac)) {
4446 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4447 ext4_mb_normalize_request(ac, ar);
4448 repeat:
4449 /* allocate space in core */
4450 *errp = ext4_mb_regular_allocator(ac);
4451 if (*errp)
4452 goto discard_and_exit;
4453
4454 /* as we've just preallocated more space than
4455 * the user originally requested, we store the allocated
4456 * space in a special descriptor */
4457 if (ac->ac_status == AC_STATUS_FOUND &&
4458 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4459 *errp = ext4_mb_new_preallocation(ac);
4460 if (*errp) {
4461 discard_and_exit:
4462 ext4_discard_allocated_blocks(ac);
4463 goto errout;
4464 }
4465 }
4466 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4467 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
4468 if (*errp == -EAGAIN) {
4469 /*
4470 * drop the reference that we took
4471 * in ext4_mb_use_best_found
4472 */
4473 ext4_mb_release_context(ac);
4474 ac->ac_b_ex.fe_group = 0;
4475 ac->ac_b_ex.fe_start = 0;
4476 ac->ac_b_ex.fe_len = 0;
4477 ac->ac_status = AC_STATUS_CONTINUE;
4478 goto repeat;
4479 } else if (*errp) {
4480 ext4_discard_allocated_blocks(ac);
4481 goto errout;
4482 } else {
4483 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4484 ar->len = ac->ac_b_ex.fe_len;
4485 }
4486 } else {
4487 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4488 if (freed)
4489 goto repeat;
4490 *errp = -ENOSPC;
4491 }
4492
4493 errout:
4494 if (*errp) {
4495 ac->ac_b_ex.fe_len = 0;
4496 ar->len = 0;
4497 ext4_mb_show_ac(ac);
4498 }
4499 ext4_mb_release_context(ac);
4500 out:
4501 if (ac)
4502 kmem_cache_free(ext4_ac_cachep, ac);
4503 if (inquota && ar->len < inquota)
4504 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4505 if (!ar->len) {
4506 if (!ext4_test_inode_state(ar->inode,
4507 EXT4_STATE_DELALLOC_RESERVED))
4508 /* release all the reserved blocks if non delalloc */
4509 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4510 reserv_clstrs);
4511 }
4512
4513 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
4514
4515 return block;
4516 }
4517
4518 /*
4519 * We can merge two free data extents only if the physical blocks
4520 * are contiguous, AND the extents were freed by the same transaction,
4521 * AND the blocks are associated with the same group.
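* For example (hypothetical numbers), {tid 42, group 7, clusters
* 100..107} and {tid 42, group 7, clusters 108..115} can merge into
* {tid 42, group 7, clusters 100..115}, while extents freed by
* different transactions must stay separate so each one becomes
* reusable only after its own transaction commits.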
4522 */
4523 static int can_merge(struct ext4_free_data *entry1,
4524 struct ext4_free_data *entry2)
4525 {
4526 if ((entry1->efd_tid == entry2->efd_tid) &&
4527 (entry1->efd_group == entry2->efd_group) &&
4528 ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4529 return 1;
4530 return 0;
4531 }
4532
4533 static noinline_for_stack int
4534 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4535 struct ext4_free_data *new_entry)
4536 {
4537 ext4_group_t group = e4b->bd_group;
4538 ext4_grpblk_t cluster;
4539 struct ext4_free_data *entry;
4540 struct ext4_group_info *db = e4b->bd_info;
4541 struct super_block *sb = e4b->bd_sb;
4542 struct ext4_sb_info *sbi = EXT4_SB(sb);
4543 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4544 struct rb_node *parent = NULL, *new_node;
4545
4546 BUG_ON(!ext4_handle_valid(handle));
4547 BUG_ON(e4b->bd_bitmap_page == NULL);
4548 BUG_ON(e4b->bd_buddy_page == NULL);
4549
4550 new_node = &new_entry->efd_node;
4551 cluster = new_entry->efd_start_cluster;
4552
4553 if (!*n) {
4554 /* first free block extent. We need to
4555 * protect buddy cache from being freed,
4556 * otherwise we'll refresh it from
4557 * the on-disk bitmap and lose not-yet-available
4558 * blocks */
4559 page_cache_get(e4b->bd_buddy_page);
4560 page_cache_get(e4b->bd_bitmap_page);
4561 }
4562 while (*n) {
4563 parent = *n;
4564 entry = rb_entry(parent, struct ext4_free_data, efd_node);
4565 if (cluster < entry->efd_start_cluster)
4566 n = &(*n)->rb_left;
4567 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4568 n = &(*n)->rb_right;
4569 else {
4570 ext4_grp_locked_error(sb, group, 0,
4571 ext4_group_first_block_no(sb, group) +
4572 EXT4_C2B(sbi, cluster),
4573 "Block already on to-be-freed list");
4574 return 0;
4575 }
4576 }
4577
4578 rb_link_node(new_node, parent, n);
4579 rb_insert_color(new_node, &db->bb_free_root);
4580
4581 /* Now see if the extent can be merged to the left and right */
4582 node = rb_prev(new_node);
4583 if (node) {
4584 entry = rb_entry(node, struct ext4_free_data, efd_node);
4585 if (can_merge(entry, new_entry) &&
4586 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4587 new_entry->efd_start_cluster = entry->efd_start_cluster;
4588 new_entry->efd_count += entry->efd_count;
4589 rb_erase(node, &(db->bb_free_root));
4590 kmem_cache_free(ext4_free_data_cachep, entry);
4591 }
4592 }
4593
4594 node = rb_next(new_node);
4595 if (node) {
4596 entry = rb_entry(node, struct ext4_free_data, efd_node);
4597 if (can_merge(new_entry, entry) &&
4598 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4599 new_entry->efd_count += entry->efd_count;
4600 rb_erase(node, &(db->bb_free_root));
4601 kmem_cache_free(ext4_free_data_cachep, entry);
4602 }
4603 }
4604 /* Add the extent to the transaction's private list */
4605 ext4_journal_callback_add(handle, ext4_free_data_callback,
4606 &new_entry->efd_jce);
4607 return 0;
4608 }
4609
4610 /**
4611 * ext4_free_blocks() -- Free given blocks and update quota
4612 * @handle: handle for this transaction
4613 * @inode: inode
4614 * @block: start physical block to free
4615 * @count: number of blocks to free
4616 * @flags: flags used by ext4_free_blocks
4617 */
4618 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4619 struct buffer_head *bh, ext4_fsblk_t block,
4620 unsigned long count, int flags)
4621 {
4622 struct buffer_head *bitmap_bh = NULL;
4623 struct super_block *sb = inode->i_sb;
4624 struct ext4_group_desc *gdp;
4625 unsigned int overflow;
4626
ext4_grpblk_t bit; 4627 struct buffer_head *gd_bh; 4628 ext4_group_t block_group; 4629 struct ext4_sb_info *sbi; 4630 struct ext4_inode_info *ei = EXT4_I(inode); 4631 struct ext4_buddy e4b; 4632 unsigned int count_clusters; 4633 int err = 0; 4634 int ret; 4635 4636 might_sleep(); 4637 if (bh) { 4638 if (block) 4639 BUG_ON(block != bh->b_blocknr); 4640 else 4641 block = bh->b_blocknr; 4642 } 4643 4644 sbi = EXT4_SB(sb); 4645 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 4646 !ext4_data_block_valid(sbi, block, count)) { 4647 ext4_error(sb, "Freeing blocks not in datazone - " 4648 "block = %llu, count = %lu", block, count); 4649 goto error_return; 4650 } 4651 4652 ext4_debug("freeing block %llu\n", block); 4653 trace_ext4_free_blocks(inode, block, count, flags); 4654 4655 if (flags & EXT4_FREE_BLOCKS_FORGET) { 4656 struct buffer_head *tbh = bh; 4657 int i; 4658 4659 BUG_ON(bh && (count > 1)); 4660 4661 for (i = 0; i < count; i++) { 4662 cond_resched(); 4663 if (!bh) 4664 tbh = sb_find_get_block(inode->i_sb, 4665 block + i); 4666 if (!tbh) 4667 continue; 4668 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4669 inode, tbh, block + i); 4670 } 4671 } 4672 4673 /* 4674 * We need to make sure we don't reuse the freed block until 4675 * after the transaction is committed, which we can do by 4676 * treating the block as metadata, below. We make an 4677 * exception if the inode is to be written in writeback mode 4678 * since writeback mode has weak data consistency guarantees. 4679 */ 4680 if (!ext4_should_writeback_data(inode)) 4681 flags |= EXT4_FREE_BLOCKS_METADATA; 4682 4683 /* 4684 * If the extent to be freed does not begin on a cluster 4685 * boundary, we need to deal with partial clusters at the 4686 * beginning and end of the extent. Normally we will free 4687 * blocks at the beginning or the end unless we are explicitly 4688 * requested to avoid doing so. 4689 */ 4690 overflow = EXT4_PBLK_COFF(sbi, block); 4691 if (overflow) { 4692 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 4693 overflow = sbi->s_cluster_ratio - overflow; 4694 block += overflow; 4695 if (count > overflow) 4696 count -= overflow; 4697 else 4698 return; 4699 } else { 4700 block -= overflow; 4701 count += overflow; 4702 } 4703 } 4704 overflow = EXT4_LBLK_COFF(sbi, count); 4705 if (overflow) { 4706 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 4707 if (count > overflow) 4708 count -= overflow; 4709 else 4710 return; 4711 } else 4712 count += sbi->s_cluster_ratio - overflow; 4713 } 4714 4715 do_more: 4716 overflow = 0; 4717 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4718 4719 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 4720 ext4_get_group_info(sb, block_group)))) 4721 return; 4722 4723 /* 4724 * Check to see if we are freeing blocks across a group 4725 * boundary. 
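* e.g. (hypothetical numbers, cluster ratio 1) with 32768 blocks per
* group, freeing 1000 blocks starting at in-group offset 32000 leaves
* overflow = 232: the first pass frees 768 blocks in this group, then
* the code jumps back to do_more for the remaining 232 in the next one.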
4726 */ 4727 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4728 overflow = EXT4_C2B(sbi, bit) + count - 4729 EXT4_BLOCKS_PER_GROUP(sb); 4730 count -= overflow; 4731 } 4732 count_clusters = EXT4_NUM_B2C(sbi, count); 4733 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4734 if (!bitmap_bh) { 4735 err = -EIO; 4736 goto error_return; 4737 } 4738 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 4739 if (!gdp) { 4740 err = -EIO; 4741 goto error_return; 4742 } 4743 4744 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 4745 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 4746 in_range(block, ext4_inode_table(sb, gdp), 4747 EXT4_SB(sb)->s_itb_per_group) || 4748 in_range(block + count - 1, ext4_inode_table(sb, gdp), 4749 EXT4_SB(sb)->s_itb_per_group)) { 4750 4751 ext4_error(sb, "Freeing blocks in system zone - " 4752 "Block = %llu, count = %lu", block, count); 4753 /* err = 0. ext4_std_error should be a no op */ 4754 goto error_return; 4755 } 4756 4757 BUFFER_TRACE(bitmap_bh, "getting write access"); 4758 err = ext4_journal_get_write_access(handle, bitmap_bh); 4759 if (err) 4760 goto error_return; 4761 4762 /* 4763 * We are about to modify some metadata. Call the journal APIs 4764 * to unshare ->b_data if a currently-committing transaction is 4765 * using it 4766 */ 4767 BUFFER_TRACE(gd_bh, "get_write_access"); 4768 err = ext4_journal_get_write_access(handle, gd_bh); 4769 if (err) 4770 goto error_return; 4771 #ifdef AGGRESSIVE_CHECK 4772 { 4773 int i; 4774 for (i = 0; i < count_clusters; i++) 4775 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4776 } 4777 #endif 4778 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 4779 4780 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4781 if (err) 4782 goto error_return; 4783 4784 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { 4785 struct ext4_free_data *new_entry; 4786 /* 4787 * blocks being freed are metadata. these blocks shouldn't 4788 * be used until this transaction is committed 4789 */ 4790 retry: 4791 new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); 4792 if (!new_entry) { 4793 /* 4794 * We use a retry loop because 4795 * ext4_free_blocks() is not allowed to fail. 4796 */ 4797 cond_resched(); 4798 congestion_wait(BLK_RW_ASYNC, HZ/50); 4799 goto retry; 4800 } 4801 new_entry->efd_start_cluster = bit; 4802 new_entry->efd_group = block_group; 4803 new_entry->efd_count = count_clusters; 4804 new_entry->efd_tid = handle->h_transaction->t_tid; 4805 4806 ext4_lock_group(sb, block_group); 4807 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4808 ext4_mb_free_metadata(handle, &e4b, new_entry); 4809 } else { 4810 /* need to update group_info->bb_free and bitmap 4811 * with group lock held. 
ext4_mb_generate_buddy() looks at
4812 * them with the group lock held
4813 */
4814 if (test_opt(sb, DISCARD)) {
4815 err = ext4_issue_discard(sb, block_group, bit, count);
4816 if (err && err != -EOPNOTSUPP)
4817 ext4_msg(sb, KERN_WARNING, "discard request in"
4818 " group:%d block:%d count:%lu failed"
4819 " with %d", block_group, bit, count,
4820 err);
4821 } else
4822 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
4823
4824 ext4_lock_group(sb, block_group);
4825 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4826 mb_free_blocks(inode, &e4b, bit, count_clusters);
4827 }
4828
4829 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4830 ext4_free_group_clusters_set(sb, gdp, ret);
4831 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4832 ext4_group_desc_csum_set(sb, block_group, gdp);
4833 ext4_unlock_group(sb, block_group);
4834
4835 if (sbi->s_log_groups_per_flex) {
4836 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4837 atomic64_add(count_clusters,
4838 &sbi->s_flex_groups[flex_group].free_clusters);
4839 }
4840
4841 if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
4842 percpu_counter_add(&sbi->s_dirtyclusters_counter,
4843 count_clusters);
4844 spin_lock(&ei->i_block_reservation_lock);
4845 if (flags & EXT4_FREE_BLOCKS_METADATA)
4846 ei->i_reserved_meta_blocks += count_clusters;
4847 else
4848 ei->i_reserved_data_blocks += count_clusters;
4849 spin_unlock(&ei->i_block_reservation_lock);
4850 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4851 dquot_reclaim_block(inode,
4852 EXT4_C2B(sbi, count_clusters));
4853 } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4854 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4855 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4856
4857 ext4_mb_unload_buddy(&e4b);
4858
4859 /* We dirtied the bitmap block */
4860 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4861 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4862
4863 /* And the group descriptor block */
4864 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4865 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4866 if (!err)
4867 err = ret;
4868
4869 if (overflow && !err) {
4870 block += count;
4871 count = overflow;
4872 put_bh(bitmap_bh);
4873 goto do_more;
4874 }
4875 error_return:
4876 brelse(bitmap_bh);
4877 ext4_std_error(sb, err);
4878 return;
4879 }
4880
4881 /**
4882 * ext4_group_add_blocks() -- Add given blocks to an existing group
4883 * @handle: handle to this transaction
4884 * @sb: super block
4885 * @block: start physical block to add to the block group
4886 * @count: number of blocks to add
4887 *
4888 * This marks the blocks as free in the bitmap and buddy.
4889 */
4890 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4891 ext4_fsblk_t block, unsigned long count)
4892 {
4893 struct buffer_head *bitmap_bh = NULL;
4894 struct buffer_head *gd_bh;
4895 ext4_group_t block_group;
4896 ext4_grpblk_t bit;
4897 unsigned int i;
4898 struct ext4_group_desc *desc;
4899 struct ext4_sb_info *sbi = EXT4_SB(sb);
4900 struct ext4_buddy e4b;
4901 int err = 0, ret, blk_free_count;
4902 ext4_grpblk_t blocks_freed;
4903
4904 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4905
4906 if (count == 0)
4907 return 0;
4908
4909 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4910 /*
4911 * Check to see if we are freeing blocks across a group
4912 * boundary.
4913 */
4914 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4915 ext4_warning(sb, "too many blocks added to group %u\n",
4916 block_group);
4917 err = -EINVAL;
4918 goto error_return;
4919 }
4920
4921 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4922 if (!bitmap_bh) {
4923 err = -EIO;
4924 goto error_return;
4925 }
4926
4927 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4928 if (!desc) {
4929 err = -EIO;
4930 goto error_return;
4931 }
4932
4933 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4934 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4935 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4936 in_range(block + count - 1, ext4_inode_table(sb, desc),
4937 sbi->s_itb_per_group)) {
4938 ext4_error(sb, "Adding blocks in system zones - "
4939 "Block = %llu, count = %lu",
4940 block, count);
4941 err = -EINVAL;
4942 goto error_return;
4943 }
4944
4945 BUFFER_TRACE(bitmap_bh, "getting write access");
4946 err = ext4_journal_get_write_access(handle, bitmap_bh);
4947 if (err)
4948 goto error_return;
4949
4950 /*
4951 * We are about to modify some metadata. Call the journal APIs
4952 * to unshare ->b_data if a currently-committing transaction is
4953 * using it
4954 */
4955 BUFFER_TRACE(gd_bh, "get_write_access");
4956 err = ext4_journal_get_write_access(handle, gd_bh);
4957 if (err)
4958 goto error_return;
4959
4960 for (i = 0, blocks_freed = 0; i < count; i++) {
4961 BUFFER_TRACE(bitmap_bh, "clear bit");
4962 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4963 ext4_error(sb, "bit already cleared for block %llu",
4964 (ext4_fsblk_t)(block + i));
4965 BUFFER_TRACE(bitmap_bh, "bit already cleared");
4966 } else {
4967 blocks_freed++;
4968 }
4969 }
4970
4971 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4972 if (err)
4973 goto error_return;
4974
4975 /*
4976 * need to update group_info->bb_free and bitmap
4977 * with the group lock held. ext4_mb_generate_buddy() looks at
4978 * them with the group lock held
4979 */
4980 ext4_lock_group(sb, block_group);
4981 mb_clear_bits(bitmap_bh->b_data, bit, count);
4982 mb_free_blocks(NULL, &e4b, bit, count);
4983 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4984 ext4_free_group_clusters_set(sb, desc, blk_free_count);
4985 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
4986 ext4_group_desc_csum_set(sb, block_group, desc);
4987 ext4_unlock_group(sb, block_group);
4988 percpu_counter_add(&sbi->s_freeclusters_counter,
4989 EXT4_NUM_B2C(sbi, blocks_freed));
4990
4991 if (sbi->s_log_groups_per_flex) {
4992 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4993 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
4994 &sbi->s_flex_groups[flex_group].free_clusters);
4995 }
4996
4997 ext4_mb_unload_buddy(&e4b);
4998
4999 /* We dirtied the bitmap block */
5000 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5001 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5002
5003 /* And the group descriptor block */
5004 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5005 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5006 if (!err)
5007 err = ret;
5008
5009 error_return:
5010 brelse(bitmap_bh);
5011 ext4_std_error(sb, err);
5012 return err;
5013 }
5014
5015 /**
5016 * ext4_trim_extent -- function to TRIM one single free extent in the group
5017 * @sb: super block for the file system
5018 * @start: starting block of the free extent in the alloc. group
5019 * @count: number of blocks to TRIM
5020 * @group: alloc.
group we are working with
5021 * @e4b: ext4 buddy for the group
5022 *
5023 * Trim "count" blocks starting at "start" in the "group". To assure that no
5024 * one will allocate those blocks, mark them as used in the buddy bitmap.
5025 * This must be called under the group lock.
5026 */
5027 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5028 ext4_group_t group, struct ext4_buddy *e4b)
5029 __releases(bitlock)
5030 __acquires(bitlock)
5031 {
5032 struct ext4_free_extent ex;
5033 int ret = 0;
5034
5035 trace_ext4_trim_extent(sb, group, start, count);
5036
5037 assert_spin_locked(ext4_group_lock_ptr(sb, group));
5038
5039 ex.fe_start = start;
5040 ex.fe_group = group;
5041 ex.fe_len = count;
5042
5043 /*
5044 * Mark blocks used, so no one can reuse them while
5045 * being trimmed.
5046 */
5047 mb_mark_used(e4b, &ex);
5048 ext4_unlock_group(sb, group);
5049 ret = ext4_issue_discard(sb, group, start, count);
5050 ext4_lock_group(sb, group);
5051 mb_free_blocks(NULL, e4b, start, ex.fe_len);
5052 return ret;
5053 }
5054
5055 /**
5056 * ext4_trim_all_free -- function to trim all free space in alloc. group
5057 * @sb: super block for file system
5058 * @group: group to be trimmed
5059 * @start: first group block to examine
5060 * @max: last group block to examine
5061 * @minblocks: minimum extent block count
5062 *
5063 * ext4_trim_all_free walks through the group's buddy bitmap searching
5064 * for free extents. When a free extent of at least minblocks blocks is
5065 * found, it is first marked as used in the group buddy bitmap, so that
5066 * nobody can allocate those blocks while they are being trimmed. Then
5067 * ext4_trim_extent is called to issue a TRIM command on the extent,
5068 * after which the extent is freed again in the group buddy bitmap.
5069 * This is repeated until the whole group has been scanned, or until
5070 * fewer than minblocks free blocks remain in the group (see the
5071 * bb_free check at the bottom of the scan loop).
5072 */
5073 static ext4_grpblk_t
5074 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5075 ext4_grpblk_t start, ext4_grpblk_t max,
5076 ext4_grpblk_t minblocks)
5077 {
5078 void *bitmap;
5079 ext4_grpblk_t next, count = 0, free_count = 0;
5080 struct ext4_buddy e4b;
5081 int ret = 0;
5082
5083 trace_ext4_trim_all_free(sb, group, start, max);
5084
5085 ret = ext4_mb_load_buddy(sb, group, &e4b);
5086 if (ret) {
5087 ext4_error(sb, "Error in loading buddy "
5088 "information for %u", group);
5089 return ret;
5090 }
5091 bitmap = e4b.bd_bitmap;
5092
5093 ext4_lock_group(sb, group);
5094 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5095 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5096 goto out;
5097
5098 start = (e4b.bd_info->bb_first_free > start) ?
5099 e4b.bd_info->bb_first_free : start;
5100
5101 while (start <= max) {
5102 start = mb_find_next_zero_bit(bitmap, max + 1, start);
5103 if (start > max)
5104 break;
5105 next = mb_find_next_bit(bitmap, max + 1, start);
5106
5107 if ((next - start) >= minblocks) {
5108 ret = ext4_trim_extent(sb, start,
5109 next - start, group, &e4b);
5110 if (ret && ret != -EOPNOTSUPP)
5111 break;
5112 ret = 0;
5113 count += next - start;
5114 }
5115 free_count += next - start;
5116 start = next + 1;
5117
5118 if (fatal_signal_pending(current)) {
5119 count = -ERESTARTSYS;
5120 break;
5121 }
5122
5123 if (need_resched()) {
5124 ext4_unlock_group(sb, group);
5125 cond_resched();
5126 ext4_lock_group(sb, group);
5127 }
5128
5129 if ((e4b.bd_info->bb_free - free_count) < minblocks)
5130 break;
5131 }
5132
5133 if (!ret) {
5134 ret = count;
5135 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
5136 }
5137 out:
5138 ext4_unlock_group(sb, group);
5139 ext4_mb_unload_buddy(&e4b);
5140
5141 ext4_debug("trimmed %d blocks in the group %d\n",
5142 count, group);
5143
5144 return ret;
5145 }
5146
5147 /**
5148 * ext4_trim_fs() -- trim ioctl handler
5149 * @sb: superblock for filesystem
5150 * @range: fstrim_range structure
5151 *
5152 * start: first byte to trim
5153 * len: number of bytes to trim from start
5154 * minlen: minimum extent length in bytes
5155 * ext4_trim_fs goes through all allocation groups containing bytes from
5156 * start to start+len. For each such group the ext4_trim_all_free function
5157 * is invoked to trim all free space.
5158 */
5159 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5160 {
5161 struct ext4_group_info *grp;
5162 ext4_group_t group, first_group, last_group;
5163 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
5164 uint64_t start, end, minlen, trimmed = 0;
5165 ext4_fsblk_t first_data_blk =
5166 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
5167 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
5168 int ret = 0;
5169
5170 start = range->start >> sb->s_blocksize_bits;
5171 end = start + (range->len >> sb->s_blocksize_bits) - 1;
5172 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5173 range->minlen >> sb->s_blocksize_bits);
5174
5175 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5176 start >= max_blks ||
5177 range->len < sb->s_blocksize)
5178 return -EINVAL;
5179 if (end >= max_blks)
5180 end = max_blks - 1;
5181 if (end <= first_data_blk)
5182 goto out;
5183 if (start < first_data_blk)
5184 start = first_data_blk;
5185
5186 /* Determine first and last group to examine based on start and end */
5187 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
5188 &first_group, &first_cluster);
5189 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
5190 &last_group, &last_cluster);
5191
5192 /* end now represents the last cluster to discard in this group */
5193 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
5194
5195 for (group = first_group; group <= last_group; group++) {
5196 grp = ext4_get_group_info(sb, group);
5197 /* We only do this if the grp has never been initialized */
5198 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
5199 ret = ext4_mb_init_group(sb, group);
5200 if (ret)
5201 break;
5202 }
5203
5204 /*
5205 * For all the groups except the last one, the last cluster will
5206 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5207 * change it for the last group, note that last_cluster is
5208 * already computed earlier by ext4_get_group_no_and_offset()
5209 */
5210 if (group == last_group)
5211 end = last_cluster;
5212
5213 if (grp->bb_free >=
minlen) { 5214 cnt = ext4_trim_all_free(sb, group, first_cluster, 5215 end, minlen); 5216 if (cnt < 0) { 5217 ret = cnt; 5218 break; 5219 } 5220 trimmed += cnt; 5221 } 5222 5223 /* 5224 * For every group except the first one, we are sure 5225 * that the first cluster to discard will be cluster #0. 5226 */ 5227 first_cluster = 0; 5228 } 5229 5230 if (!ret) 5231 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); 5232 5233 out: 5234 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 5235 return ret; 5236 } 5237
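/*
 * Worked example for the unit conversion above (hypothetical numbers):
 * on a filesystem with 4KiB blocks, an FITRIM request of
 * { start = 4 GiB, len = 1 GiB, minlen = 64 KiB } becomes
 * start = 1048576, end = 1310719 (in blocks) and minlen = 16 blocks
 * (16 clusters on a non-bigalloc filesystem), and ext4_trim_all_free()
 * is then invoked for every group overlapping that block range.
 */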