/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in a few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide whether to
 * use group preallocation or inode preallocation depending on the size
 * of the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * group preallocation. The default value of s_mb_stream_request is 16
 * blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to keep small files close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * An inode preallocation space is selected by looking at the _logical_
 * start block. Only if the logical file block falls within the range of
 * a prealloc space do we consume that prealloc space. This makes sure
 * that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we
 * don't modify any of the values associated with it except pa_free.
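 *
 * As a rough illustration only (simplified pseudo-code with made-up
 * variable names, not the actual helper used later in this file),
 * consuming an inode PA when allocating 'len' clusters at logical block
 * 'logical' boils down to:
 *
 *	if (pa->pa_lstart <= logical &&
 *	    logical < pa->pa_lstart + pa->pa_len) {
 *		physical = pa->pa_pstart + (logical - pa->pa_lstart);
 *		pa->pa_free -= len;	(only pa_free is updated)
 *	}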
 *
 * If we are not able to find blocks in the inode prealloc space and if
 * we have the group allocation flag set then we look at the locality
 * group prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this
 * point.
 *
 * The locality group prealloc space is used by looking at whether we
 * have enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is
 * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
 * offset gets mapped to the buddy and bitmap information regarding
 * different groups. The buddy information is attached to the buddy
 * cache inode so that we can access it through the page cache. The
 * information regarding each group is loaded via ext4_mb_load_buddy and
 * consists of the block bitmap and the buddy information, stored in the
 * inode as:
 *
 * { page }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page
 * (PAGE_CACHE_SIZE / blocksize) blocks. So it can hold information for
 * groups_per_page groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
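 *
 * A worked example (the numbers are illustrative, not a requirement):
 * with 4k pages and a 1k blocksize, blocks_per_page = 4 and
 * groups_per_page = 2. Group 5's bitmap lives in buddy-cache block
 * 2 * 5 = 10, i.e. page 10 / 4 = 2 at offset 10 % 4 = 2, and its buddy
 * in block 11 (page 2, offset 3). With a 4k blocksize each page holds a
 * single block, so a group's bitmap and buddy end up on two consecutive
 * pages (2 * group and 2 * group + 1).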
 *
 * We look for 'count' number of blocks in the buddy cache. If we are
 * able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we actually need.
 * The extra blocks that we get after allocation are added to the
 * respective prealloc list. In case of inode preallocation we follow a
 * list of heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value
 * of s_mb_group_prealloc depends on the cluster size; for non-bigalloc
 * file systems, it is 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented
 * in terms of number of blocks. If we have mounted the file system with
 * the -o stripe=<value> option the group prealloc request is normalized
 * to the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal
 * to the stripe size (sbi->s_stripe), we try to search for contiguous
 * blocks of the stripe size. This should result in better allocation on
 * RAID setups. If not, we search in the specific group using the bitmap
 * for the best extents. The tunables min_to_scan and max_to_scan
 * control the behaviour here. min_to_scan indicates how long mballoc
 * __must__ look for a best extent and max_to_scan indicates how long
 * mballoc __can__ look for a best extent among the found extents.
 * Searching for the blocks starts with the group specified as the goal
 * value in the allocation context via ac_g_ex. Each group is first
 * checked against the criteria that determine whether it can be used
 * for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both types of prealloc space are populated as described above. So for
 * the first request we will hit the buddy cache, which will result in
 * this prealloc space getting filled. The prealloc space is then later
 * used for subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks
 *    left unused. so, before taking some block from the descriptor, one
 *    must make sure the corresponding logical block isn't allocated
 *    yet. this also means that freeing any block within the
 *    descriptor's range must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group.
 *    space from this type of preallocation can be used for any inode.
 *    thus it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *	in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is
 * either free or used in ALL structures. notice: "any time" should not
 * be read literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the
 * number of blocks: how many blocks are marked used/free in the on-disk
 * bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *       is what the real operation uses, because we can't know the
 *       actually used bits from the PA, only from the on-disk bitmap
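 *
 * as an illustration of the bookkeeping above (the numbers are made up),
 * counting clusters marked used in each structure:
 *
 *	operation			on-disk	buddy	PA
 *	new inode PA (N = 16)		    0	   16	  16
 *	use inode PA (N = 10)		   10	   16	   6
 *	use inode PA (N = 6)		   16	   16	   0
 *
 * at every step the identity "buddy = on-disk + PA" holds; once the PA
 * has been used to emptiness there is nothing left for a discard to
 * return to the buddy.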
 *
 * if we follow this strict logic, then all operations above should be
 * atomic. given that some of them can block, we'd have to use something
 * like semaphores, killing performance on high-end SMP hardware.
 * let's try to relax it using the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the
 *     on-disk bitmap has a bit set and a PA claims the same block, it's
 *     OK. IOW, one can set a bit in the on-disk bitmap if the buddy has
 *     the same bit set and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to
 *      avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA
 *      has uptodate data; given (3) we only care that the PA -= N
 *      operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *    - use locality group PA
 *      again, PA -= N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different
 *      blocks
 *    - discard locality group PA
 *      discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few consequences:
 *  - a PA is referenced and, while it is, no discard is possible
 *  - a PA is referenced until the corresponding blocks are marked in
 *    the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap has been updated
 *  - discard must not compete with init. either init is done before any
 *    discard, or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and PAs is done
 *    atomically
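 *
 * as a condensed sketch of that last point (the real thing, with its
 * per-page handling, locking and error paths, is ext4_mb_init_cache()
 * below; this is not the exact call sequence):
 *
 *	ext4_lock_group(sb, group);
 *	memcpy(incore, on_disk_bitmap, blocksize);	   (on-disk bits)
 *	ext4_mb_generate_from_pa(sb, incore, group);	   (+ PA bits)
 *	ext4_mb_generate_buddy(sb, buddy, incore, group);  (build buddy)
 *	ext4_unlock_group(sb, group);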
 *
 * there is a special case when we've used a PA to emptiness: no need to
 * modify the buddy in this case, but we should care about concurrent
 * init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
383 #endif 384 return addr; 385 } 386 387 static inline int mb_test_bit(int bit, void *addr) 388 { 389 /* 390 * ext4_test_bit on architecture like powerpc 391 * needs unsigned long aligned address 392 */ 393 addr = mb_correct_addr_and_bit(&bit, addr); 394 return ext4_test_bit(bit, addr); 395 } 396 397 static inline void mb_set_bit(int bit, void *addr) 398 { 399 addr = mb_correct_addr_and_bit(&bit, addr); 400 ext4_set_bit(bit, addr); 401 } 402 403 static inline void mb_clear_bit(int bit, void *addr) 404 { 405 addr = mb_correct_addr_and_bit(&bit, addr); 406 ext4_clear_bit(bit, addr); 407 } 408 409 static inline int mb_test_and_clear_bit(int bit, void *addr) 410 { 411 addr = mb_correct_addr_and_bit(&bit, addr); 412 return ext4_test_and_clear_bit(bit, addr); 413 } 414 415 static inline int mb_find_next_zero_bit(void *addr, int max, int start) 416 { 417 int fix = 0, ret, tmpmax; 418 addr = mb_correct_addr_and_bit(&fix, addr); 419 tmpmax = max + fix; 420 start += fix; 421 422 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 423 if (ret > max) 424 return max; 425 return ret; 426 } 427 428 static inline int mb_find_next_bit(void *addr, int max, int start) 429 { 430 int fix = 0, ret, tmpmax; 431 addr = mb_correct_addr_and_bit(&fix, addr); 432 tmpmax = max + fix; 433 start += fix; 434 435 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 436 if (ret > max) 437 return max; 438 return ret; 439 } 440 441 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 442 { 443 char *bb; 444 445 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 446 BUG_ON(max == NULL); 447 448 if (order > e4b->bd_blkbits + 1) { 449 *max = 0; 450 return NULL; 451 } 452 453 /* at order 0 we see each particular block */ 454 if (order == 0) { 455 *max = 1 << (e4b->bd_blkbits + 3); 456 return e4b->bd_bitmap; 457 } 458 459 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 460 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 461 462 return bb; 463 } 464 465 #ifdef DOUBLE_CHECK 466 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 467 int first, int count) 468 { 469 int i; 470 struct super_block *sb = e4b->bd_sb; 471 472 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 473 return; 474 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 475 for (i = 0; i < count; i++) { 476 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 477 ext4_fsblk_t blocknr; 478 479 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 480 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 481 ext4_grp_locked_error(sb, e4b->bd_group, 482 inode ? 
inode->i_ino : 0, 483 blocknr, 484 "freeing block already freed " 485 "(bit %u)", 486 first + i); 487 } 488 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 489 } 490 } 491 492 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 493 { 494 int i; 495 496 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 497 return; 498 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 499 for (i = 0; i < count; i++) { 500 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 501 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 502 } 503 } 504 505 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 506 { 507 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 508 unsigned char *b1, *b2; 509 int i; 510 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 511 b2 = (unsigned char *) bitmap; 512 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 513 if (b1[i] != b2[i]) { 514 ext4_msg(e4b->bd_sb, KERN_ERR, 515 "corruption in group %u " 516 "at byte %u(%u): %x in copy != %x " 517 "on disk/prealloc", 518 e4b->bd_group, i, i * 8, b1[i], b2[i]); 519 BUG(); 520 } 521 } 522 } 523 } 524 525 #else 526 static inline void mb_free_blocks_double(struct inode *inode, 527 struct ext4_buddy *e4b, int first, int count) 528 { 529 return; 530 } 531 static inline void mb_mark_used_double(struct ext4_buddy *e4b, 532 int first, int count) 533 { 534 return; 535 } 536 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 537 { 538 return; 539 } 540 #endif 541 542 #ifdef AGGRESSIVE_CHECK 543 544 #define MB_CHECK_ASSERT(assert) \ 545 do { \ 546 if (!(assert)) { \ 547 printk(KERN_EMERG \ 548 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 549 function, file, line, # assert); \ 550 BUG(); \ 551 } \ 552 } while (0) 553 554 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 555 const char *function, int line) 556 { 557 struct super_block *sb = e4b->bd_sb; 558 int order = e4b->bd_blkbits + 1; 559 int max; 560 int max2; 561 int i; 562 int j; 563 int k; 564 int count; 565 struct ext4_group_info *grp; 566 int fragments = 0; 567 int fstart; 568 struct list_head *cur; 569 void *buddy; 570 void *buddy2; 571 572 { 573 static int mb_check_counter; 574 if (mb_check_counter++ % 100 != 0) 575 return 0; 576 } 577 578 while (order > 1) { 579 buddy = mb_find_buddy(e4b, order, &max); 580 MB_CHECK_ASSERT(buddy); 581 buddy2 = mb_find_buddy(e4b, order - 1, &max2); 582 MB_CHECK_ASSERT(buddy2); 583 MB_CHECK_ASSERT(buddy != buddy2); 584 MB_CHECK_ASSERT(max * 2 == max2); 585 586 count = 0; 587 for (i = 0; i < max; i++) { 588 589 if (mb_test_bit(i, buddy)) { 590 /* only single bit in buddy2 may be 1 */ 591 if (!mb_test_bit(i << 1, buddy2)) { 592 MB_CHECK_ASSERT( 593 mb_test_bit((i<<1)+1, buddy2)); 594 } else if (!mb_test_bit((i << 1) + 1, buddy2)) { 595 MB_CHECK_ASSERT( 596 mb_test_bit(i << 1, buddy2)); 597 } 598 continue; 599 } 600 601 /* both bits in buddy2 must be 1 */ 602 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 603 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 604 605 for (j = 0; j < (1 << order); j++) { 606 k = (i * (1 << order)) + j; 607 MB_CHECK_ASSERT( 608 !mb_test_bit(k, e4b->bd_bitmap)); 609 } 610 count++; 611 } 612 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 613 order--; 614 } 615 616 fstart = -1; 617 buddy = mb_find_buddy(e4b, 0, &max); 618 for (i = 0; i < max; i++) { 619 if (!mb_test_bit(i, buddy)) { 620 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 621 if (fstart == -1) { 622 fragments++; 623 fstart = i; 624 } 625 
continue; 626 } 627 fstart = -1; 628 /* check used bits only */ 629 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 630 buddy2 = mb_find_buddy(e4b, j, &max2); 631 k = i >> j; 632 MB_CHECK_ASSERT(k < max2); 633 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 634 } 635 } 636 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 637 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 638 639 grp = ext4_get_group_info(sb, e4b->bd_group); 640 list_for_each(cur, &grp->bb_prealloc_list) { 641 ext4_group_t groupnr; 642 struct ext4_prealloc_space *pa; 643 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 644 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 645 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 646 for (i = 0; i < pa->pa_len; i++) 647 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 648 } 649 return 0; 650 } 651 #undef MB_CHECK_ASSERT 652 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 653 __FILE__, __func__, __LINE__) 654 #else 655 #define mb_check_buddy(e4b) 656 #endif 657 658 /* 659 * Divide blocks started from @first with length @len into 660 * smaller chunks with power of 2 blocks. 661 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 662 * then increase bb_counters[] for corresponded chunk size. 663 */ 664 static void ext4_mb_mark_free_simple(struct super_block *sb, 665 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 666 struct ext4_group_info *grp) 667 { 668 struct ext4_sb_info *sbi = EXT4_SB(sb); 669 ext4_grpblk_t min; 670 ext4_grpblk_t max; 671 ext4_grpblk_t chunk; 672 unsigned short border; 673 674 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 675 676 border = 2 << sb->s_blocksize_bits; 677 678 while (len > 0) { 679 /* find how many blocks can be covered since this position */ 680 max = ffs(first | border) - 1; 681 682 /* find how many blocks of power 2 we need to mark */ 683 min = fls(len) - 1; 684 685 if (max < min) 686 min = max; 687 chunk = 1 << min; 688 689 /* mark multiblock chunks only */ 690 grp->bb_counters[min]++; 691 if (min > 0) 692 mb_clear_bit(first >> min, 693 buddy + sbi->s_mb_offsets[min]); 694 695 len -= chunk; 696 first += chunk; 697 } 698 } 699 700 /* 701 * Cache the order of the largest free extent we have available in this block 702 * group. 
703 */ 704 static void 705 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 706 { 707 int i; 708 int bits; 709 710 grp->bb_largest_free_order = -1; /* uninit */ 711 712 bits = sb->s_blocksize_bits + 1; 713 for (i = bits; i >= 0; i--) { 714 if (grp->bb_counters[i] > 0) { 715 grp->bb_largest_free_order = i; 716 break; 717 } 718 } 719 } 720 721 static noinline_for_stack 722 void ext4_mb_generate_buddy(struct super_block *sb, 723 void *buddy, void *bitmap, ext4_group_t group) 724 { 725 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 726 struct ext4_sb_info *sbi = EXT4_SB(sb); 727 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 728 ext4_grpblk_t i = 0; 729 ext4_grpblk_t first; 730 ext4_grpblk_t len; 731 unsigned free = 0; 732 unsigned fragments = 0; 733 unsigned long long period = get_cycles(); 734 735 /* initialize buddy from bitmap which is aggregation 736 * of on-disk bitmap and preallocations */ 737 i = mb_find_next_zero_bit(bitmap, max, 0); 738 grp->bb_first_free = i; 739 while (i < max) { 740 fragments++; 741 first = i; 742 i = mb_find_next_bit(bitmap, max, i); 743 len = i - first; 744 free += len; 745 if (len > 1) 746 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 747 else 748 grp->bb_counters[0]++; 749 if (i < max) 750 i = mb_find_next_zero_bit(bitmap, max, i); 751 } 752 grp->bb_fragments = fragments; 753 754 if (free != grp->bb_free) { 755 ext4_grp_locked_error(sb, group, 0, 0, 756 "block bitmap and bg descriptor " 757 "inconsistent: %u vs %u free clusters", 758 free, grp->bb_free); 759 /* 760 * If we intend to continue, we consider group descriptor 761 * corrupt and update bb_free using bitmap value 762 */ 763 grp->bb_free = free; 764 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 765 percpu_counter_sub(&sbi->s_freeclusters_counter, 766 grp->bb_free); 767 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 768 } 769 mb_set_largest_free_order(sb, grp); 770 771 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 772 773 period = get_cycles() - period; 774 spin_lock(&EXT4_SB(sb)->s_bal_lock); 775 EXT4_SB(sb)->s_mb_buddies_generated++; 776 EXT4_SB(sb)->s_mb_generation_time += period; 777 spin_unlock(&EXT4_SB(sb)->s_bal_lock); 778 } 779 780 static void mb_regenerate_buddy(struct ext4_buddy *e4b) 781 { 782 int count; 783 int order = 1; 784 void *buddy; 785 786 while ((buddy = mb_find_buddy(e4b, order++, &count))) { 787 ext4_set_bits(buddy, 0, count); 788 } 789 e4b->bd_info->bb_fragments = 0; 790 memset(e4b->bd_info->bb_counters, 0, 791 sizeof(*e4b->bd_info->bb_counters) * 792 (e4b->bd_sb->s_blocksize_bits + 2)); 793 794 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, 795 e4b->bd_bitmap, e4b->bd_group); 796 } 797 798 /* The buddy information is attached the buddy cache inode 799 * for convenience. The information regarding each group 800 * is loaded via ext4_mb_load_buddy. The information involve 801 * block bitmap and buddy information. The information are 802 * stored in the inode as 803 * 804 * { page } 805 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 806 * 807 * 808 * one block each for bitmap and buddy information. 809 * So for each group we take up 2 blocks. A page can 810 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 811 * So it can have information regarding groups_per_page which 812 * is blocks_per_page/2 813 * 814 * Locking note: This routine takes the block group lock of all groups 815 * for this page; do not hold this lock when calling this routine! 
816 */ 817 818 static int ext4_mb_init_cache(struct page *page, char *incore) 819 { 820 ext4_group_t ngroups; 821 int blocksize; 822 int blocks_per_page; 823 int groups_per_page; 824 int err = 0; 825 int i; 826 ext4_group_t first_group, group; 827 int first_block; 828 struct super_block *sb; 829 struct buffer_head *bhs; 830 struct buffer_head **bh = NULL; 831 struct inode *inode; 832 char *data; 833 char *bitmap; 834 struct ext4_group_info *grinfo; 835 836 mb_debug(1, "init page %lu\n", page->index); 837 838 inode = page->mapping->host; 839 sb = inode->i_sb; 840 ngroups = ext4_get_groups_count(sb); 841 blocksize = 1 << inode->i_blkbits; 842 blocks_per_page = PAGE_CACHE_SIZE / blocksize; 843 844 groups_per_page = blocks_per_page >> 1; 845 if (groups_per_page == 0) 846 groups_per_page = 1; 847 848 /* allocate buffer_heads to read bitmaps */ 849 if (groups_per_page > 1) { 850 i = sizeof(struct buffer_head *) * groups_per_page; 851 bh = kzalloc(i, GFP_NOFS); 852 if (bh == NULL) { 853 err = -ENOMEM; 854 goto out; 855 } 856 } else 857 bh = &bhs; 858 859 first_group = page->index * blocks_per_page / 2; 860 861 /* read all groups the page covers into the cache */ 862 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 863 if (group >= ngroups) 864 break; 865 866 grinfo = ext4_get_group_info(sb, group); 867 /* 868 * If page is uptodate then we came here after online resize 869 * which added some new uninitialized group info structs, so 870 * we must skip all initialized uptodate buddies on the page, 871 * which may be currently in use by an allocating task. 872 */ 873 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 874 bh[i] = NULL; 875 continue; 876 } 877 bh[i] = ext4_read_block_bitmap_nowait(sb, group); 878 if (IS_ERR(bh[i])) { 879 err = PTR_ERR(bh[i]); 880 bh[i] = NULL; 881 goto out; 882 } 883 mb_debug(1, "read bitmap for group %u\n", group); 884 } 885 886 /* wait for I/O completion */ 887 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 888 int err2; 889 890 if (!bh[i]) 891 continue; 892 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 893 if (!err) 894 err = err2; 895 } 896 897 first_block = page->index * blocks_per_page; 898 for (i = 0; i < blocks_per_page; i++) { 899 group = (first_block + i) >> 1; 900 if (group >= ngroups) 901 break; 902 903 if (!bh[group - first_group]) 904 /* skip initialized uptodate buddy */ 905 continue; 906 907 if (!buffer_verified(bh[group - first_group])) 908 /* Skip faulty bitmaps */ 909 continue; 910 err = 0; 911 912 /* 913 * data carry information regarding this 914 * particular group in the format specified 915 * above 916 * 917 */ 918 data = page_address(page) + (i * blocksize); 919 bitmap = bh[group - first_group]->b_data; 920 921 /* 922 * We place the buddy block and bitmap block 923 * close together 924 */ 925 if ((first_block + i) & 1) { 926 /* this is block of buddy */ 927 BUG_ON(incore == NULL); 928 mb_debug(1, "put buddy for group %u in page %lu/%x\n", 929 group, page->index, i * blocksize); 930 trace_ext4_mb_buddy_bitmap_load(sb, group); 931 grinfo = ext4_get_group_info(sb, group); 932 grinfo->bb_fragments = 0; 933 memset(grinfo->bb_counters, 0, 934 sizeof(*grinfo->bb_counters) * 935 (sb->s_blocksize_bits+2)); 936 /* 937 * incore got set to the group block bitmap below 938 */ 939 ext4_lock_group(sb, group); 940 /* init the buddy */ 941 memset(data, 0xff, blocksize); 942 ext4_mb_generate_buddy(sb, data, incore, group); 943 ext4_unlock_group(sb, group); 944 incore = NULL; 945 } else { 946 /* this is block 
of bitmap */ 947 BUG_ON(incore != NULL); 948 mb_debug(1, "put bitmap for group %u in page %lu/%x\n", 949 group, page->index, i * blocksize); 950 trace_ext4_mb_bitmap_load(sb, group); 951 952 /* see comments in ext4_mb_put_pa() */ 953 ext4_lock_group(sb, group); 954 memcpy(data, bitmap, blocksize); 955 956 /* mark all preallocated blks used in in-core bitmap */ 957 ext4_mb_generate_from_pa(sb, data, group); 958 ext4_mb_generate_from_freelist(sb, data, group); 959 ext4_unlock_group(sb, group); 960 961 /* set incore so that the buddy information can be 962 * generated using this 963 */ 964 incore = data; 965 } 966 } 967 SetPageUptodate(page); 968 969 out: 970 if (bh) { 971 for (i = 0; i < groups_per_page; i++) 972 brelse(bh[i]); 973 if (bh != &bhs) 974 kfree(bh); 975 } 976 return err; 977 } 978 979 /* 980 * Lock the buddy and bitmap pages. This make sure other parallel init_group 981 * on the same buddy page doesn't happen whild holding the buddy page lock. 982 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 983 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 984 */ 985 static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 986 ext4_group_t group, struct ext4_buddy *e4b) 987 { 988 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 989 int block, pnum, poff; 990 int blocks_per_page; 991 struct page *page; 992 993 e4b->bd_buddy_page = NULL; 994 e4b->bd_bitmap_page = NULL; 995 996 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 997 /* 998 * the buddy cache inode stores the block bitmap 999 * and buddy information in consecutive blocks. 1000 * So for each group we need two blocks. 1001 */ 1002 block = group * 2; 1003 pnum = block / blocks_per_page; 1004 poff = block % blocks_per_page; 1005 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1006 if (!page) 1007 return -ENOMEM; 1008 BUG_ON(page->mapping != inode->i_mapping); 1009 e4b->bd_bitmap_page = page; 1010 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1011 1012 if (blocks_per_page >= 2) { 1013 /* buddy and bitmap are on the same page */ 1014 return 0; 1015 } 1016 1017 block++; 1018 pnum = block / blocks_per_page; 1019 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1020 if (!page) 1021 return -ENOMEM; 1022 BUG_ON(page->mapping != inode->i_mapping); 1023 e4b->bd_buddy_page = page; 1024 return 0; 1025 } 1026 1027 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1028 { 1029 if (e4b->bd_bitmap_page) { 1030 unlock_page(e4b->bd_bitmap_page); 1031 page_cache_release(e4b->bd_bitmap_page); 1032 } 1033 if (e4b->bd_buddy_page) { 1034 unlock_page(e4b->bd_buddy_page); 1035 page_cache_release(e4b->bd_buddy_page); 1036 } 1037 } 1038 1039 /* 1040 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1041 * block group lock of all groups for this page; do not hold the BG lock when 1042 * calling this routine! 1043 */ 1044 static noinline_for_stack 1045 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) 1046 { 1047 1048 struct ext4_group_info *this_grp; 1049 struct ext4_buddy e4b; 1050 struct page *page; 1051 int ret = 0; 1052 1053 might_sleep(); 1054 mb_debug(1, "init group %u\n", group); 1055 this_grp = ext4_get_group_info(sb, group); 1056 /* 1057 * This ensures that we don't reinit the buddy cache 1058 * page which map to the group from which we are already 1059 * allocating. 
If we are looking at the buddy cache we would 1060 * have taken a reference using ext4_mb_load_buddy and that 1061 * would have pinned buddy page to page cache. 1062 * The call to ext4_mb_get_buddy_page_lock will mark the 1063 * page accessed. 1064 */ 1065 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); 1066 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1067 /* 1068 * somebody initialized the group 1069 * return without doing anything 1070 */ 1071 goto err; 1072 } 1073 1074 page = e4b.bd_bitmap_page; 1075 ret = ext4_mb_init_cache(page, NULL); 1076 if (ret) 1077 goto err; 1078 if (!PageUptodate(page)) { 1079 ret = -EIO; 1080 goto err; 1081 } 1082 1083 if (e4b.bd_buddy_page == NULL) { 1084 /* 1085 * If both the bitmap and buddy are in 1086 * the same page we don't need to force 1087 * init the buddy 1088 */ 1089 ret = 0; 1090 goto err; 1091 } 1092 /* init buddy cache */ 1093 page = e4b.bd_buddy_page; 1094 ret = ext4_mb_init_cache(page, e4b.bd_bitmap); 1095 if (ret) 1096 goto err; 1097 if (!PageUptodate(page)) { 1098 ret = -EIO; 1099 goto err; 1100 } 1101 err: 1102 ext4_mb_put_buddy_page_lock(&e4b); 1103 return ret; 1104 } 1105 1106 /* 1107 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1108 * block group lock of all groups for this page; do not hold the BG lock when 1109 * calling this routine! 1110 */ 1111 static noinline_for_stack int 1112 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1113 struct ext4_buddy *e4b) 1114 { 1115 int blocks_per_page; 1116 int block; 1117 int pnum; 1118 int poff; 1119 struct page *page; 1120 int ret; 1121 struct ext4_group_info *grp; 1122 struct ext4_sb_info *sbi = EXT4_SB(sb); 1123 struct inode *inode = sbi->s_buddy_cache; 1124 1125 might_sleep(); 1126 mb_debug(1, "load group %u\n", group); 1127 1128 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 1129 grp = ext4_get_group_info(sb, group); 1130 1131 e4b->bd_blkbits = sb->s_blocksize_bits; 1132 e4b->bd_info = grp; 1133 e4b->bd_sb = sb; 1134 e4b->bd_group = group; 1135 e4b->bd_buddy_page = NULL; 1136 e4b->bd_bitmap_page = NULL; 1137 1138 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1139 /* 1140 * we need full data about the group 1141 * to make a good selection 1142 */ 1143 ret = ext4_mb_init_group(sb, group); 1144 if (ret) 1145 return ret; 1146 } 1147 1148 /* 1149 * the buddy cache inode stores the block bitmap 1150 * and buddy information in consecutive blocks. 1151 * So for each group we need two blocks. 1152 */ 1153 block = group * 2; 1154 pnum = block / blocks_per_page; 1155 poff = block % blocks_per_page; 1156 1157 /* we could use find_or_create_page(), but it locks page 1158 * what we'd like to avoid in fast path ... */ 1159 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1160 if (page == NULL || !PageUptodate(page)) { 1161 if (page) 1162 /* 1163 * drop the page reference and try 1164 * to get the page with lock. If we 1165 * are not uptodate that implies 1166 * somebody just created the page but 1167 * is yet to initialize the same. So 1168 * wait for it to initialize. 
1169 */ 1170 page_cache_release(page); 1171 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1172 if (page) { 1173 BUG_ON(page->mapping != inode->i_mapping); 1174 if (!PageUptodate(page)) { 1175 ret = ext4_mb_init_cache(page, NULL); 1176 if (ret) { 1177 unlock_page(page); 1178 goto err; 1179 } 1180 mb_cmp_bitmaps(e4b, page_address(page) + 1181 (poff * sb->s_blocksize)); 1182 } 1183 unlock_page(page); 1184 } 1185 } 1186 if (page == NULL) { 1187 ret = -ENOMEM; 1188 goto err; 1189 } 1190 if (!PageUptodate(page)) { 1191 ret = -EIO; 1192 goto err; 1193 } 1194 1195 /* Pages marked accessed already */ 1196 e4b->bd_bitmap_page = page; 1197 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1198 1199 block++; 1200 pnum = block / blocks_per_page; 1201 poff = block % blocks_per_page; 1202 1203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1204 if (page == NULL || !PageUptodate(page)) { 1205 if (page) 1206 page_cache_release(page); 1207 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1208 if (page) { 1209 BUG_ON(page->mapping != inode->i_mapping); 1210 if (!PageUptodate(page)) { 1211 ret = ext4_mb_init_cache(page, e4b->bd_bitmap); 1212 if (ret) { 1213 unlock_page(page); 1214 goto err; 1215 } 1216 } 1217 unlock_page(page); 1218 } 1219 } 1220 if (page == NULL) { 1221 ret = -ENOMEM; 1222 goto err; 1223 } 1224 if (!PageUptodate(page)) { 1225 ret = -EIO; 1226 goto err; 1227 } 1228 1229 /* Pages marked accessed already */ 1230 e4b->bd_buddy_page = page; 1231 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1232 1233 BUG_ON(e4b->bd_bitmap_page == NULL); 1234 BUG_ON(e4b->bd_buddy_page == NULL); 1235 1236 return 0; 1237 1238 err: 1239 if (page) 1240 page_cache_release(page); 1241 if (e4b->bd_bitmap_page) 1242 page_cache_release(e4b->bd_bitmap_page); 1243 if (e4b->bd_buddy_page) 1244 page_cache_release(e4b->bd_buddy_page); 1245 e4b->bd_buddy = NULL; 1246 e4b->bd_bitmap = NULL; 1247 return ret; 1248 } 1249 1250 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1251 { 1252 if (e4b->bd_bitmap_page) 1253 page_cache_release(e4b->bd_bitmap_page); 1254 if (e4b->bd_buddy_page) 1255 page_cache_release(e4b->bd_buddy_page); 1256 } 1257 1258 1259 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1260 { 1261 int order = 1; 1262 void *bb; 1263 1264 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1265 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1266 1267 bb = e4b->bd_buddy; 1268 while (order <= e4b->bd_blkbits + 1) { 1269 block = block >> 1; 1270 if (!mb_test_bit(block, bb)) { 1271 /* this block is part of buddy of order 'order' */ 1272 return order; 1273 } 1274 bb += 1 << (e4b->bd_blkbits - order); 1275 order++; 1276 } 1277 return 0; 1278 } 1279 1280 static void mb_clear_bits(void *bm, int cur, int len) 1281 { 1282 __u32 *addr; 1283 1284 len = cur + len; 1285 while (cur < len) { 1286 if ((cur & 31) == 0 && (len - cur) >= 32) { 1287 /* fast path: clear whole word at once */ 1288 addr = bm + (cur >> 3); 1289 *addr = 0; 1290 cur += 32; 1291 continue; 1292 } 1293 mb_clear_bit(cur, bm); 1294 cur++; 1295 } 1296 } 1297 1298 /* clear bits in given range 1299 * will return first found zero bit if any, -1 otherwise 1300 */ 1301 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1302 { 1303 __u32 *addr; 1304 int zero_bit = -1; 1305 1306 len = cur + len; 1307 while (cur < len) { 1308 if ((cur & 31) == 0 && (len - cur) >= 32) { 1309 /* fast path: clear whole word at once */ 1310 addr = bm + (cur >> 3); 1311 if (*addr != (__u32)(-1) 
&& zero_bit == -1) 1312 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1313 *addr = 0; 1314 cur += 32; 1315 continue; 1316 } 1317 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1318 zero_bit = cur; 1319 cur++; 1320 } 1321 1322 return zero_bit; 1323 } 1324 1325 void ext4_set_bits(void *bm, int cur, int len) 1326 { 1327 __u32 *addr; 1328 1329 len = cur + len; 1330 while (cur < len) { 1331 if ((cur & 31) == 0 && (len - cur) >= 32) { 1332 /* fast path: set whole word at once */ 1333 addr = bm + (cur >> 3); 1334 *addr = 0xffffffff; 1335 cur += 32; 1336 continue; 1337 } 1338 mb_set_bit(cur, bm); 1339 cur++; 1340 } 1341 } 1342 1343 /* 1344 * _________________________________________________________________ */ 1345 1346 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1347 { 1348 if (mb_test_bit(*bit + side, bitmap)) { 1349 mb_clear_bit(*bit, bitmap); 1350 (*bit) -= side; 1351 return 1; 1352 } 1353 else { 1354 (*bit) += side; 1355 mb_set_bit(*bit, bitmap); 1356 return -1; 1357 } 1358 } 1359 1360 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1361 { 1362 int max; 1363 int order = 1; 1364 void *buddy = mb_find_buddy(e4b, order, &max); 1365 1366 while (buddy) { 1367 void *buddy2; 1368 1369 /* Bits in range [first; last] are known to be set since 1370 * corresponding blocks were allocated. Bits in range 1371 * (first; last) will stay set because they form buddies on 1372 * upper layer. We just deal with borders if they don't 1373 * align with upper layer and then go up. 1374 * Releasing entire group is all about clearing 1375 * single bit of highest order buddy. 1376 */ 1377 1378 /* Example: 1379 * --------------------------------- 1380 * | 1 | 1 | 1 | 1 | 1381 * --------------------------------- 1382 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1383 * --------------------------------- 1384 * 0 1 2 3 4 5 6 7 1385 * \_____________________/ 1386 * 1387 * Neither [1] nor [6] is aligned to above layer. 1388 * Left neighbour [0] is free, so mark it busy, 1389 * decrease bb_counters and extend range to 1390 * [0; 6] 1391 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1392 * mark [6] free, increase bb_counters and shrink range to 1393 * [0; 5]. 1394 * Then shift range to [0; 2], go up and do the same. 1395 */ 1396 1397 1398 if (first & 1) 1399 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1400 if (!(last & 1)) 1401 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1402 if (first > last) 1403 break; 1404 order++; 1405 1406 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { 1407 mb_clear_bits(buddy, first, last - first + 1); 1408 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1409 break; 1410 } 1411 first >>= 1; 1412 last >>= 1; 1413 buddy = buddy2; 1414 } 1415 } 1416 1417 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1418 int first, int count) 1419 { 1420 int left_is_free = 0; 1421 int right_is_free = 0; 1422 int block; 1423 int last = first + count - 1; 1424 struct super_block *sb = e4b->bd_sb; 1425 1426 if (WARN_ON(count == 0)) 1427 return; 1428 BUG_ON(last >= (sb->s_blocksize << 3)); 1429 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1430 /* Don't bother if the block group is corrupt. 
*/ 1431 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1432 return; 1433 1434 mb_check_buddy(e4b); 1435 mb_free_blocks_double(inode, e4b, first, count); 1436 1437 e4b->bd_info->bb_free += count; 1438 if (first < e4b->bd_info->bb_first_free) 1439 e4b->bd_info->bb_first_free = first; 1440 1441 /* access memory sequentially: check left neighbour, 1442 * clear range and then check right neighbour 1443 */ 1444 if (first != 0) 1445 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1446 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1447 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1448 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1449 1450 if (unlikely(block != -1)) { 1451 struct ext4_sb_info *sbi = EXT4_SB(sb); 1452 ext4_fsblk_t blocknr; 1453 1454 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1455 blocknr += EXT4_C2B(EXT4_SB(sb), block); 1456 ext4_grp_locked_error(sb, e4b->bd_group, 1457 inode ? inode->i_ino : 0, 1458 blocknr, 1459 "freeing already freed block " 1460 "(bit %u); block bitmap corrupt.", 1461 block); 1462 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)) 1463 percpu_counter_sub(&sbi->s_freeclusters_counter, 1464 e4b->bd_info->bb_free); 1465 /* Mark the block group as corrupt. */ 1466 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, 1467 &e4b->bd_info->bb_state); 1468 mb_regenerate_buddy(e4b); 1469 goto done; 1470 } 1471 1472 /* let's maintain fragments counter */ 1473 if (left_is_free && right_is_free) 1474 e4b->bd_info->bb_fragments--; 1475 else if (!left_is_free && !right_is_free) 1476 e4b->bd_info->bb_fragments++; 1477 1478 /* buddy[0] == bd_bitmap is a special case, so handle 1479 * it right away and let mb_buddy_mark_free stay free of 1480 * zero order checks. 1481 * Check if neighbours are to be coaleasced, 1482 * adjust bitmap bb_counters and borders appropriately. 1483 */ 1484 if (first & 1) { 1485 first += !left_is_free; 1486 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1487 } 1488 if (!(last & 1)) { 1489 last -= !right_is_free; 1490 e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 1491 } 1492 1493 if (first <= last) 1494 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1495 1496 done: 1497 mb_set_largest_free_order(sb, e4b->bd_info); 1498 mb_check_buddy(e4b); 1499 } 1500 1501 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1502 int needed, struct ext4_free_extent *ex) 1503 { 1504 int next = block; 1505 int max, order; 1506 void *buddy; 1507 1508 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1509 BUG_ON(ex == NULL); 1510 1511 buddy = mb_find_buddy(e4b, 0, &max); 1512 BUG_ON(buddy == NULL); 1513 BUG_ON(block >= max); 1514 if (mb_test_bit(block, buddy)) { 1515 ex->fe_len = 0; 1516 ex->fe_start = 0; 1517 ex->fe_group = 0; 1518 return 0; 1519 } 1520 1521 /* find actual order */ 1522 order = mb_find_order_for_block(e4b, block); 1523 block = block >> order; 1524 1525 ex->fe_len = 1 << order; 1526 ex->fe_start = block << order; 1527 ex->fe_group = e4b->bd_group; 1528 1529 /* calc difference from given start */ 1530 next = next - ex->fe_start; 1531 ex->fe_len -= next; 1532 ex->fe_start += next; 1533 1534 while (needed > ex->fe_len && 1535 mb_find_buddy(e4b, order, &max)) { 1536 1537 if (block + 1 >= max) 1538 break; 1539 1540 next = (block + 1) * (1 << order); 1541 if (mb_test_bit(next, e4b->bd_bitmap)) 1542 break; 1543 1544 order = mb_find_order_for_block(e4b, next); 1545 1546 block = next >> order; 1547 ex->fe_len += 1 << order; 1548 } 1549 1550 BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))); 1551 return ex->fe_len; 1552 } 1553 1554 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1555 { 1556 int ord; 1557 int mlen = 0; 1558 int max = 0; 1559 int cur; 1560 int start = ex->fe_start; 1561 int len = ex->fe_len; 1562 unsigned ret = 0; 1563 int len0 = len; 1564 void *buddy; 1565 1566 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1567 BUG_ON(e4b->bd_group != ex->fe_group); 1568 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1569 mb_check_buddy(e4b); 1570 mb_mark_used_double(e4b, start, len); 1571 1572 e4b->bd_info->bb_free -= len; 1573 if (e4b->bd_info->bb_first_free == start) 1574 e4b->bd_info->bb_first_free += len; 1575 1576 /* let's maintain fragments counter */ 1577 if (start != 0) 1578 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1579 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1580 max = !mb_test_bit(start + len, e4b->bd_bitmap); 1581 if (mlen && max) 1582 e4b->bd_info->bb_fragments++; 1583 else if (!mlen && !max) 1584 e4b->bd_info->bb_fragments--; 1585 1586 /* let's maintain buddy itself */ 1587 while (len) { 1588 ord = mb_find_order_for_block(e4b, start); 1589 1590 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1591 /* the whole chunk may be allocated at once! 
*/ 1592 mlen = 1 << ord; 1593 buddy = mb_find_buddy(e4b, ord, &max); 1594 BUG_ON((start >> ord) >= max); 1595 mb_set_bit(start >> ord, buddy); 1596 e4b->bd_info->bb_counters[ord]--; 1597 start += mlen; 1598 len -= mlen; 1599 BUG_ON(len < 0); 1600 continue; 1601 } 1602 1603 /* store for history */ 1604 if (ret == 0) 1605 ret = len | (ord << 16); 1606 1607 /* we have to split large buddy */ 1608 BUG_ON(ord <= 0); 1609 buddy = mb_find_buddy(e4b, ord, &max); 1610 mb_set_bit(start >> ord, buddy); 1611 e4b->bd_info->bb_counters[ord]--; 1612 1613 ord--; 1614 cur = (start >> ord) & ~1U; 1615 buddy = mb_find_buddy(e4b, ord, &max); 1616 mb_clear_bit(cur, buddy); 1617 mb_clear_bit(cur + 1, buddy); 1618 e4b->bd_info->bb_counters[ord]++; 1619 e4b->bd_info->bb_counters[ord]++; 1620 } 1621 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1622 1623 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1624 mb_check_buddy(e4b); 1625 1626 return ret; 1627 } 1628 1629 /* 1630 * Must be called under group lock! 1631 */ 1632 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1633 struct ext4_buddy *e4b) 1634 { 1635 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1636 int ret; 1637 1638 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1639 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1640 1641 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 1642 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 1643 ret = mb_mark_used(e4b, &ac->ac_b_ex); 1644 1645 /* preallocation can change ac_b_ex, thus we store actually 1646 * allocated blocks for history */ 1647 ac->ac_f_ex = ac->ac_b_ex; 1648 1649 ac->ac_status = AC_STATUS_FOUND; 1650 ac->ac_tail = ret & 0xffff; 1651 ac->ac_buddy = ret >> 16; 1652 1653 /* 1654 * take the page reference. We want the page to be pinned 1655 * so that we don't get a ext4_mb_init_cache_call for this 1656 * group until we update the bitmap. That would mean we 1657 * double allocate blocks. 
The reference is dropped 1658 * in ext4_mb_release_context 1659 */ 1660 ac->ac_bitmap_page = e4b->bd_bitmap_page; 1661 get_page(ac->ac_bitmap_page); 1662 ac->ac_buddy_page = e4b->bd_buddy_page; 1663 get_page(ac->ac_buddy_page); 1664 /* store last allocated for subsequent stream allocation */ 1665 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1666 spin_lock(&sbi->s_md_lock); 1667 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 1668 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 1669 spin_unlock(&sbi->s_md_lock); 1670 } 1671 } 1672 1673 /* 1674 * regular allocator, for general purposes allocation 1675 */ 1676 1677 static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 1678 struct ext4_buddy *e4b, 1679 int finish_group) 1680 { 1681 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1682 struct ext4_free_extent *bex = &ac->ac_b_ex; 1683 struct ext4_free_extent *gex = &ac->ac_g_ex; 1684 struct ext4_free_extent ex; 1685 int max; 1686 1687 if (ac->ac_status == AC_STATUS_FOUND) 1688 return; 1689 /* 1690 * We don't want to scan for a whole year 1691 */ 1692 if (ac->ac_found > sbi->s_mb_max_to_scan && 1693 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1694 ac->ac_status = AC_STATUS_BREAK; 1695 return; 1696 } 1697 1698 /* 1699 * Haven't found good chunk so far, let's continue 1700 */ 1701 if (bex->fe_len < gex->fe_len) 1702 return; 1703 1704 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 1705 && bex->fe_group == e4b->bd_group) { 1706 /* recheck chunk's availability - we don't know 1707 * when it was found (within this lock-unlock 1708 * period or not) */ 1709 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); 1710 if (max >= gex->fe_len) { 1711 ext4_mb_use_best_found(ac, e4b); 1712 return; 1713 } 1714 } 1715 } 1716 1717 /* 1718 * The routine checks whether found extent is good enough. If it is, 1719 * then the extent gets marked used and flag is set to the context 1720 * to stop scanning. Otherwise, the extent is compared with the 1721 * previous found extent and if new one is better, then it's stored 1722 * in the context. Later, the best found extent will be used, if 1723 * mballoc can't find good enough extent. 1724 * 1725 * FIXME: real allocation policy is to be designed yet! 
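 *
 * For example (illustrative lengths), with a goal length of 8 clusters:
 * an extent of exactly 8 is taken immediately; while the best-so-far is
 * shorter than 8, any longer extent replaces it; once the best-so-far
 * is already longer than 8, only a smaller extent that still satisfies
 * the request (e.g. 10 replacing 12) is preferred, to waste less space.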
1726 */ 1727 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 1728 struct ext4_free_extent *ex, 1729 struct ext4_buddy *e4b) 1730 { 1731 struct ext4_free_extent *bex = &ac->ac_b_ex; 1732 struct ext4_free_extent *gex = &ac->ac_g_ex; 1733 1734 BUG_ON(ex->fe_len <= 0); 1735 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1736 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1737 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 1738 1739 ac->ac_found++; 1740 1741 /* 1742 * The special case - take what you catch first 1743 */ 1744 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1745 *bex = *ex; 1746 ext4_mb_use_best_found(ac, e4b); 1747 return; 1748 } 1749 1750 /* 1751 * Let's check whether the chuck is good enough 1752 */ 1753 if (ex->fe_len == gex->fe_len) { 1754 *bex = *ex; 1755 ext4_mb_use_best_found(ac, e4b); 1756 return; 1757 } 1758 1759 /* 1760 * If this is first found extent, just store it in the context 1761 */ 1762 if (bex->fe_len == 0) { 1763 *bex = *ex; 1764 return; 1765 } 1766 1767 /* 1768 * If new found extent is better, store it in the context 1769 */ 1770 if (bex->fe_len < gex->fe_len) { 1771 /* if the request isn't satisfied, any found extent 1772 * larger than previous best one is better */ 1773 if (ex->fe_len > bex->fe_len) 1774 *bex = *ex; 1775 } else if (ex->fe_len > gex->fe_len) { 1776 /* if the request is satisfied, then we try to find 1777 * an extent that still satisfy the request, but is 1778 * smaller than previous one */ 1779 if (ex->fe_len < bex->fe_len) 1780 *bex = *ex; 1781 } 1782 1783 ext4_mb_check_limits(ac, e4b, 0); 1784 } 1785 1786 static noinline_for_stack 1787 int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 1788 struct ext4_buddy *e4b) 1789 { 1790 struct ext4_free_extent ex = ac->ac_b_ex; 1791 ext4_group_t group = ex.fe_group; 1792 int max; 1793 int err; 1794 1795 BUG_ON(ex.fe_len <= 0); 1796 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1797 if (err) 1798 return err; 1799 1800 ext4_lock_group(ac->ac_sb, group); 1801 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 1802 1803 if (max > 0) { 1804 ac->ac_b_ex = ex; 1805 ext4_mb_use_best_found(ac, e4b); 1806 } 1807 1808 ext4_unlock_group(ac->ac_sb, group); 1809 ext4_mb_unload_buddy(e4b); 1810 1811 return 0; 1812 } 1813 1814 static noinline_for_stack 1815 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 1816 struct ext4_buddy *e4b) 1817 { 1818 ext4_group_t group = ac->ac_g_ex.fe_group; 1819 int max; 1820 int err; 1821 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1822 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 1823 struct ext4_free_extent ex; 1824 1825 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 1826 return 0; 1827 if (grp->bb_free == 0) 1828 return 0; 1829 1830 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1831 if (err) 1832 return err; 1833 1834 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 1835 ext4_mb_unload_buddy(e4b); 1836 return 0; 1837 } 1838 1839 ext4_lock_group(ac->ac_sb, group); 1840 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 1841 ac->ac_g_ex.fe_len, &ex); 1842 ex.fe_logical = 0xDEADFA11; /* debug value */ 1843 1844 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 1845 ext4_fsblk_t start; 1846 1847 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 1848 ex.fe_start; 1849 /* use do_div to get remainder (would be 64-bit modulo) */ 1850 if (do_div(start, sbi->s_stripe) == 0) { 1851 ac->ac_found++; 1852 ac->ac_b_ex = ex; 1853 
ext4_mb_use_best_found(ac, e4b); 1854 } 1855 } else if (max >= ac->ac_g_ex.fe_len) { 1856 BUG_ON(ex.fe_len <= 0); 1857 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1858 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1859 ac->ac_found++; 1860 ac->ac_b_ex = ex; 1861 ext4_mb_use_best_found(ac, e4b); 1862 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 1863 /* Sometimes, caller may want to merge even small 1864 * number of blocks to an existing extent */ 1865 BUG_ON(ex.fe_len <= 0); 1866 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1867 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1868 ac->ac_found++; 1869 ac->ac_b_ex = ex; 1870 ext4_mb_use_best_found(ac, e4b); 1871 } 1872 ext4_unlock_group(ac->ac_sb, group); 1873 ext4_mb_unload_buddy(e4b); 1874 1875 return 0; 1876 } 1877 1878 /* 1879 * The routine scans buddy structures (not bitmap!) from given order 1880 * to max order and tries to find big enough chunk to satisfy the req 1881 */ 1882 static noinline_for_stack 1883 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1884 struct ext4_buddy *e4b) 1885 { 1886 struct super_block *sb = ac->ac_sb; 1887 struct ext4_group_info *grp = e4b->bd_info; 1888 void *buddy; 1889 int i; 1890 int k; 1891 int max; 1892 1893 BUG_ON(ac->ac_2order <= 0); 1894 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { 1895 if (grp->bb_counters[i] == 0) 1896 continue; 1897 1898 buddy = mb_find_buddy(e4b, i, &max); 1899 BUG_ON(buddy == NULL); 1900 1901 k = mb_find_next_zero_bit(buddy, max, 0); 1902 BUG_ON(k >= max); 1903 1904 ac->ac_found++; 1905 1906 ac->ac_b_ex.fe_len = 1 << i; 1907 ac->ac_b_ex.fe_start = k << i; 1908 ac->ac_b_ex.fe_group = e4b->bd_group; 1909 1910 ext4_mb_use_best_found(ac, e4b); 1911 1912 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); 1913 1914 if (EXT4_SB(sb)->s_mb_stats) 1915 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 1916 1917 break; 1918 } 1919 } 1920 1921 /* 1922 * The routine scans the group and measures all found extents. 1923 * In order to optimize scanning, caller must pass number of 1924 * free blocks in the group, so the routine can know upper limit. 1925 */ 1926 static noinline_for_stack 1927 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 1928 struct ext4_buddy *e4b) 1929 { 1930 struct super_block *sb = ac->ac_sb; 1931 void *bitmap = e4b->bd_bitmap; 1932 struct ext4_free_extent ex; 1933 int i; 1934 int free; 1935 1936 free = e4b->bd_info->bb_free; 1937 BUG_ON(free <= 0); 1938 1939 i = e4b->bd_info->bb_first_free; 1940 1941 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1942 i = mb_find_next_zero_bit(bitmap, 1943 EXT4_CLUSTERS_PER_GROUP(sb), i); 1944 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 1945 /* 1946 * IF we have corrupt bitmap, we won't find any 1947 * free blocks even though group info says we 1948 * we have free blocks 1949 */ 1950 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1951 "%d free clusters as per " 1952 "group info. But bitmap says 0", 1953 free); 1954 break; 1955 } 1956 1957 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 1958 BUG_ON(ex.fe_len <= 0); 1959 if (free < ex.fe_len) { 1960 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1961 "%d free clusters as per " 1962 "group info. But got %d blocks", 1963 free, ex.fe_len); 1964 /* 1965 * The number of free blocks differs. This mostly 1966 * indicate that the bitmap is corrupt. So exit 1967 * without claiming the space. 
1968 */ 1969 break; 1970 } 1971 ex.fe_logical = 0xDEADC0DE; /* debug value */ 1972 ext4_mb_measure_extent(ac, &ex, e4b); 1973 1974 i += ex.fe_len; 1975 free -= ex.fe_len; 1976 } 1977 1978 ext4_mb_check_limits(ac, e4b, 1); 1979 } 1980 1981 /* 1982 * This is a special case for storages like raid5 1983 * we try to find stripe-aligned chunks for stripe-size-multiple requests 1984 */ 1985 static noinline_for_stack 1986 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 1987 struct ext4_buddy *e4b) 1988 { 1989 struct super_block *sb = ac->ac_sb; 1990 struct ext4_sb_info *sbi = EXT4_SB(sb); 1991 void *bitmap = e4b->bd_bitmap; 1992 struct ext4_free_extent ex; 1993 ext4_fsblk_t first_group_block; 1994 ext4_fsblk_t a; 1995 ext4_grpblk_t i; 1996 int max; 1997 1998 BUG_ON(sbi->s_stripe == 0); 1999 2000 /* find first stripe-aligned block in group */ 2001 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2002 2003 a = first_group_block + sbi->s_stripe - 1; 2004 do_div(a, sbi->s_stripe); 2005 i = (a * sbi->s_stripe) - first_group_block; 2006 2007 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2008 if (!mb_test_bit(i, bitmap)) { 2009 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2010 if (max >= sbi->s_stripe) { 2011 ac->ac_found++; 2012 ex.fe_logical = 0xDEADF00D; /* debug value */ 2013 ac->ac_b_ex = ex; 2014 ext4_mb_use_best_found(ac, e4b); 2015 break; 2016 } 2017 } 2018 i += sbi->s_stripe; 2019 } 2020 } 2021 2022 /* 2023 * This is now called BEFORE we load the buddy bitmap. 2024 * Returns either 1 or 0 indicating that the group is either suitable 2025 * for the allocation or not. In addition it can also return negative 2026 * error code when something goes wrong. 2027 */ 2028 static int ext4_mb_good_group(struct ext4_allocation_context *ac, 2029 ext4_group_t group, int cr) 2030 { 2031 unsigned free, fragments; 2032 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2033 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2034 2035 BUG_ON(cr < 0 || cr >= 4); 2036 2037 free = grp->bb_free; 2038 if (free == 0) 2039 return 0; 2040 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2041 return 0; 2042 2043 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2044 return 0; 2045 2046 /* We only do this if the grp has never been initialized */ 2047 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2048 int ret = ext4_mb_init_group(ac->ac_sb, group); 2049 if (ret) 2050 return ret; 2051 } 2052 2053 fragments = grp->bb_fragments; 2054 if (fragments == 0) 2055 return 0; 2056 2057 switch (cr) { 2058 case 0: 2059 BUG_ON(ac->ac_2order == 0); 2060 2061 /* Avoid using the first bg of a flexgroup for data files */ 2062 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2063 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2064 ((group % flex_size) == 0)) 2065 return 0; 2066 2067 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || 2068 (free / fragments) >= ac->ac_g_ex.fe_len) 2069 return 1; 2070 2071 if (grp->bb_largest_free_order < ac->ac_2order) 2072 return 0; 2073 2074 return 1; 2075 case 1: 2076 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2077 return 1; 2078 break; 2079 case 2: 2080 if (free >= ac->ac_g_ex.fe_len) 2081 return 1; 2082 break; 2083 case 3: 2084 return 1; 2085 default: 2086 BUG(); 2087 } 2088 2089 return 0; 2090 } 2091 2092 static noinline_for_stack int 2093 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2094 { 2095 ext4_group_t ngroups, group, i; 2096 int cr; 2097 int err = 0, first_err = 0; 2098 struct ext4_sb_info *sbi; 2099 struct super_block *sb; 2100 struct 
ext4_buddy e4b; 2101 2102 sb = ac->ac_sb; 2103 sbi = EXT4_SB(sb); 2104 ngroups = ext4_get_groups_count(sb); 2105 /* non-extent files are limited to low blocks/groups */ 2106 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2107 ngroups = sbi->s_blockfile_groups; 2108 2109 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2110 2111 /* first, try the goal */ 2112 err = ext4_mb_find_by_goal(ac, &e4b); 2113 if (err || ac->ac_status == AC_STATUS_FOUND) 2114 goto out; 2115 2116 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2117 goto out; 2118 2119 /* 2120 * ac->ac2_order is set only if the fe_len is a power of 2 2121 * if ac2_order is set we also set criteria to 0 so that we 2122 * try exact allocation using buddy. 2123 */ 2124 i = fls(ac->ac_g_ex.fe_len); 2125 ac->ac_2order = 0; 2126 /* 2127 * We search using buddy data only if the order of the request 2128 * is greater than equal to the sbi_s_mb_order2_reqs 2129 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2130 */ 2131 if (i >= sbi->s_mb_order2_reqs) { 2132 /* 2133 * This should tell if fe_len is exactly power of 2 2134 */ 2135 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2136 ac->ac_2order = i - 1; 2137 } 2138 2139 /* if stream allocation is enabled, use global goal */ 2140 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2141 /* TBD: may be hot point */ 2142 spin_lock(&sbi->s_md_lock); 2143 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2144 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2145 spin_unlock(&sbi->s_md_lock); 2146 } 2147 2148 /* Let's just scan groups to find more-less suitable blocks */ 2149 cr = ac->ac_2order ? 0 : 1; 2150 /* 2151 * cr == 0 try to get exact allocation, 2152 * cr == 3 try to get anything 2153 */ 2154 repeat: 2155 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2156 ac->ac_criteria = cr; 2157 /* 2158 * searching for the right group start 2159 * from the goal value specified 2160 */ 2161 group = ac->ac_g_ex.fe_group; 2162 2163 for (i = 0; i < ngroups; group++, i++) { 2164 int ret = 0; 2165 cond_resched(); 2166 /* 2167 * Artificially restricted ngroups for non-extent 2168 * files makes group > ngroups possible on first loop. 2169 */ 2170 if (group >= ngroups) 2171 group = 0; 2172 2173 /* This now checks without needing the buddy page */ 2174 ret = ext4_mb_good_group(ac, group, cr); 2175 if (ret <= 0) { 2176 if (!first_err) 2177 first_err = ret; 2178 continue; 2179 } 2180 2181 err = ext4_mb_load_buddy(sb, group, &e4b); 2182 if (err) 2183 goto out; 2184 2185 ext4_lock_group(sb, group); 2186 2187 /* 2188 * We need to check again after locking the 2189 * block group 2190 */ 2191 ret = ext4_mb_good_group(ac, group, cr); 2192 if (ret <= 0) { 2193 ext4_unlock_group(sb, group); 2194 ext4_mb_unload_buddy(&e4b); 2195 if (!first_err) 2196 first_err = ret; 2197 continue; 2198 } 2199 2200 ac->ac_groups_scanned++; 2201 if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2) 2202 ext4_mb_simple_scan_group(ac, &e4b); 2203 else if (cr == 1 && sbi->s_stripe && 2204 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2205 ext4_mb_scan_aligned(ac, &e4b); 2206 else 2207 ext4_mb_complex_scan_group(ac, &e4b); 2208 2209 ext4_unlock_group(sb, group); 2210 ext4_mb_unload_buddy(&e4b); 2211 2212 if (ac->ac_status != AC_STATUS_CONTINUE) 2213 break; 2214 } 2215 } 2216 2217 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2218 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2219 /* 2220 * We've been searching too long. 
Let's try to allocate 2221 * the best chunk we've found so far 2222 */ 2223 2224 ext4_mb_try_best_found(ac, &e4b); 2225 if (ac->ac_status != AC_STATUS_FOUND) { 2226 /* 2227 * Someone more lucky has already allocated it. 2228 * The only thing we can do is just take first 2229 * found block(s) 2230 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); 2231 */ 2232 ac->ac_b_ex.fe_group = 0; 2233 ac->ac_b_ex.fe_start = 0; 2234 ac->ac_b_ex.fe_len = 0; 2235 ac->ac_status = AC_STATUS_CONTINUE; 2236 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2237 cr = 3; 2238 atomic_inc(&sbi->s_mb_lost_chunks); 2239 goto repeat; 2240 } 2241 } 2242 out: 2243 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2244 err = first_err; 2245 return err; 2246 } 2247 2248 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2249 { 2250 struct super_block *sb = seq->private; 2251 ext4_group_t group; 2252 2253 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2254 return NULL; 2255 group = *pos + 1; 2256 return (void *) ((unsigned long) group); 2257 } 2258 2259 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2260 { 2261 struct super_block *sb = seq->private; 2262 ext4_group_t group; 2263 2264 ++*pos; 2265 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2266 return NULL; 2267 group = *pos + 1; 2268 return (void *) ((unsigned long) group); 2269 } 2270 2271 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2272 { 2273 struct super_block *sb = seq->private; 2274 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2275 int i; 2276 int err, buddy_loaded = 0; 2277 struct ext4_buddy e4b; 2278 struct ext4_group_info *grinfo; 2279 struct sg { 2280 struct ext4_group_info info; 2281 ext4_grpblk_t counters[16]; 2282 } sg; 2283 2284 group--; 2285 if (group == 0) 2286 seq_puts(seq, "#group: free frags first [" 2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]"); 2289 2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2291 sizeof(struct ext4_group_info); 2292 grinfo = ext4_get_group_info(sb, group); 2293 /* Load the group info in memory only if not already loaded. */ 2294 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2295 err = ext4_mb_load_buddy(sb, group, &e4b); 2296 if (err) { 2297 seq_printf(seq, "#%-5u: I/O error\n", group); 2298 return 0; 2299 } 2300 buddy_loaded = 1; 2301 } 2302 2303 memcpy(&sg, ext4_get_group_info(sb, group), i); 2304 2305 if (buddy_loaded) 2306 ext4_mb_unload_buddy(&e4b); 2307 2308 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2309 sg.info.bb_fragments, sg.info.bb_first_free); 2310 for (i = 0; i <= 13; i++) 2311 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 
2312 sg.info.bb_counters[i] : 0); 2313 seq_printf(seq, " ]\n"); 2314 2315 return 0; 2316 } 2317 2318 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2319 { 2320 } 2321 2322 static const struct seq_operations ext4_mb_seq_groups_ops = { 2323 .start = ext4_mb_seq_groups_start, 2324 .next = ext4_mb_seq_groups_next, 2325 .stop = ext4_mb_seq_groups_stop, 2326 .show = ext4_mb_seq_groups_show, 2327 }; 2328 2329 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) 2330 { 2331 struct super_block *sb = PDE_DATA(inode); 2332 int rc; 2333 2334 rc = seq_open(file, &ext4_mb_seq_groups_ops); 2335 if (rc == 0) { 2336 struct seq_file *m = file->private_data; 2337 m->private = sb; 2338 } 2339 return rc; 2340 2341 } 2342 2343 const struct file_operations ext4_seq_mb_groups_fops = { 2344 .owner = THIS_MODULE, 2345 .open = ext4_mb_seq_groups_open, 2346 .read = seq_read, 2347 .llseek = seq_lseek, 2348 .release = seq_release, 2349 }; 2350 2351 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 2352 { 2353 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2354 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 2355 2356 BUG_ON(!cachep); 2357 return cachep; 2358 } 2359 2360 /* 2361 * Allocate the top-level s_group_info array for the specified number 2362 * of groups 2363 */ 2364 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 2365 { 2366 struct ext4_sb_info *sbi = EXT4_SB(sb); 2367 unsigned size; 2368 struct ext4_group_info ***new_groupinfo; 2369 2370 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 2371 EXT4_DESC_PER_BLOCK_BITS(sb); 2372 if (size <= sbi->s_group_info_size) 2373 return 0; 2374 2375 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2376 new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); 2377 if (!new_groupinfo) { 2378 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2379 return -ENOMEM; 2380 } 2381 if (sbi->s_group_info) { 2382 memcpy(new_groupinfo, sbi->s_group_info, 2383 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2384 kvfree(sbi->s_group_info); 2385 } 2386 sbi->s_group_info = new_groupinfo; 2387 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2388 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 2389 sbi->s_group_info_size); 2390 return 0; 2391 } 2392 2393 /* Create and initialize ext4_group_info data for the given group. */ 2394 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2395 struct ext4_group_desc *desc) 2396 { 2397 int i; 2398 int metalen = 0; 2399 struct ext4_sb_info *sbi = EXT4_SB(sb); 2400 struct ext4_group_info **meta_group_info; 2401 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2402 2403 /* 2404 * First check if this group is the first of a reserved block. 
2405 * If it's true, we have to allocate a new table of pointers 2406 * to ext4_group_info structures 2407 */ 2408 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2409 metalen = sizeof(*meta_group_info) << 2410 EXT4_DESC_PER_BLOCK_BITS(sb); 2411 meta_group_info = kmalloc(metalen, GFP_NOFS); 2412 if (meta_group_info == NULL) { 2413 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2414 "for a buddy group"); 2415 goto exit_meta_group_info; 2416 } 2417 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = 2418 meta_group_info; 2419 } 2420 2421 meta_group_info = 2422 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2423 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2424 2425 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 2426 if (meta_group_info[i] == NULL) { 2427 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2428 goto exit_group_info; 2429 } 2430 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2431 &(meta_group_info[i]->bb_state)); 2432 2433 /* 2434 * initialize bb_free to be able to skip 2435 * empty groups without initialization 2436 */ 2437 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2438 meta_group_info[i]->bb_free = 2439 ext4_free_clusters_after_init(sb, group, desc); 2440 } else { 2441 meta_group_info[i]->bb_free = 2442 ext4_free_group_clusters(sb, desc); 2443 } 2444 2445 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2446 init_rwsem(&meta_group_info[i]->alloc_sem); 2447 meta_group_info[i]->bb_free_root = RB_ROOT; 2448 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2449 2450 #ifdef DOUBLE_CHECK 2451 { 2452 struct buffer_head *bh; 2453 meta_group_info[i]->bb_bitmap = 2454 kmalloc(sb->s_blocksize, GFP_NOFS); 2455 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2456 bh = ext4_read_block_bitmap(sb, group); 2457 BUG_ON(IS_ERR_OR_NULL(bh)); 2458 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, 2459 sb->s_blocksize); 2460 put_bh(bh); 2461 } 2462 #endif 2463 2464 return 0; 2465 2466 exit_group_info: 2467 /* If a meta_group_info table has been allocated, release it now */ 2468 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2469 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); 2470 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; 2471 } 2472 exit_meta_group_info: 2473 return -ENOMEM; 2474 } /* ext4_mb_add_groupinfo */ 2475 2476 static int ext4_mb_init_backend(struct super_block *sb) 2477 { 2478 ext4_group_t ngroups = ext4_get_groups_count(sb); 2479 ext4_group_t i; 2480 struct ext4_sb_info *sbi = EXT4_SB(sb); 2481 int err; 2482 struct ext4_group_desc *desc; 2483 struct kmem_cache *cachep; 2484 2485 err = ext4_mb_alloc_groupinfo(sb, ngroups); 2486 if (err) 2487 return err; 2488 2489 sbi->s_buddy_cache = new_inode(sb); 2490 if (sbi->s_buddy_cache == NULL) { 2491 ext4_msg(sb, KERN_ERR, "can't get new inode"); 2492 goto err_freesgi; 2493 } 2494 /* To avoid potentially colliding with an valid on-disk inode number, 2495 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 2496 * not in the inode hash, so it should never be found by iget(), but 2497 * this will avoid confusion if it ever shows up during debugging. 
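         *
         * (The buddy cache inode exists only to give mballoc a page cache
         * in which to keep per-group data: roughly, block 2*group of its
         * address space caches a copy of the on-disk block bitmap and
         * block 2*group+1 the generated buddy bitmap -- see
         * ext4_mb_load_buddy().)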
*/ 2498 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 2499 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2500 for (i = 0; i < ngroups; i++) { 2501 desc = ext4_get_group_desc(sb, i, NULL); 2502 if (desc == NULL) { 2503 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 2504 goto err_freebuddy; 2505 } 2506 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2507 goto err_freebuddy; 2508 } 2509 2510 return 0; 2511 2512 err_freebuddy: 2513 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2514 while (i-- > 0) 2515 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 2516 i = sbi->s_group_info_size; 2517 while (i-- > 0) 2518 kfree(sbi->s_group_info[i]); 2519 iput(sbi->s_buddy_cache); 2520 err_freesgi: 2521 kvfree(sbi->s_group_info); 2522 return -ENOMEM; 2523 } 2524 2525 static void ext4_groupinfo_destroy_slabs(void) 2526 { 2527 int i; 2528 2529 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 2530 if (ext4_groupinfo_caches[i]) 2531 kmem_cache_destroy(ext4_groupinfo_caches[i]); 2532 ext4_groupinfo_caches[i] = NULL; 2533 } 2534 } 2535 2536 static int ext4_groupinfo_create_slab(size_t size) 2537 { 2538 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 2539 int slab_size; 2540 int blocksize_bits = order_base_2(size); 2541 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2542 struct kmem_cache *cachep; 2543 2544 if (cache_index >= NR_GRPINFO_CACHES) 2545 return -EINVAL; 2546 2547 if (unlikely(cache_index < 0)) 2548 cache_index = 0; 2549 2550 mutex_lock(&ext4_grpinfo_slab_create_mutex); 2551 if (ext4_groupinfo_caches[cache_index]) { 2552 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2553 return 0; /* Already created */ 2554 } 2555 2556 slab_size = offsetof(struct ext4_group_info, 2557 bb_counters[blocksize_bits + 2]); 2558 2559 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 2560 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 2561 NULL); 2562 2563 ext4_groupinfo_caches[cache_index] = cachep; 2564 2565 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2566 if (!cachep) { 2567 printk(KERN_EMERG 2568 "EXT4-fs: no memory for groupinfo slab cache\n"); 2569 return -ENOMEM; 2570 } 2571 2572 return 0; 2573 } 2574 2575 int ext4_mb_init(struct super_block *sb) 2576 { 2577 struct ext4_sb_info *sbi = EXT4_SB(sb); 2578 unsigned i, j; 2579 unsigned offset; 2580 unsigned max; 2581 int ret; 2582 2583 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2584 2585 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2586 if (sbi->s_mb_offsets == NULL) { 2587 ret = -ENOMEM; 2588 goto out; 2589 } 2590 2591 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 2592 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2593 if (sbi->s_mb_maxs == NULL) { 2594 ret = -ENOMEM; 2595 goto out; 2596 } 2597 2598 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 2599 if (ret < 0) 2600 goto out; 2601 2602 /* order 0 is regular bitmap */ 2603 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2604 sbi->s_mb_offsets[0] = 0; 2605 2606 i = 1; 2607 offset = 0; 2608 max = sb->s_blocksize << 2; 2609 do { 2610 sbi->s_mb_offsets[i] = offset; 2611 sbi->s_mb_maxs[i] = max; 2612 offset += 1 << (sb->s_blocksize_bits - i); 2613 max = max >> 1; 2614 i++; 2615 } while (i <= sb->s_blocksize_bits + 1); 2616 2617 spin_lock_init(&sbi->s_md_lock); 2618 spin_lock_init(&sbi->s_bal_lock); 2619 2620 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2621 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2622 sbi->s_mb_stats = MB_DEFAULT_STATS; 2623 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2624 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2625 /* 2626 * 
The default group preallocation is 512, which for 4k block 2627 * sizes translates to 2 megabytes. However for bigalloc file 2628 * systems, this is probably too big (i.e, if the cluster size 2629 * is 1 megabyte, then group preallocation size becomes half a 2630 * gigabyte!). As a default, we will keep a two megabyte 2631 * group pralloc size for cluster sizes up to 64k, and after 2632 * that, we will force a minimum group preallocation size of 2633 * 32 clusters. This translates to 8 megs when the cluster 2634 * size is 256k, and 32 megs when the cluster size is 1 meg, 2635 * which seems reasonable as a default. 2636 */ 2637 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 2638 sbi->s_cluster_bits, 32); 2639 /* 2640 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 2641 * to the lowest multiple of s_stripe which is bigger than 2642 * the s_mb_group_prealloc as determined above. We want 2643 * the preallocation size to be an exact multiple of the 2644 * RAID stripe size so that preallocations don't fragment 2645 * the stripes. 2646 */ 2647 if (sbi->s_stripe > 1) { 2648 sbi->s_mb_group_prealloc = roundup( 2649 sbi->s_mb_group_prealloc, sbi->s_stripe); 2650 } 2651 2652 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2653 if (sbi->s_locality_groups == NULL) { 2654 ret = -ENOMEM; 2655 goto out; 2656 } 2657 for_each_possible_cpu(i) { 2658 struct ext4_locality_group *lg; 2659 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2660 mutex_init(&lg->lg_mutex); 2661 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2662 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2663 spin_lock_init(&lg->lg_prealloc_lock); 2664 } 2665 2666 /* init file for buddy data */ 2667 ret = ext4_mb_init_backend(sb); 2668 if (ret != 0) 2669 goto out_free_locality_groups; 2670 2671 return 0; 2672 2673 out_free_locality_groups: 2674 free_percpu(sbi->s_locality_groups); 2675 sbi->s_locality_groups = NULL; 2676 out: 2677 kfree(sbi->s_mb_offsets); 2678 sbi->s_mb_offsets = NULL; 2679 kfree(sbi->s_mb_maxs); 2680 sbi->s_mb_maxs = NULL; 2681 return ret; 2682 } 2683 2684 /* need to called with the ext4 group lock held */ 2685 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2686 { 2687 struct ext4_prealloc_space *pa; 2688 struct list_head *cur, *tmp; 2689 int count = 0; 2690 2691 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2692 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2693 list_del(&pa->pa_group_list); 2694 count++; 2695 kmem_cache_free(ext4_pspace_cachep, pa); 2696 } 2697 if (count) 2698 mb_debug(1, "mballoc: %u PAs left\n", count); 2699 2700 } 2701 2702 int ext4_mb_release(struct super_block *sb) 2703 { 2704 ext4_group_t ngroups = ext4_get_groups_count(sb); 2705 ext4_group_t i; 2706 int num_meta_group_infos; 2707 struct ext4_group_info *grinfo; 2708 struct ext4_sb_info *sbi = EXT4_SB(sb); 2709 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2710 2711 if (sbi->s_group_info) { 2712 for (i = 0; i < ngroups; i++) { 2713 grinfo = ext4_get_group_info(sb, i); 2714 #ifdef DOUBLE_CHECK 2715 kfree(grinfo->bb_bitmap); 2716 #endif 2717 ext4_lock_group(sb, i); 2718 ext4_mb_cleanup_pa(grinfo); 2719 ext4_unlock_group(sb, i); 2720 kmem_cache_free(cachep, grinfo); 2721 } 2722 num_meta_group_infos = (ngroups + 2723 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2724 EXT4_DESC_PER_BLOCK_BITS(sb); 2725 for (i = 0; i < num_meta_group_infos; i++) 2726 kfree(sbi->s_group_info[i]); 2727 kvfree(sbi->s_group_info); 2728 } 2729 kfree(sbi->s_mb_offsets); 2730 kfree(sbi->s_mb_maxs); 
2731 iput(sbi->s_buddy_cache); 2732 if (sbi->s_mb_stats) { 2733 ext4_msg(sb, KERN_INFO, 2734 "mballoc: %u blocks %u reqs (%u success)", 2735 atomic_read(&sbi->s_bal_allocated), 2736 atomic_read(&sbi->s_bal_reqs), 2737 atomic_read(&sbi->s_bal_success)); 2738 ext4_msg(sb, KERN_INFO, 2739 "mballoc: %u extents scanned, %u goal hits, " 2740 "%u 2^N hits, %u breaks, %u lost", 2741 atomic_read(&sbi->s_bal_ex_scanned), 2742 atomic_read(&sbi->s_bal_goals), 2743 atomic_read(&sbi->s_bal_2orders), 2744 atomic_read(&sbi->s_bal_breaks), 2745 atomic_read(&sbi->s_mb_lost_chunks)); 2746 ext4_msg(sb, KERN_INFO, 2747 "mballoc: %lu generated and it took %Lu", 2748 sbi->s_mb_buddies_generated, 2749 sbi->s_mb_generation_time); 2750 ext4_msg(sb, KERN_INFO, 2751 "mballoc: %u preallocated, %u discarded", 2752 atomic_read(&sbi->s_mb_preallocated), 2753 atomic_read(&sbi->s_mb_discarded)); 2754 } 2755 2756 free_percpu(sbi->s_locality_groups); 2757 2758 return 0; 2759 } 2760 2761 static inline int ext4_issue_discard(struct super_block *sb, 2762 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 2763 { 2764 ext4_fsblk_t discard_block; 2765 2766 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 2767 ext4_group_first_block_no(sb, block_group)); 2768 count = EXT4_C2B(EXT4_SB(sb), count); 2769 trace_ext4_discard_blocks(sb, 2770 (unsigned long long) discard_block, count); 2771 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 2772 } 2773 2774 /* 2775 * This function is called by the jbd2 layer once the commit has finished, 2776 * so we know we can free the blocks that were released with that commit. 2777 */ 2778 static void ext4_free_data_callback(struct super_block *sb, 2779 struct ext4_journal_cb_entry *jce, 2780 int rc) 2781 { 2782 struct ext4_free_data *entry = (struct ext4_free_data *)jce; 2783 struct ext4_buddy e4b; 2784 struct ext4_group_info *db; 2785 int err, count = 0, count2 = 0; 2786 2787 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2788 entry->efd_count, entry->efd_group, entry); 2789 2790 if (test_opt(sb, DISCARD)) { 2791 err = ext4_issue_discard(sb, entry->efd_group, 2792 entry->efd_start_cluster, 2793 entry->efd_count); 2794 if (err && err != -EOPNOTSUPP) 2795 ext4_msg(sb, KERN_WARNING, "discard request in" 2796 " group:%d block:%d count:%d failed" 2797 " with %d", entry->efd_group, 2798 entry->efd_start_cluster, 2799 entry->efd_count, err); 2800 } 2801 2802 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 2803 /* we expect to find existing buddy because it's pinned */ 2804 BUG_ON(err != 0); 2805 2806 2807 db = e4b.bd_info; 2808 /* there are blocks to put in buddy to make them really free */ 2809 count += entry->efd_count; 2810 count2++; 2811 ext4_lock_group(sb, entry->efd_group); 2812 /* Take it out of per group rb tree */ 2813 rb_erase(&entry->efd_node, &(db->bb_free_root)); 2814 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 2815 2816 /* 2817 * Clear the trimmed flag for the group so that the next 2818 * ext4_trim_fs can trim it. 2819 * If the volume is mounted with -o discard, online discard 2820 * is supported and the free blocks will be trimmed online. 
2821 */ 2822 if (!test_opt(sb, DISCARD)) 2823 EXT4_MB_GRP_CLEAR_TRIMMED(db); 2824 2825 if (!db->bb_free_root.rb_node) { 2826 /* No more items in the per group rb tree 2827 * balance refcounts from ext4_mb_free_metadata() 2828 */ 2829 page_cache_release(e4b.bd_buddy_page); 2830 page_cache_release(e4b.bd_bitmap_page); 2831 } 2832 ext4_unlock_group(sb, entry->efd_group); 2833 kmem_cache_free(ext4_free_data_cachep, entry); 2834 ext4_mb_unload_buddy(&e4b); 2835 2836 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2837 } 2838 2839 int __init ext4_init_mballoc(void) 2840 { 2841 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 2842 SLAB_RECLAIM_ACCOUNT); 2843 if (ext4_pspace_cachep == NULL) 2844 return -ENOMEM; 2845 2846 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 2847 SLAB_RECLAIM_ACCOUNT); 2848 if (ext4_ac_cachep == NULL) { 2849 kmem_cache_destroy(ext4_pspace_cachep); 2850 return -ENOMEM; 2851 } 2852 2853 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 2854 SLAB_RECLAIM_ACCOUNT); 2855 if (ext4_free_data_cachep == NULL) { 2856 kmem_cache_destroy(ext4_pspace_cachep); 2857 kmem_cache_destroy(ext4_ac_cachep); 2858 return -ENOMEM; 2859 } 2860 return 0; 2861 } 2862 2863 void ext4_exit_mballoc(void) 2864 { 2865 /* 2866 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2867 * before destroying the slab cache. 2868 */ 2869 rcu_barrier(); 2870 kmem_cache_destroy(ext4_pspace_cachep); 2871 kmem_cache_destroy(ext4_ac_cachep); 2872 kmem_cache_destroy(ext4_free_data_cachep); 2873 ext4_groupinfo_destroy_slabs(); 2874 } 2875 2876 2877 /* 2878 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 2879 * Returns 0 if success or error code 2880 */ 2881 static noinline_for_stack int 2882 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2883 handle_t *handle, unsigned int reserv_clstrs) 2884 { 2885 struct buffer_head *bitmap_bh = NULL; 2886 struct ext4_group_desc *gdp; 2887 struct buffer_head *gdp_bh; 2888 struct ext4_sb_info *sbi; 2889 struct super_block *sb; 2890 ext4_fsblk_t block; 2891 int err, len; 2892 2893 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 2894 BUG_ON(ac->ac_b_ex.fe_len <= 0); 2895 2896 sb = ac->ac_sb; 2897 sbi = EXT4_SB(sb); 2898 2899 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 2900 if (IS_ERR(bitmap_bh)) { 2901 err = PTR_ERR(bitmap_bh); 2902 bitmap_bh = NULL; 2903 goto out_err; 2904 } 2905 2906 BUFFER_TRACE(bitmap_bh, "getting write access"); 2907 err = ext4_journal_get_write_access(handle, bitmap_bh); 2908 if (err) 2909 goto out_err; 2910 2911 err = -EIO; 2912 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 2913 if (!gdp) 2914 goto out_err; 2915 2916 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 2917 ext4_free_group_clusters(sb, gdp)); 2918 2919 BUFFER_TRACE(gdp_bh, "get_write_access"); 2920 err = ext4_journal_get_write_access(handle, gdp_bh); 2921 if (err) 2922 goto out_err; 2923 2924 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 2925 2926 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 2927 if (!ext4_data_block_valid(sbi, block, len)) { 2928 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2929 "fs metadata", block, block+len); 2930 /* File system mounted not to panic on error 2931 * Fix the bitmap and repeat the block allocation 2932 * We leak some of the blocks here. 
2933 */ 2934 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2935 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2936 ac->ac_b_ex.fe_len); 2937 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2938 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2939 if (!err) 2940 err = -EAGAIN; 2941 goto out_err; 2942 } 2943 2944 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2945 #ifdef AGGRESSIVE_CHECK 2946 { 2947 int i; 2948 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 2949 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 2950 bitmap_bh->b_data)); 2951 } 2952 } 2953 #endif 2954 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2955 ac->ac_b_ex.fe_len); 2956 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2957 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2958 ext4_free_group_clusters_set(sb, gdp, 2959 ext4_free_clusters_after_init(sb, 2960 ac->ac_b_ex.fe_group, gdp)); 2961 } 2962 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2963 ext4_free_group_clusters_set(sb, gdp, len); 2964 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 2965 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 2966 2967 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2968 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 2969 /* 2970 * Now reduce the dirty block count also. Should not go negative 2971 */ 2972 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 2973 /* release all the reserved blocks if non delalloc */ 2974 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 2975 reserv_clstrs); 2976 2977 if (sbi->s_log_groups_per_flex) { 2978 ext4_group_t flex_group = ext4_flex_group(sbi, 2979 ac->ac_b_ex.fe_group); 2980 atomic64_sub(ac->ac_b_ex.fe_len, 2981 &sbi->s_flex_groups[flex_group].free_clusters); 2982 } 2983 2984 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2985 if (err) 2986 goto out_err; 2987 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 2988 2989 out_err: 2990 brelse(bitmap_bh); 2991 return err; 2992 } 2993 2994 /* 2995 * here we normalize request for locality group 2996 * Group request are normalized to s_mb_group_prealloc, which goes to 2997 * s_strip if we set the same via mount option. 2998 * s_mb_group_prealloc can be configured via 2999 * /sys/fs/ext4/<partition>/mb_group_prealloc 3000 * 3001 * XXX: should we try to preallocate more than the group has now? 
3002 */ 3003 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 3004 { 3005 struct super_block *sb = ac->ac_sb; 3006 struct ext4_locality_group *lg = ac->ac_lg; 3007 3008 BUG_ON(lg == NULL); 3009 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 3010 mb_debug(1, "#%u: goal %u blocks for locality group\n", 3011 current->pid, ac->ac_g_ex.fe_len); 3012 } 3013 3014 /* 3015 * Normalization means making request better in terms of 3016 * size and alignment 3017 */ 3018 static noinline_for_stack void 3019 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 3020 struct ext4_allocation_request *ar) 3021 { 3022 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3023 int bsbits, max; 3024 ext4_lblk_t end; 3025 loff_t size, start_off; 3026 loff_t orig_size __maybe_unused; 3027 ext4_lblk_t start; 3028 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3029 struct ext4_prealloc_space *pa; 3030 3031 /* do normalize only data requests, metadata requests 3032 do not need preallocation */ 3033 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3034 return; 3035 3036 /* sometime caller may want exact blocks */ 3037 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3038 return; 3039 3040 /* caller may indicate that preallocation isn't 3041 * required (it's a tail, for example) */ 3042 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 3043 return; 3044 3045 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 3046 ext4_mb_normalize_group_request(ac); 3047 return ; 3048 } 3049 3050 bsbits = ac->ac_sb->s_blocksize_bits; 3051 3052 /* first, let's learn actual file size 3053 * given current request is allocated */ 3054 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 3055 size = size << bsbits; 3056 if (size < i_size_read(ac->ac_inode)) 3057 size = i_size_read(ac->ac_inode); 3058 orig_size = size; 3059 3060 /* max size of free chunks */ 3061 max = 2 << bsbits; 3062 3063 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 3064 (req <= (size) || max <= (chunk_size)) 3065 3066 /* first, try to predict filesize */ 3067 /* XXX: should this table be tunable? 
*/ 3068 start_off = 0; 3069 if (size <= 16 * 1024) { 3070 size = 16 * 1024; 3071 } else if (size <= 32 * 1024) { 3072 size = 32 * 1024; 3073 } else if (size <= 64 * 1024) { 3074 size = 64 * 1024; 3075 } else if (size <= 128 * 1024) { 3076 size = 128 * 1024; 3077 } else if (size <= 256 * 1024) { 3078 size = 256 * 1024; 3079 } else if (size <= 512 * 1024) { 3080 size = 512 * 1024; 3081 } else if (size <= 1024 * 1024) { 3082 size = 1024 * 1024; 3083 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 3084 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3085 (21 - bsbits)) << 21; 3086 size = 2 * 1024 * 1024; 3087 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 3088 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3089 (22 - bsbits)) << 22; 3090 size = 4 * 1024 * 1024; 3091 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 3092 (8<<20)>>bsbits, max, 8 * 1024)) { 3093 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3094 (23 - bsbits)) << 23; 3095 size = 8 * 1024 * 1024; 3096 } else { 3097 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 3098 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 3099 ac->ac_o_ex.fe_len) << bsbits; 3100 } 3101 size = size >> bsbits; 3102 start = start_off >> bsbits; 3103 3104 /* don't cover already allocated blocks in selected range */ 3105 if (ar->pleft && start <= ar->lleft) { 3106 size -= ar->lleft + 1 - start; 3107 start = ar->lleft + 1; 3108 } 3109 if (ar->pright && start + size - 1 >= ar->lright) 3110 size -= start + size - ar->lright; 3111 3112 end = start + size; 3113 3114 /* check we don't cross already preallocated blocks */ 3115 rcu_read_lock(); 3116 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3117 ext4_lblk_t pa_end; 3118 3119 if (pa->pa_deleted) 3120 continue; 3121 spin_lock(&pa->pa_lock); 3122 if (pa->pa_deleted) { 3123 spin_unlock(&pa->pa_lock); 3124 continue; 3125 } 3126 3127 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3128 pa->pa_len); 3129 3130 /* PA must not overlap original request */ 3131 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 3132 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 3133 3134 /* skip PAs this normalized request doesn't overlap with */ 3135 if (pa->pa_lstart >= end || pa_end <= start) { 3136 spin_unlock(&pa->pa_lock); 3137 continue; 3138 } 3139 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 3140 3141 /* adjust start or end to be adjacent to this pa */ 3142 if (pa_end <= ac->ac_o_ex.fe_logical) { 3143 BUG_ON(pa_end < start); 3144 start = pa_end; 3145 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 3146 BUG_ON(pa->pa_lstart > end); 3147 end = pa->pa_lstart; 3148 } 3149 spin_unlock(&pa->pa_lock); 3150 } 3151 rcu_read_unlock(); 3152 size = end - start; 3153 3154 /* XXX: extra loop to check we really don't overlap preallocations */ 3155 rcu_read_lock(); 3156 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3157 ext4_lblk_t pa_end; 3158 3159 spin_lock(&pa->pa_lock); 3160 if (pa->pa_deleted == 0) { 3161 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3162 pa->pa_len); 3163 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 3164 } 3165 spin_unlock(&pa->pa_lock); 3166 } 3167 rcu_read_unlock(); 3168 3169 if (start + size <= ac->ac_o_ex.fe_logical && 3170 start > ac->ac_o_ex.fe_logical) { 3171 ext4_msg(ac->ac_sb, KERN_ERR, 3172 "start %lu, size %lu, fe_logical %lu", 3173 (unsigned long) start, (unsigned long) size, 3174 (unsigned long) ac->ac_o_ex.fe_logical); 3175 BUG(); 3176 } 3177 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3178 3179 
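        /*
         * At this point [start, start + size) is the normalized window:
         * it has been clipped so that it does not cover blocks that are
         * already allocated on either side of the original request
         * (ar->lleft / ar->lright) and does not overlap any existing
         * inode preallocation.
         */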
/* now prepare goal request */ 3180 3181 /* XXX: is it better to align blocks WRT to logical 3182 * placement or satisfy big request as is */ 3183 ac->ac_g_ex.fe_logical = start; 3184 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 3185 3186 /* define goal start in order to merge */ 3187 if (ar->pright && (ar->lright == (start + size))) { 3188 /* merge to the right */ 3189 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 3190 &ac->ac_f_ex.fe_group, 3191 &ac->ac_f_ex.fe_start); 3192 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3193 } 3194 if (ar->pleft && (ar->lleft + 1 == start)) { 3195 /* merge to the left */ 3196 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 3197 &ac->ac_f_ex.fe_group, 3198 &ac->ac_f_ex.fe_start); 3199 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3200 } 3201 3202 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, 3203 (unsigned) orig_size, (unsigned) start); 3204 } 3205 3206 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3207 { 3208 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3209 3210 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3211 atomic_inc(&sbi->s_bal_reqs); 3212 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3213 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3214 atomic_inc(&sbi->s_bal_success); 3215 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3216 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3217 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3218 atomic_inc(&sbi->s_bal_goals); 3219 if (ac->ac_found > sbi->s_mb_max_to_scan) 3220 atomic_inc(&sbi->s_bal_breaks); 3221 } 3222 3223 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3224 trace_ext4_mballoc_alloc(ac); 3225 else 3226 trace_ext4_mballoc_prealloc(ac); 3227 } 3228 3229 /* 3230 * Called on failure; free up any blocks from the inode PA for this 3231 * context. We don't need this for MB_GROUP_PA because we only change 3232 * pa_free in ext4_mb_release_context(), but on failure, we've already 3233 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 3234 */ 3235 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3236 { 3237 struct ext4_prealloc_space *pa = ac->ac_pa; 3238 struct ext4_buddy e4b; 3239 int err; 3240 3241 if (pa == NULL) { 3242 if (ac->ac_f_ex.fe_len == 0) 3243 return; 3244 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 3245 if (err) { 3246 /* 3247 * This should never happen since we pin the 3248 * pages in the ext4_allocation_context so 3249 * ext4_mb_load_buddy() should never fail. 
3250 */ 3251 WARN(1, "mb_load_buddy failed (%d)", err); 3252 return; 3253 } 3254 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3255 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 3256 ac->ac_f_ex.fe_len); 3257 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3258 ext4_mb_unload_buddy(&e4b); 3259 return; 3260 } 3261 if (pa->pa_type == MB_INODE_PA) 3262 pa->pa_free += ac->ac_b_ex.fe_len; 3263 } 3264 3265 /* 3266 * use blocks preallocated to inode 3267 */ 3268 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3269 struct ext4_prealloc_space *pa) 3270 { 3271 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3272 ext4_fsblk_t start; 3273 ext4_fsblk_t end; 3274 int len; 3275 3276 /* found preallocated blocks, use them */ 3277 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3278 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 3279 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 3280 len = EXT4_NUM_B2C(sbi, end - start); 3281 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3282 &ac->ac_b_ex.fe_start); 3283 ac->ac_b_ex.fe_len = len; 3284 ac->ac_status = AC_STATUS_FOUND; 3285 ac->ac_pa = pa; 3286 3287 BUG_ON(start < pa->pa_pstart); 3288 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 3289 BUG_ON(pa->pa_free < len); 3290 pa->pa_free -= len; 3291 3292 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); 3293 } 3294 3295 /* 3296 * use blocks preallocated to locality group 3297 */ 3298 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3299 struct ext4_prealloc_space *pa) 3300 { 3301 unsigned int len = ac->ac_o_ex.fe_len; 3302 3303 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3304 &ac->ac_b_ex.fe_group, 3305 &ac->ac_b_ex.fe_start); 3306 ac->ac_b_ex.fe_len = len; 3307 ac->ac_status = AC_STATUS_FOUND; 3308 ac->ac_pa = pa; 3309 3310 /* we don't correct pa_pstart or pa_plen here to avoid 3311 * possible race when the group is being loaded concurrently 3312 * instead we correct pa later, after blocks are marked 3313 * in on-disk bitmap -- see ext4_mb_release_context() 3314 * Other CPUs are prevented from allocating from this pa by lg_mutex 3315 */ 3316 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3317 } 3318 3319 /* 3320 * Return the prealloc space that have minimal distance 3321 * from the goal block. @cpa is the prealloc 3322 * space that is having currently known minimal distance 3323 * from the goal block. 
3324 */ 3325 static struct ext4_prealloc_space * 3326 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3327 struct ext4_prealloc_space *pa, 3328 struct ext4_prealloc_space *cpa) 3329 { 3330 ext4_fsblk_t cur_distance, new_distance; 3331 3332 if (cpa == NULL) { 3333 atomic_inc(&pa->pa_count); 3334 return pa; 3335 } 3336 cur_distance = abs(goal_block - cpa->pa_pstart); 3337 new_distance = abs(goal_block - pa->pa_pstart); 3338 3339 if (cur_distance <= new_distance) 3340 return cpa; 3341 3342 /* drop the previous reference */ 3343 atomic_dec(&cpa->pa_count); 3344 atomic_inc(&pa->pa_count); 3345 return pa; 3346 } 3347 3348 /* 3349 * search goal blocks in preallocated space 3350 */ 3351 static noinline_for_stack int 3352 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3353 { 3354 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3355 int order, i; 3356 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3357 struct ext4_locality_group *lg; 3358 struct ext4_prealloc_space *pa, *cpa = NULL; 3359 ext4_fsblk_t goal_block; 3360 3361 /* only data can be preallocated */ 3362 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3363 return 0; 3364 3365 /* first, try per-file preallocation */ 3366 rcu_read_lock(); 3367 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3368 3369 /* all fields in this condition don't change, 3370 * so we can skip locking for them */ 3371 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3372 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 3373 EXT4_C2B(sbi, pa->pa_len))) 3374 continue; 3375 3376 /* non-extent files can't have physical blocks past 2^32 */ 3377 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3378 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 3379 EXT4_MAX_BLOCK_FILE_PHYS)) 3380 continue; 3381 3382 /* found preallocated blocks, use them */ 3383 spin_lock(&pa->pa_lock); 3384 if (pa->pa_deleted == 0 && pa->pa_free) { 3385 atomic_inc(&pa->pa_count); 3386 ext4_mb_use_inode_pa(ac, pa); 3387 spin_unlock(&pa->pa_lock); 3388 ac->ac_criteria = 10; 3389 rcu_read_unlock(); 3390 return 1; 3391 } 3392 spin_unlock(&pa->pa_lock); 3393 } 3394 rcu_read_unlock(); 3395 3396 /* can we use group allocation? */ 3397 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3398 return 0; 3399 3400 /* inode may have no locality group for some reason */ 3401 lg = ac->ac_lg; 3402 if (lg == NULL) 3403 return 0; 3404 order = fls(ac->ac_o_ex.fe_len) - 1; 3405 if (order > PREALLOC_TB_SIZE - 1) 3406 /* The max size of hash table is PREALLOC_TB_SIZE */ 3407 order = PREALLOC_TB_SIZE - 1; 3408 3409 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3410 /* 3411 * search for the prealloc space that is having 3412 * minimal distance from the goal block. 3413 */ 3414 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3415 rcu_read_lock(); 3416 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3417 pa_inode_list) { 3418 spin_lock(&pa->pa_lock); 3419 if (pa->pa_deleted == 0 && 3420 pa->pa_free >= ac->ac_o_ex.fe_len) { 3421 3422 cpa = ext4_mb_check_group_pa(goal_block, 3423 pa, cpa); 3424 } 3425 spin_unlock(&pa->pa_lock); 3426 } 3427 rcu_read_unlock(); 3428 } 3429 if (cpa) { 3430 ext4_mb_use_group_pa(ac, cpa); 3431 ac->ac_criteria = 20; 3432 return 1; 3433 } 3434 return 0; 3435 } 3436 3437 /* 3438 * the function goes through all block freed in the group 3439 * but not yet committed and marks them used in in-core bitmap. 
3440 * buddy must be generated from this bitmap 3441 * Need to be called with the ext4 group lock held 3442 */ 3443 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 3444 ext4_group_t group) 3445 { 3446 struct rb_node *n; 3447 struct ext4_group_info *grp; 3448 struct ext4_free_data *entry; 3449 3450 grp = ext4_get_group_info(sb, group); 3451 n = rb_first(&(grp->bb_free_root)); 3452 3453 while (n) { 3454 entry = rb_entry(n, struct ext4_free_data, efd_node); 3455 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 3456 n = rb_next(n); 3457 } 3458 return; 3459 } 3460 3461 /* 3462 * the function goes through all preallocation in this group and marks them 3463 * used in in-core bitmap. buddy must be generated from this bitmap 3464 * Need to be called with ext4 group lock held 3465 */ 3466 static noinline_for_stack 3467 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 3468 ext4_group_t group) 3469 { 3470 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3471 struct ext4_prealloc_space *pa; 3472 struct list_head *cur; 3473 ext4_group_t groupnr; 3474 ext4_grpblk_t start; 3475 int preallocated = 0; 3476 int len; 3477 3478 /* all form of preallocation discards first load group, 3479 * so the only competing code is preallocation use. 3480 * we don't need any locking here 3481 * notice we do NOT ignore preallocations with pa_deleted 3482 * otherwise we could leave used blocks available for 3483 * allocation in buddy when concurrent ext4_mb_put_pa() 3484 * is dropping preallocation 3485 */ 3486 list_for_each(cur, &grp->bb_prealloc_list) { 3487 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3488 spin_lock(&pa->pa_lock); 3489 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3490 &groupnr, &start); 3491 len = pa->pa_len; 3492 spin_unlock(&pa->pa_lock); 3493 if (unlikely(len == 0)) 3494 continue; 3495 BUG_ON(groupnr != group); 3496 ext4_set_bits(bitmap, start, len); 3497 preallocated += len; 3498 } 3499 mb_debug(1, "prellocated %u for group %u\n", preallocated, group); 3500 } 3501 3502 static void ext4_mb_pa_callback(struct rcu_head *head) 3503 { 3504 struct ext4_prealloc_space *pa; 3505 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3506 3507 BUG_ON(atomic_read(&pa->pa_count)); 3508 BUG_ON(pa->pa_deleted == 0); 3509 kmem_cache_free(ext4_pspace_cachep, pa); 3510 } 3511 3512 /* 3513 * drops a reference to preallocated space descriptor 3514 * if this was the last reference and the space is consumed 3515 */ 3516 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3517 struct super_block *sb, struct ext4_prealloc_space *pa) 3518 { 3519 ext4_group_t grp; 3520 ext4_fsblk_t grp_blk; 3521 3522 /* in this short window concurrent discard can set pa_deleted */ 3523 spin_lock(&pa->pa_lock); 3524 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 3525 spin_unlock(&pa->pa_lock); 3526 return; 3527 } 3528 3529 if (pa->pa_deleted == 1) { 3530 spin_unlock(&pa->pa_lock); 3531 return; 3532 } 3533 3534 pa->pa_deleted = 1; 3535 spin_unlock(&pa->pa_lock); 3536 3537 grp_blk = pa->pa_pstart; 3538 /* 3539 * If doing group-based preallocation, pa_pstart may be in the 3540 * next group when pa is used up 3541 */ 3542 if (pa->pa_type == MB_GROUP_PA) 3543 grp_blk--; 3544 3545 grp = ext4_get_group_number(sb, grp_blk); 3546 3547 /* 3548 * possible race: 3549 * 3550 * P1 (buddy init) P2 (regular allocation) 3551 * find block B in PA 3552 * copy on-disk bitmap to buddy 3553 * mark B in on-disk bitmap 3554 * 
drop PA from group 3555 * mark all PAs in buddy 3556 * 3557 * thus, P1 initializes buddy with B available. to prevent this 3558 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 3559 * against that pair 3560 */ 3561 ext4_lock_group(sb, grp); 3562 list_del(&pa->pa_group_list); 3563 ext4_unlock_group(sb, grp); 3564 3565 spin_lock(pa->pa_obj_lock); 3566 list_del_rcu(&pa->pa_inode_list); 3567 spin_unlock(pa->pa_obj_lock); 3568 3569 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3570 } 3571 3572 /* 3573 * creates new preallocated space for given inode 3574 */ 3575 static noinline_for_stack int 3576 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 3577 { 3578 struct super_block *sb = ac->ac_sb; 3579 struct ext4_sb_info *sbi = EXT4_SB(sb); 3580 struct ext4_prealloc_space *pa; 3581 struct ext4_group_info *grp; 3582 struct ext4_inode_info *ei; 3583 3584 /* preallocate only when found space is larger then requested */ 3585 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3586 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3587 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3588 3589 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3590 if (pa == NULL) 3591 return -ENOMEM; 3592 3593 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 3594 int winl; 3595 int wins; 3596 int win; 3597 int offs; 3598 3599 /* we can't allocate as much as normalizer wants. 3600 * so, found space must get proper lstart 3601 * to cover original request */ 3602 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3603 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 3604 3605 /* we're limited by original request in that 3606 * logical block must be covered any way 3607 * winl is window we can move our chunk within */ 3608 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 3609 3610 /* also, we should cover whole original request */ 3611 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 3612 3613 /* the smallest one defines real window */ 3614 win = min(winl, wins); 3615 3616 offs = ac->ac_o_ex.fe_logical % 3617 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3618 if (offs && offs < win) 3619 win = offs; 3620 3621 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 3622 EXT4_NUM_B2C(sbi, win); 3623 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 3624 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 3625 } 3626 3627 /* preallocation can change ac_b_ex, thus we store actually 3628 * allocated blocks for history */ 3629 ac->ac_f_ex = ac->ac_b_ex; 3630 3631 pa->pa_lstart = ac->ac_b_ex.fe_logical; 3632 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3633 pa->pa_len = ac->ac_b_ex.fe_len; 3634 pa->pa_free = pa->pa_len; 3635 atomic_set(&pa->pa_count, 1); 3636 spin_lock_init(&pa->pa_lock); 3637 INIT_LIST_HEAD(&pa->pa_inode_list); 3638 INIT_LIST_HEAD(&pa->pa_group_list); 3639 pa->pa_deleted = 0; 3640 pa->pa_type = MB_INODE_PA; 3641 3642 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, 3643 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3644 trace_ext4_mb_new_inode_pa(ac, pa); 3645 3646 ext4_mb_use_inode_pa(ac, pa); 3647 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 3648 3649 ei = EXT4_I(ac->ac_inode); 3650 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3651 3652 pa->pa_obj_lock = &ei->i_prealloc_lock; 3653 pa->pa_inode = ac->ac_inode; 3654 3655 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3656 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3657 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3658 3659 spin_lock(pa->pa_obj_lock); 3660 list_add_rcu(&pa->pa_inode_list, 
                     &ei->i_prealloc_list);
        spin_unlock(pa->pa_obj_lock);

        return 0;
}

/*
 * creates new preallocated space for the locality group the inode
 * belongs to
 */
static noinline_for_stack int
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
        struct super_block *sb = ac->ac_sb;
        struct ext4_locality_group *lg;
        struct ext4_prealloc_space *pa;
        struct ext4_group_info *grp;

        /* preallocate only when the found space is larger than requested */
        BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
        BUG_ON(ac->ac_status != AC_STATUS_FOUND);
        BUG_ON(!S_ISREG(ac->ac_inode->i_mode));

        BUG_ON(ext4_pspace_cachep == NULL);
        pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
        if (pa == NULL)
                return -ENOMEM;

        /* preallocation can change ac_b_ex, thus we store the actually
         * allocated blocks for history */
        ac->ac_f_ex = ac->ac_b_ex;

        pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
        pa->pa_lstart = pa->pa_pstart;
        pa->pa_len = ac->ac_b_ex.fe_len;
        pa->pa_free = pa->pa_len;
        atomic_set(&pa->pa_count, 1);
        spin_lock_init(&pa->pa_lock);
        INIT_LIST_HEAD(&pa->pa_inode_list);
        INIT_LIST_HEAD(&pa->pa_group_list);
        pa->pa_deleted = 0;
        pa->pa_type = MB_GROUP_PA;

        mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
                 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
        trace_ext4_mb_new_group_pa(ac, pa);

        ext4_mb_use_group_pa(ac, pa);
        atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);

        grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
        lg = ac->ac_lg;
        BUG_ON(lg == NULL);

        pa->pa_obj_lock = &lg->lg_prealloc_lock;
        pa->pa_inode = NULL;

        ext4_lock_group(sb, ac->ac_b_ex.fe_group);
        list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
        ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

        /*
         * We will later add the new pa to the right bucket
         * after updating the pa_free in ext4_mb_release_context
         */
        return 0;
}

static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
        int err;

        if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
                err = ext4_mb_new_group_pa(ac);
        else
                err = ext4_mb_new_inode_pa(ac);
        return err;
}

/*
 * finds all unused blocks in the on-disk bitmap and frees them in the
 * in-core bitmap and buddy.
 * @pa must be unlinked from the inode and group lists, so that
 * nobody else can find/use it.
 * the caller MUST hold the group/inode locks.
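 *
 * E.g. for a 16-cluster inode PA of which 10 clusters were actually
 * written, the bitmap walk below finds the remaining free runs inside
 * the PA range, returns them to the buddy, and the total freed should
 * then match pa_free (6), which is sanity-checked at the end.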
3744 * TODO: optimize the case when there are no in-core structures yet 3745 */ 3746 static noinline_for_stack int 3747 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 3748 struct ext4_prealloc_space *pa) 3749 { 3750 struct super_block *sb = e4b->bd_sb; 3751 struct ext4_sb_info *sbi = EXT4_SB(sb); 3752 unsigned int end; 3753 unsigned int next; 3754 ext4_group_t group; 3755 ext4_grpblk_t bit; 3756 unsigned long long grp_blk_start; 3757 int err = 0; 3758 int free = 0; 3759 3760 BUG_ON(pa->pa_deleted == 0); 3761 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3762 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 3763 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3764 end = bit + pa->pa_len; 3765 3766 while (bit < end) { 3767 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3768 if (bit >= end) 3769 break; 3770 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3771 mb_debug(1, " free preallocated %u/%u in group %u\n", 3772 (unsigned) ext4_group_first_block_no(sb, group) + bit, 3773 (unsigned) next - bit, (unsigned) group); 3774 free += next - bit; 3775 3776 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3777 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 3778 EXT4_C2B(sbi, bit)), 3779 next - bit); 3780 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3781 bit = next + 1; 3782 } 3783 if (free != pa->pa_free) { 3784 ext4_msg(e4b->bd_sb, KERN_CRIT, 3785 "pa %p: logic %lu, phys. %lu, len %lu", 3786 pa, (unsigned long) pa->pa_lstart, 3787 (unsigned long) pa->pa_pstart, 3788 (unsigned long) pa->pa_len); 3789 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 3790 free, pa->pa_free); 3791 /* 3792 * pa is already deleted so we use the value obtained 3793 * from the bitmap and continue. 
3794 */ 3795 } 3796 atomic_add(free, &sbi->s_mb_discarded); 3797 3798 return err; 3799 } 3800 3801 static noinline_for_stack int 3802 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3803 struct ext4_prealloc_space *pa) 3804 { 3805 struct super_block *sb = e4b->bd_sb; 3806 ext4_group_t group; 3807 ext4_grpblk_t bit; 3808 3809 trace_ext4_mb_release_group_pa(sb, pa); 3810 BUG_ON(pa->pa_deleted == 0); 3811 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3812 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3813 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3814 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3815 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 3816 3817 return 0; 3818 } 3819 3820 /* 3821 * releases all preallocations in given group 3822 * 3823 * first, we need to decide discard policy: 3824 * - when do we discard 3825 * 1) ENOSPC 3826 * - how many do we discard 3827 * 1) how many requested 3828 */ 3829 static noinline_for_stack int 3830 ext4_mb_discard_group_preallocations(struct super_block *sb, 3831 ext4_group_t group, int needed) 3832 { 3833 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3834 struct buffer_head *bitmap_bh = NULL; 3835 struct ext4_prealloc_space *pa, *tmp; 3836 struct list_head list; 3837 struct ext4_buddy e4b; 3838 int err; 3839 int busy = 0; 3840 int free = 0; 3841 3842 mb_debug(1, "discard preallocation for group %u\n", group); 3843 3844 if (list_empty(&grp->bb_prealloc_list)) 3845 return 0; 3846 3847 bitmap_bh = ext4_read_block_bitmap(sb, group); 3848 if (IS_ERR(bitmap_bh)) { 3849 err = PTR_ERR(bitmap_bh); 3850 ext4_error(sb, "Error %d reading block bitmap for %u", 3851 err, group); 3852 return 0; 3853 } 3854 3855 err = ext4_mb_load_buddy(sb, group, &e4b); 3856 if (err) { 3857 ext4_error(sb, "Error loading buddy information for %u", group); 3858 put_bh(bitmap_bh); 3859 return 0; 3860 } 3861 3862 if (needed == 0) 3863 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 3864 3865 INIT_LIST_HEAD(&list); 3866 repeat: 3867 ext4_lock_group(sb, group); 3868 list_for_each_entry_safe(pa, tmp, 3869 &grp->bb_prealloc_list, pa_group_list) { 3870 spin_lock(&pa->pa_lock); 3871 if (atomic_read(&pa->pa_count)) { 3872 spin_unlock(&pa->pa_lock); 3873 busy = 1; 3874 continue; 3875 } 3876 if (pa->pa_deleted) { 3877 spin_unlock(&pa->pa_lock); 3878 continue; 3879 } 3880 3881 /* seems this one can be freed ... */ 3882 pa->pa_deleted = 1; 3883 3884 /* we can trust pa_free ... */ 3885 free += pa->pa_free; 3886 3887 spin_unlock(&pa->pa_lock); 3888 3889 list_del(&pa->pa_group_list); 3890 list_add(&pa->u.pa_tmp_list, &list); 3891 } 3892 3893 /* if we still need more blocks and some PAs were used, try again */ 3894 if (free < needed && busy) { 3895 busy = 0; 3896 ext4_unlock_group(sb, group); 3897 cond_resched(); 3898 goto repeat; 3899 } 3900 3901 /* found anything to free? 
*/ 3902 if (list_empty(&list)) { 3903 BUG_ON(free != 0); 3904 goto out; 3905 } 3906 3907 /* now free all selected PAs */ 3908 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3909 3910 /* remove from object (inode or locality group) */ 3911 spin_lock(pa->pa_obj_lock); 3912 list_del_rcu(&pa->pa_inode_list); 3913 spin_unlock(pa->pa_obj_lock); 3914 3915 if (pa->pa_type == MB_GROUP_PA) 3916 ext4_mb_release_group_pa(&e4b, pa); 3917 else 3918 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3919 3920 list_del(&pa->u.pa_tmp_list); 3921 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3922 } 3923 3924 out: 3925 ext4_unlock_group(sb, group); 3926 ext4_mb_unload_buddy(&e4b); 3927 put_bh(bitmap_bh); 3928 return free; 3929 } 3930 3931 /* 3932 * releases all non-used preallocated blocks for given inode 3933 * 3934 * It's important to discard preallocations under i_data_sem 3935 * We don't want another block to be served from the prealloc 3936 * space when we are discarding the inode prealloc space. 3937 * 3938 * FIXME!! Make sure it is valid at all the call sites 3939 */ 3940 void ext4_discard_preallocations(struct inode *inode) 3941 { 3942 struct ext4_inode_info *ei = EXT4_I(inode); 3943 struct super_block *sb = inode->i_sb; 3944 struct buffer_head *bitmap_bh = NULL; 3945 struct ext4_prealloc_space *pa, *tmp; 3946 ext4_group_t group = 0; 3947 struct list_head list; 3948 struct ext4_buddy e4b; 3949 int err; 3950 3951 if (!S_ISREG(inode->i_mode)) { 3952 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 3953 return; 3954 } 3955 3956 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); 3957 trace_ext4_discard_preallocations(inode); 3958 3959 INIT_LIST_HEAD(&list); 3960 3961 repeat: 3962 /* first, collect all pa's in the inode */ 3963 spin_lock(&ei->i_prealloc_lock); 3964 while (!list_empty(&ei->i_prealloc_list)) { 3965 pa = list_entry(ei->i_prealloc_list.next, 3966 struct ext4_prealloc_space, pa_inode_list); 3967 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 3968 spin_lock(&pa->pa_lock); 3969 if (atomic_read(&pa->pa_count)) { 3970 /* this shouldn't happen often - nobody should 3971 * use preallocation while we're discarding it */ 3972 spin_unlock(&pa->pa_lock); 3973 spin_unlock(&ei->i_prealloc_lock); 3974 ext4_msg(sb, KERN_ERR, 3975 "uh-oh! used pa while discarding"); 3976 WARN_ON(1); 3977 schedule_timeout_uninterruptible(HZ); 3978 goto repeat; 3979 3980 } 3981 if (pa->pa_deleted == 0) { 3982 pa->pa_deleted = 1; 3983 spin_unlock(&pa->pa_lock); 3984 list_del_rcu(&pa->pa_inode_list); 3985 list_add(&pa->u.pa_tmp_list, &list); 3986 continue; 3987 } 3988 3989 /* someone is deleting pa right now */ 3990 spin_unlock(&pa->pa_lock); 3991 spin_unlock(&ei->i_prealloc_lock); 3992 3993 /* we have to wait here because pa_deleted 3994 * doesn't mean pa is already unlinked from 3995 * the list. 
as we might be called from 3996 * ->clear_inode() the inode will get freed 3997 * and concurrent thread which is unlinking 3998 * pa from inode's list may access already 3999 * freed memory, bad-bad-bad */ 4000 4001 /* XXX: if this happens too often, we can 4002 * add a flag to force wait only in case 4003 * of ->clear_inode(), but not in case of 4004 * regular truncate */ 4005 schedule_timeout_uninterruptible(HZ); 4006 goto repeat; 4007 } 4008 spin_unlock(&ei->i_prealloc_lock); 4009 4010 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4011 BUG_ON(pa->pa_type != MB_INODE_PA); 4012 group = ext4_get_group_number(sb, pa->pa_pstart); 4013 4014 err = ext4_mb_load_buddy(sb, group, &e4b); 4015 if (err) { 4016 ext4_error(sb, "Error loading buddy information for %u", 4017 group); 4018 continue; 4019 } 4020 4021 bitmap_bh = ext4_read_block_bitmap(sb, group); 4022 if (IS_ERR(bitmap_bh)) { 4023 err = PTR_ERR(bitmap_bh); 4024 ext4_error(sb, "Error %d reading block bitmap for %u", 4025 err, group); 4026 ext4_mb_unload_buddy(&e4b); 4027 continue; 4028 } 4029 4030 ext4_lock_group(sb, group); 4031 list_del(&pa->pa_group_list); 4032 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4033 ext4_unlock_group(sb, group); 4034 4035 ext4_mb_unload_buddy(&e4b); 4036 put_bh(bitmap_bh); 4037 4038 list_del(&pa->u.pa_tmp_list); 4039 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4040 } 4041 } 4042 4043 #ifdef CONFIG_EXT4_DEBUG 4044 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4045 { 4046 struct super_block *sb = ac->ac_sb; 4047 ext4_group_t ngroups, i; 4048 4049 if (!ext4_mballoc_debug || 4050 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) 4051 return; 4052 4053 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" 4054 " Allocation context details:"); 4055 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", 4056 ac->ac_status, ac->ac_flags); 4057 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " 4058 "goal %lu/%lu/%lu@%lu, " 4059 "best %lu/%lu/%lu@%lu cr %d", 4060 (unsigned long)ac->ac_o_ex.fe_group, 4061 (unsigned long)ac->ac_o_ex.fe_start, 4062 (unsigned long)ac->ac_o_ex.fe_len, 4063 (unsigned long)ac->ac_o_ex.fe_logical, 4064 (unsigned long)ac->ac_g_ex.fe_group, 4065 (unsigned long)ac->ac_g_ex.fe_start, 4066 (unsigned long)ac->ac_g_ex.fe_len, 4067 (unsigned long)ac->ac_g_ex.fe_logical, 4068 (unsigned long)ac->ac_b_ex.fe_group, 4069 (unsigned long)ac->ac_b_ex.fe_start, 4070 (unsigned long)ac->ac_b_ex.fe_len, 4071 (unsigned long)ac->ac_b_ex.fe_logical, 4072 (int)ac->ac_criteria); 4073 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found); 4074 ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); 4075 ngroups = ext4_get_groups_count(sb); 4076 for (i = 0; i < ngroups; i++) { 4077 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 4078 struct ext4_prealloc_space *pa; 4079 ext4_grpblk_t start; 4080 struct list_head *cur; 4081 ext4_lock_group(sb, i); 4082 list_for_each(cur, &grp->bb_prealloc_list) { 4083 pa = list_entry(cur, struct ext4_prealloc_space, 4084 pa_group_list); 4085 spin_lock(&pa->pa_lock); 4086 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4087 NULL, &start); 4088 spin_unlock(&pa->pa_lock); 4089 printk(KERN_ERR "PA:%u:%d:%u \n", i, 4090 start, pa->pa_len); 4091 } 4092 ext4_unlock_group(sb, i); 4093 4094 if (grp->bb_free == 0) 4095 continue; 4096 printk(KERN_ERR "%u: %d/%d \n", 4097 i, grp->bb_free, grp->bb_fragments); 4098 } 4099 printk(KERN_ERR "\n"); 4100 } 4101 #else 4102 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4103 { 4104 return; 4105 } 4106 
#endif 4107 4108 /* 4109 * We use locality group preallocation for small size file. The size of the 4110 * file is determined by the current size or the resulting size after 4111 * allocation which ever is larger 4112 * 4113 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 4114 */ 4115 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 4116 { 4117 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4118 int bsbits = ac->ac_sb->s_blocksize_bits; 4119 loff_t size, isize; 4120 4121 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4122 return; 4123 4124 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4125 return; 4126 4127 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4128 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 4129 >> bsbits; 4130 4131 if ((size == isize) && 4132 !ext4_fs_is_busy(sbi) && 4133 (atomic_read(&ac->ac_inode->i_writecount) == 0)) { 4134 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 4135 return; 4136 } 4137 4138 if (sbi->s_mb_group_prealloc <= 0) { 4139 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 4140 return; 4141 } 4142 4143 /* don't use group allocation for large files */ 4144 size = max(size, isize); 4145 if (size > sbi->s_mb_stream_request) { 4146 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 4147 return; 4148 } 4149 4150 BUG_ON(ac->ac_lg != NULL); 4151 /* 4152 * locality group prealloc space are per cpu. The reason for having 4153 * per cpu locality group is to reduce the contention between block 4154 * request from multiple CPUs. 4155 */ 4156 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 4157 4158 /* we're going to use group allocation */ 4159 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 4160 4161 /* serialize all allocations in the group */ 4162 mutex_lock(&ac->ac_lg->lg_mutex); 4163 } 4164 4165 static noinline_for_stack int 4166 ext4_mb_initialize_context(struct ext4_allocation_context *ac, 4167 struct ext4_allocation_request *ar) 4168 { 4169 struct super_block *sb = ar->inode->i_sb; 4170 struct ext4_sb_info *sbi = EXT4_SB(sb); 4171 struct ext4_super_block *es = sbi->s_es; 4172 ext4_group_t group; 4173 unsigned int len; 4174 ext4_fsblk_t goal; 4175 ext4_grpblk_t block; 4176 4177 /* we can't allocate > group size */ 4178 len = ar->len; 4179 4180 /* just a dirty hack to filter too big requests */ 4181 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 4182 len = EXT4_CLUSTERS_PER_GROUP(sb); 4183 4184 /* start searching from the goal */ 4185 goal = ar->goal; 4186 if (goal < le32_to_cpu(es->s_first_data_block) || 4187 goal >= ext4_blocks_count(es)) 4188 goal = le32_to_cpu(es->s_first_data_block); 4189 ext4_get_group_no_and_offset(sb, goal, &group, &block); 4190 4191 /* set up allocation goals */ 4192 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 4193 ac->ac_status = AC_STATUS_CONTINUE; 4194 ac->ac_sb = sb; 4195 ac->ac_inode = ar->inode; 4196 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 4197 ac->ac_o_ex.fe_group = group; 4198 ac->ac_o_ex.fe_start = block; 4199 ac->ac_o_ex.fe_len = len; 4200 ac->ac_g_ex = ac->ac_o_ex; 4201 ac->ac_flags = ar->flags; 4202 4203 /* we have to define context: we'll we work with a file or 4204 * locality group. 
this is a policy, actually */ 4205 ext4_mb_group_or_file(ac); 4206 4207 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " 4208 "left: %u/%u, right %u/%u to %swritable\n", 4209 (unsigned) ar->len, (unsigned) ar->logical, 4210 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 4211 (unsigned) ar->lleft, (unsigned) ar->pleft, 4212 (unsigned) ar->lright, (unsigned) ar->pright, 4213 atomic_read(&ar->inode->i_writecount) ? "" : "non-"); 4214 return 0; 4215 4216 } 4217 4218 static noinline_for_stack void 4219 ext4_mb_discard_lg_preallocations(struct super_block *sb, 4220 struct ext4_locality_group *lg, 4221 int order, int total_entries) 4222 { 4223 ext4_group_t group = 0; 4224 struct ext4_buddy e4b; 4225 struct list_head discard_list; 4226 struct ext4_prealloc_space *pa, *tmp; 4227 4228 mb_debug(1, "discard locality group preallocation\n"); 4229 4230 INIT_LIST_HEAD(&discard_list); 4231 4232 spin_lock(&lg->lg_prealloc_lock); 4233 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 4234 pa_inode_list) { 4235 spin_lock(&pa->pa_lock); 4236 if (atomic_read(&pa->pa_count)) { 4237 /* 4238 * This is the pa that we just used 4239 * for block allocation. So don't 4240 * free that 4241 */ 4242 spin_unlock(&pa->pa_lock); 4243 continue; 4244 } 4245 if (pa->pa_deleted) { 4246 spin_unlock(&pa->pa_lock); 4247 continue; 4248 } 4249 /* only lg prealloc space */ 4250 BUG_ON(pa->pa_type != MB_GROUP_PA); 4251 4252 /* seems this one can be freed ... */ 4253 pa->pa_deleted = 1; 4254 spin_unlock(&pa->pa_lock); 4255 4256 list_del_rcu(&pa->pa_inode_list); 4257 list_add(&pa->u.pa_tmp_list, &discard_list); 4258 4259 total_entries--; 4260 if (total_entries <= 5) { 4261 /* 4262 * we want to keep only 5 entries 4263 * allowing it to grow to 8. This 4264 * mak sure we don't call discard 4265 * soon for this list. 4266 */ 4267 break; 4268 } 4269 } 4270 spin_unlock(&lg->lg_prealloc_lock); 4271 4272 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4273 4274 group = ext4_get_group_number(sb, pa->pa_pstart); 4275 if (ext4_mb_load_buddy(sb, group, &e4b)) { 4276 ext4_error(sb, "Error loading buddy information for %u", 4277 group); 4278 continue; 4279 } 4280 ext4_lock_group(sb, group); 4281 list_del(&pa->pa_group_list); 4282 ext4_mb_release_group_pa(&e4b, pa); 4283 ext4_unlock_group(sb, group); 4284 4285 ext4_mb_unload_buddy(&e4b); 4286 list_del(&pa->u.pa_tmp_list); 4287 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4288 } 4289 } 4290 4291 /* 4292 * We have incremented pa_count. So it cannot be freed at this 4293 * point. Also we hold lg_mutex. So no parallel allocation is 4294 * possible from this lg. That means pa_free cannot be updated. 4295 * 4296 * A parallel ext4_mb_discard_group_preallocations is possible. 4297 * which can cause the lg_prealloc_list to be updated. 
4298 */ 4299 4300 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4301 { 4302 int order, added = 0, lg_prealloc_count = 1; 4303 struct super_block *sb = ac->ac_sb; 4304 struct ext4_locality_group *lg = ac->ac_lg; 4305 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4306 4307 order = fls(pa->pa_free) - 1; 4308 if (order > PREALLOC_TB_SIZE - 1) 4309 /* The max size of hash table is PREALLOC_TB_SIZE */ 4310 order = PREALLOC_TB_SIZE - 1; 4311 /* Add the prealloc space to lg */ 4312 spin_lock(&lg->lg_prealloc_lock); 4313 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4314 pa_inode_list) { 4315 spin_lock(&tmp_pa->pa_lock); 4316 if (tmp_pa->pa_deleted) { 4317 spin_unlock(&tmp_pa->pa_lock); 4318 continue; 4319 } 4320 if (!added && pa->pa_free < tmp_pa->pa_free) { 4321 /* Add to the tail of the previous entry */ 4322 list_add_tail_rcu(&pa->pa_inode_list, 4323 &tmp_pa->pa_inode_list); 4324 added = 1; 4325 /* 4326 * we want to count the total 4327 * number of entries in the list 4328 */ 4329 } 4330 spin_unlock(&tmp_pa->pa_lock); 4331 lg_prealloc_count++; 4332 } 4333 if (!added) 4334 list_add_tail_rcu(&pa->pa_inode_list, 4335 &lg->lg_prealloc_list[order]); 4336 spin_unlock(&lg->lg_prealloc_lock); 4337 4338 /* Now trim the list to be not more than 8 elements */ 4339 if (lg_prealloc_count > 8) { 4340 ext4_mb_discard_lg_preallocations(sb, lg, 4341 order, lg_prealloc_count); 4342 return; 4343 } 4344 return ; 4345 } 4346 4347 /* 4348 * release all resource we used in allocation 4349 */ 4350 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 4351 { 4352 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4353 struct ext4_prealloc_space *pa = ac->ac_pa; 4354 if (pa) { 4355 if (pa->pa_type == MB_GROUP_PA) { 4356 /* see comment in ext4_mb_use_group_pa() */ 4357 spin_lock(&pa->pa_lock); 4358 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4359 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4360 pa->pa_free -= ac->ac_b_ex.fe_len; 4361 pa->pa_len -= ac->ac_b_ex.fe_len; 4362 spin_unlock(&pa->pa_lock); 4363 } 4364 } 4365 if (pa) { 4366 /* 4367 * We want to add the pa to the right bucket. 4368 * Remove it from the list and while adding 4369 * make sure the list to which we are adding 4370 * doesn't grow big. 
4371 */ 4372 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { 4373 spin_lock(pa->pa_obj_lock); 4374 list_del_rcu(&pa->pa_inode_list); 4375 spin_unlock(pa->pa_obj_lock); 4376 ext4_mb_add_n_trim(ac); 4377 } 4378 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4379 } 4380 if (ac->ac_bitmap_page) 4381 page_cache_release(ac->ac_bitmap_page); 4382 if (ac->ac_buddy_page) 4383 page_cache_release(ac->ac_buddy_page); 4384 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4385 mutex_unlock(&ac->ac_lg->lg_mutex); 4386 ext4_mb_collect_stats(ac); 4387 return 0; 4388 } 4389 4390 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 4391 { 4392 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4393 int ret; 4394 int freed = 0; 4395 4396 trace_ext4_mb_discard_preallocations(sb, needed); 4397 for (i = 0; i < ngroups && needed > 0; i++) { 4398 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4399 freed += ret; 4400 needed -= ret; 4401 } 4402 4403 return freed; 4404 } 4405 4406 /* 4407 * Main entry point into mballoc to allocate blocks 4408 * it tries to use preallocation first, then falls back 4409 * to usual allocation 4410 */ 4411 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4412 struct ext4_allocation_request *ar, int *errp) 4413 { 4414 int freed; 4415 struct ext4_allocation_context *ac = NULL; 4416 struct ext4_sb_info *sbi; 4417 struct super_block *sb; 4418 ext4_fsblk_t block = 0; 4419 unsigned int inquota = 0; 4420 unsigned int reserv_clstrs = 0; 4421 4422 might_sleep(); 4423 sb = ar->inode->i_sb; 4424 sbi = EXT4_SB(sb); 4425 4426 trace_ext4_request_blocks(ar); 4427 4428 /* Allow to use superuser reservation for quota file */ 4429 if (IS_NOQUOTA(ar->inode)) 4430 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 4431 4432 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 4433 /* Without delayed allocation we need to verify 4434 * there is enough free blocks to do block allocation 4435 * and verify allocation doesn't exceed the quota limits. 
4436 */ 4437 while (ar->len && 4438 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 4439 4440 /* let others to free the space */ 4441 cond_resched(); 4442 ar->len = ar->len >> 1; 4443 } 4444 if (!ar->len) { 4445 *errp = -ENOSPC; 4446 return 0; 4447 } 4448 reserv_clstrs = ar->len; 4449 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 4450 dquot_alloc_block_nofail(ar->inode, 4451 EXT4_C2B(sbi, ar->len)); 4452 } else { 4453 while (ar->len && 4454 dquot_alloc_block(ar->inode, 4455 EXT4_C2B(sbi, ar->len))) { 4456 4457 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4458 ar->len--; 4459 } 4460 } 4461 inquota = ar->len; 4462 if (ar->len == 0) { 4463 *errp = -EDQUOT; 4464 goto out; 4465 } 4466 } 4467 4468 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 4469 if (!ac) { 4470 ar->len = 0; 4471 *errp = -ENOMEM; 4472 goto out; 4473 } 4474 4475 *errp = ext4_mb_initialize_context(ac, ar); 4476 if (*errp) { 4477 ar->len = 0; 4478 goto out; 4479 } 4480 4481 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 4482 if (!ext4_mb_use_preallocated(ac)) { 4483 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 4484 ext4_mb_normalize_request(ac, ar); 4485 repeat: 4486 /* allocate space in core */ 4487 *errp = ext4_mb_regular_allocator(ac); 4488 if (*errp) 4489 goto discard_and_exit; 4490 4491 /* as we've just preallocated more space than 4492 * user requested originally, we store allocated 4493 * space in a special descriptor */ 4494 if (ac->ac_status == AC_STATUS_FOUND && 4495 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 4496 *errp = ext4_mb_new_preallocation(ac); 4497 if (*errp) { 4498 discard_and_exit: 4499 ext4_discard_allocated_blocks(ac); 4500 goto errout; 4501 } 4502 } 4503 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4504 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 4505 if (*errp == -EAGAIN) { 4506 /* 4507 * drop the reference that we took 4508 * in ext4_mb_use_best_found 4509 */ 4510 ext4_mb_release_context(ac); 4511 ac->ac_b_ex.fe_group = 0; 4512 ac->ac_b_ex.fe_start = 0; 4513 ac->ac_b_ex.fe_len = 0; 4514 ac->ac_status = AC_STATUS_CONTINUE; 4515 goto repeat; 4516 } else if (*errp) { 4517 ext4_discard_allocated_blocks(ac); 4518 goto errout; 4519 } else { 4520 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4521 ar->len = ac->ac_b_ex.fe_len; 4522 } 4523 } else { 4524 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 4525 if (freed) 4526 goto repeat; 4527 *errp = -ENOSPC; 4528 } 4529 4530 errout: 4531 if (*errp) { 4532 ac->ac_b_ex.fe_len = 0; 4533 ar->len = 0; 4534 ext4_mb_show_ac(ac); 4535 } 4536 ext4_mb_release_context(ac); 4537 out: 4538 if (ac) 4539 kmem_cache_free(ext4_ac_cachep, ac); 4540 if (inquota && ar->len < inquota) 4541 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 4542 if (!ar->len) { 4543 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 4544 /* release all the reserved blocks if non delalloc */ 4545 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4546 reserv_clstrs); 4547 } 4548 4549 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 4550 4551 return block; 4552 } 4553 4554 /* 4555 * We can merge two free data extents only if the physical blocks 4556 * are contiguous, AND the extents were freed by the same transaction, 4557 * AND the blocks are associated with the same group. 
4558 */ 4559 static int can_merge(struct ext4_free_data *entry1, 4560 struct ext4_free_data *entry2) 4561 { 4562 if ((entry1->efd_tid == entry2->efd_tid) && 4563 (entry1->efd_group == entry2->efd_group) && 4564 ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster)) 4565 return 1; 4566 return 0; 4567 } 4568 4569 static noinline_for_stack int 4570 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 4571 struct ext4_free_data *new_entry) 4572 { 4573 ext4_group_t group = e4b->bd_group; 4574 ext4_grpblk_t cluster; 4575 struct ext4_free_data *entry; 4576 struct ext4_group_info *db = e4b->bd_info; 4577 struct super_block *sb = e4b->bd_sb; 4578 struct ext4_sb_info *sbi = EXT4_SB(sb); 4579 struct rb_node **n = &db->bb_free_root.rb_node, *node; 4580 struct rb_node *parent = NULL, *new_node; 4581 4582 BUG_ON(!ext4_handle_valid(handle)); 4583 BUG_ON(e4b->bd_bitmap_page == NULL); 4584 BUG_ON(e4b->bd_buddy_page == NULL); 4585 4586 new_node = &new_entry->efd_node; 4587 cluster = new_entry->efd_start_cluster; 4588 4589 if (!*n) { 4590 /* first free block exent. We need to 4591 protect buddy cache from being freed, 4592 * otherwise we'll refresh it from 4593 * on-disk bitmap and lose not-yet-available 4594 * blocks */ 4595 page_cache_get(e4b->bd_buddy_page); 4596 page_cache_get(e4b->bd_bitmap_page); 4597 } 4598 while (*n) { 4599 parent = *n; 4600 entry = rb_entry(parent, struct ext4_free_data, efd_node); 4601 if (cluster < entry->efd_start_cluster) 4602 n = &(*n)->rb_left; 4603 else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 4604 n = &(*n)->rb_right; 4605 else { 4606 ext4_grp_locked_error(sb, group, 0, 4607 ext4_group_first_block_no(sb, group) + 4608 EXT4_C2B(sbi, cluster), 4609 "Block already on to-be-freed list"); 4610 return 0; 4611 } 4612 } 4613 4614 rb_link_node(new_node, parent, n); 4615 rb_insert_color(new_node, &db->bb_free_root); 4616 4617 /* Now try to see the extent can be merged to left and right */ 4618 node = rb_prev(new_node); 4619 if (node) { 4620 entry = rb_entry(node, struct ext4_free_data, efd_node); 4621 if (can_merge(entry, new_entry) && 4622 ext4_journal_callback_try_del(handle, &entry->efd_jce)) { 4623 new_entry->efd_start_cluster = entry->efd_start_cluster; 4624 new_entry->efd_count += entry->efd_count; 4625 rb_erase(node, &(db->bb_free_root)); 4626 kmem_cache_free(ext4_free_data_cachep, entry); 4627 } 4628 } 4629 4630 node = rb_next(new_node); 4631 if (node) { 4632 entry = rb_entry(node, struct ext4_free_data, efd_node); 4633 if (can_merge(new_entry, entry) && 4634 ext4_journal_callback_try_del(handle, &entry->efd_jce)) { 4635 new_entry->efd_count += entry->efd_count; 4636 rb_erase(node, &(db->bb_free_root)); 4637 kmem_cache_free(ext4_free_data_cachep, entry); 4638 } 4639 } 4640 /* Add the extent to transaction's private list */ 4641 ext4_journal_callback_add(handle, ext4_free_data_callback, 4642 &new_entry->efd_jce); 4643 return 0; 4644 } 4645 4646 /** 4647 * ext4_free_blocks() -- Free given blocks and update quota 4648 * @handle: handle for this transaction 4649 * @inode: inode 4650 * @block: start physical block to free 4651 * @count: number of blocks to count 4652 * @flags: flags used by ext4_free_blocks 4653 */ 4654 void ext4_free_blocks(handle_t *handle, struct inode *inode, 4655 struct buffer_head *bh, ext4_fsblk_t block, 4656 unsigned long count, int flags) 4657 { 4658 struct buffer_head *bitmap_bh = NULL; 4659 struct super_block *sb = inode->i_sb; 4660 struct ext4_group_desc *gdp; 4661 unsigned int overflow; 4662 
ext4_grpblk_t bit; 4663 struct buffer_head *gd_bh; 4664 ext4_group_t block_group; 4665 struct ext4_sb_info *sbi; 4666 struct ext4_buddy e4b; 4667 unsigned int count_clusters; 4668 int err = 0; 4669 int ret; 4670 4671 might_sleep(); 4672 if (bh) { 4673 if (block) 4674 BUG_ON(block != bh->b_blocknr); 4675 else 4676 block = bh->b_blocknr; 4677 } 4678 4679 sbi = EXT4_SB(sb); 4680 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 4681 !ext4_data_block_valid(sbi, block, count)) { 4682 ext4_error(sb, "Freeing blocks not in datazone - " 4683 "block = %llu, count = %lu", block, count); 4684 goto error_return; 4685 } 4686 4687 ext4_debug("freeing block %llu\n", block); 4688 trace_ext4_free_blocks(inode, block, count, flags); 4689 4690 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 4691 BUG_ON(count > 1); 4692 4693 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4694 inode, bh, block); 4695 } 4696 4697 /* 4698 * We need to make sure we don't reuse the freed block until 4699 * after the transaction is committed, which we can do by 4700 * treating the block as metadata, below. We make an 4701 * exception if the inode is to be written in writeback mode 4702 * since writeback mode has weak data consistency guarantees. 4703 */ 4704 if (!ext4_should_writeback_data(inode)) 4705 flags |= EXT4_FREE_BLOCKS_METADATA; 4706 4707 /* 4708 * If the extent to be freed does not begin on a cluster 4709 * boundary, we need to deal with partial clusters at the 4710 * beginning and end of the extent. Normally we will free 4711 * blocks at the beginning or the end unless we are explicitly 4712 * requested to avoid doing so. 4713 */ 4714 overflow = EXT4_PBLK_COFF(sbi, block); 4715 if (overflow) { 4716 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 4717 overflow = sbi->s_cluster_ratio - overflow; 4718 block += overflow; 4719 if (count > overflow) 4720 count -= overflow; 4721 else 4722 return; 4723 } else { 4724 block -= overflow; 4725 count += overflow; 4726 } 4727 } 4728 overflow = EXT4_LBLK_COFF(sbi, count); 4729 if (overflow) { 4730 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 4731 if (count > overflow) 4732 count -= overflow; 4733 else 4734 return; 4735 } else 4736 count += sbi->s_cluster_ratio - overflow; 4737 } 4738 4739 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 4740 int i; 4741 4742 for (i = 0; i < count; i++) { 4743 cond_resched(); 4744 bh = sb_find_get_block(inode->i_sb, block + i); 4745 if (!bh) 4746 continue; 4747 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4748 inode, bh, block + i); 4749 } 4750 } 4751 4752 do_more: 4753 overflow = 0; 4754 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4755 4756 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 4757 ext4_get_group_info(sb, block_group)))) 4758 return; 4759 4760 /* 4761 * Check to see if we are freeing blocks across a group 4762 * boundary. 
4763 */ 4764 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4765 overflow = EXT4_C2B(sbi, bit) + count - 4766 EXT4_BLOCKS_PER_GROUP(sb); 4767 count -= overflow; 4768 } 4769 count_clusters = EXT4_NUM_B2C(sbi, count); 4770 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4771 if (IS_ERR(bitmap_bh)) { 4772 err = PTR_ERR(bitmap_bh); 4773 bitmap_bh = NULL; 4774 goto error_return; 4775 } 4776 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 4777 if (!gdp) { 4778 err = -EIO; 4779 goto error_return; 4780 } 4781 4782 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 4783 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 4784 in_range(block, ext4_inode_table(sb, gdp), 4785 EXT4_SB(sb)->s_itb_per_group) || 4786 in_range(block + count - 1, ext4_inode_table(sb, gdp), 4787 EXT4_SB(sb)->s_itb_per_group)) { 4788 4789 ext4_error(sb, "Freeing blocks in system zone - " 4790 "Block = %llu, count = %lu", block, count); 4791 /* err = 0. ext4_std_error should be a no op */ 4792 goto error_return; 4793 } 4794 4795 BUFFER_TRACE(bitmap_bh, "getting write access"); 4796 err = ext4_journal_get_write_access(handle, bitmap_bh); 4797 if (err) 4798 goto error_return; 4799 4800 /* 4801 * We are about to modify some metadata. Call the journal APIs 4802 * to unshare ->b_data if a currently-committing transaction is 4803 * using it 4804 */ 4805 BUFFER_TRACE(gd_bh, "get_write_access"); 4806 err = ext4_journal_get_write_access(handle, gd_bh); 4807 if (err) 4808 goto error_return; 4809 #ifdef AGGRESSIVE_CHECK 4810 { 4811 int i; 4812 for (i = 0; i < count_clusters; i++) 4813 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4814 } 4815 #endif 4816 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 4817 4818 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4819 if (err) 4820 goto error_return; 4821 4822 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { 4823 struct ext4_free_data *new_entry; 4824 /* 4825 * blocks being freed are metadata. these blocks shouldn't 4826 * be used until this transaction is committed 4827 * 4828 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 4829 * to fail. 4830 */ 4831 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 4832 GFP_NOFS|__GFP_NOFAIL); 4833 new_entry->efd_start_cluster = bit; 4834 new_entry->efd_group = block_group; 4835 new_entry->efd_count = count_clusters; 4836 new_entry->efd_tid = handle->h_transaction->t_tid; 4837 4838 ext4_lock_group(sb, block_group); 4839 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4840 ext4_mb_free_metadata(handle, &e4b, new_entry); 4841 } else { 4842 /* need to update group_info->bb_free and bitmap 4843 * with group lock held. 
generate_buddy look at 4844 * them with group lock_held 4845 */ 4846 if (test_opt(sb, DISCARD)) { 4847 err = ext4_issue_discard(sb, block_group, bit, count); 4848 if (err && err != -EOPNOTSUPP) 4849 ext4_msg(sb, KERN_WARNING, "discard request in" 4850 " group:%d block:%d count:%lu failed" 4851 " with %d", block_group, bit, count, 4852 err); 4853 } else 4854 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 4855 4856 ext4_lock_group(sb, block_group); 4857 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4858 mb_free_blocks(inode, &e4b, bit, count_clusters); 4859 } 4860 4861 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 4862 ext4_free_group_clusters_set(sb, gdp, ret); 4863 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); 4864 ext4_group_desc_csum_set(sb, block_group, gdp); 4865 ext4_unlock_group(sb, block_group); 4866 4867 if (sbi->s_log_groups_per_flex) { 4868 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 4869 atomic64_add(count_clusters, 4870 &sbi->s_flex_groups[flex_group].free_clusters); 4871 } 4872 4873 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 4874 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 4875 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); 4876 4877 ext4_mb_unload_buddy(&e4b); 4878 4879 /* We dirtied the bitmap block */ 4880 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 4881 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4882 4883 /* And the group descriptor block */ 4884 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 4885 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 4886 if (!err) 4887 err = ret; 4888 4889 if (overflow && !err) { 4890 block += count; 4891 count = overflow; 4892 put_bh(bitmap_bh); 4893 goto do_more; 4894 } 4895 error_return: 4896 brelse(bitmap_bh); 4897 ext4_std_error(sb, err); 4898 return; 4899 } 4900 4901 /** 4902 * ext4_group_add_blocks() -- Add given blocks to an existing group 4903 * @handle: handle to this transaction 4904 * @sb: super block 4905 * @block: start physical block to add to the block group 4906 * @count: number of blocks to free 4907 * 4908 * This marks the blocks as free in the bitmap and buddy. 4909 */ 4910 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 4911 ext4_fsblk_t block, unsigned long count) 4912 { 4913 struct buffer_head *bitmap_bh = NULL; 4914 struct buffer_head *gd_bh; 4915 ext4_group_t block_group; 4916 ext4_grpblk_t bit; 4917 unsigned int i; 4918 struct ext4_group_desc *desc; 4919 struct ext4_sb_info *sbi = EXT4_SB(sb); 4920 struct ext4_buddy e4b; 4921 int err = 0, ret, blk_free_count; 4922 ext4_grpblk_t blocks_freed; 4923 4924 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 4925 4926 if (count == 0) 4927 return 0; 4928 4929 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4930 /* 4931 * Check to see if we are freeing blocks across a group 4932 * boundary. 
4933 */ 4934 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4935 ext4_warning(sb, "too much blocks added to group %u\n", 4936 block_group); 4937 err = -EINVAL; 4938 goto error_return; 4939 } 4940 4941 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4942 if (IS_ERR(bitmap_bh)) { 4943 err = PTR_ERR(bitmap_bh); 4944 bitmap_bh = NULL; 4945 goto error_return; 4946 } 4947 4948 desc = ext4_get_group_desc(sb, block_group, &gd_bh); 4949 if (!desc) { 4950 err = -EIO; 4951 goto error_return; 4952 } 4953 4954 if (in_range(ext4_block_bitmap(sb, desc), block, count) || 4955 in_range(ext4_inode_bitmap(sb, desc), block, count) || 4956 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || 4957 in_range(block + count - 1, ext4_inode_table(sb, desc), 4958 sbi->s_itb_per_group)) { 4959 ext4_error(sb, "Adding blocks in system zones - " 4960 "Block = %llu, count = %lu", 4961 block, count); 4962 err = -EINVAL; 4963 goto error_return; 4964 } 4965 4966 BUFFER_TRACE(bitmap_bh, "getting write access"); 4967 err = ext4_journal_get_write_access(handle, bitmap_bh); 4968 if (err) 4969 goto error_return; 4970 4971 /* 4972 * We are about to modify some metadata. Call the journal APIs 4973 * to unshare ->b_data if a currently-committing transaction is 4974 * using it 4975 */ 4976 BUFFER_TRACE(gd_bh, "get_write_access"); 4977 err = ext4_journal_get_write_access(handle, gd_bh); 4978 if (err) 4979 goto error_return; 4980 4981 for (i = 0, blocks_freed = 0; i < count; i++) { 4982 BUFFER_TRACE(bitmap_bh, "clear bit"); 4983 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 4984 ext4_error(sb, "bit already cleared for block %llu", 4985 (ext4_fsblk_t)(block + i)); 4986 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 4987 } else { 4988 blocks_freed++; 4989 } 4990 } 4991 4992 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4993 if (err) 4994 goto error_return; 4995 4996 /* 4997 * need to update group_info->bb_free and bitmap 4998 * with group lock held. generate_buddy look at 4999 * them with group lock_held 5000 */ 5001 ext4_lock_group(sb, block_group); 5002 mb_clear_bits(bitmap_bh->b_data, bit, count); 5003 mb_free_blocks(NULL, &e4b, bit, count); 5004 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); 5005 ext4_free_group_clusters_set(sb, desc, blk_free_count); 5006 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); 5007 ext4_group_desc_csum_set(sb, block_group, desc); 5008 ext4_unlock_group(sb, block_group); 5009 percpu_counter_add(&sbi->s_freeclusters_counter, 5010 EXT4_NUM_B2C(sbi, blocks_freed)); 5011 5012 if (sbi->s_log_groups_per_flex) { 5013 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 5014 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), 5015 &sbi->s_flex_groups[flex_group].free_clusters); 5016 } 5017 5018 ext4_mb_unload_buddy(&e4b); 5019 5020 /* We dirtied the bitmap block */ 5021 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 5022 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 5023 5024 /* And the group descriptor block */ 5025 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 5026 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 5027 if (!err) 5028 err = ret; 5029 5030 error_return: 5031 brelse(bitmap_bh); 5032 ext4_std_error(sb, err); 5033 return err; 5034 } 5035 5036 /** 5037 * ext4_trim_extent -- function to TRIM one single free extent in the group 5038 * @sb: super block for the file system 5039 * @start: starting block of the free extent in the alloc. group 5040 * @count: number of blocks to TRIM 5041 * @group: alloc. 
group we are working with
5042 * @e4b: ext4 buddy for the group
5043 *
5044 * Trim "count" blocks starting at "start" in the "group". To assure that no
5045 * one will allocate those blocks, mark them as used in the buddy bitmap.
5046 * This must be called under the group lock.
5047 */
5048 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5049 ext4_group_t group, struct ext4_buddy *e4b)
5050 __releases(bitlock)
5051 __acquires(bitlock)
5052 {
5053 struct ext4_free_extent ex;
5054 int ret = 0;
5055
5056 trace_ext4_trim_extent(sb, group, start, count);
5057
5058 assert_spin_locked(ext4_group_lock_ptr(sb, group));
5059
5060 ex.fe_start = start;
5061 ex.fe_group = group;
5062 ex.fe_len = count;
5063
5064 /*
5065 * Mark blocks used, so no one can reuse them while
5066 * being trimmed.
5067 */
5068 mb_mark_used(e4b, &ex);
5069 ext4_unlock_group(sb, group);
5070 ret = ext4_issue_discard(sb, group, start, count);
5071 ext4_lock_group(sb, group);
5072 mb_free_blocks(NULL, e4b, start, ex.fe_len);
5073 return ret;
5074 }
5075
5076 /**
5077 * ext4_trim_all_free -- function to trim all free space in alloc. group
5078 * @sb: super block for file system
5079 * @group: group to be trimmed
5080 * @start: first group block to examine
5081 * @max: last group block to examine
5082 * @minblocks: minimum extent block count
5083 *
5084 * ext4_trim_all_free walks through the group's buddy bitmap searching for
5085 * free extents. When a free extent is found, ext4_trim_extent is called to
5086 * TRIM it.
5087 *
5088 *
5089 * Each free extent found is first marked as used in the group buddy bitmap,
5090 * then a TRIM command is issued for it, and finally the extent is freed
5091 * again in the group buddy bitmap. This is repeated until the whole group
5092 * has been scanned.
5093 */
5094 static ext4_grpblk_t
5095 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5096 ext4_grpblk_t start, ext4_grpblk_t max,
5097 ext4_grpblk_t minblocks)
5098 {
5099 void *bitmap;
5100 ext4_grpblk_t next, count = 0, free_count = 0;
5101 struct ext4_buddy e4b;
5102 int ret = 0;
5103
5104 trace_ext4_trim_all_free(sb, group, start, max);
5105
5106 ret = ext4_mb_load_buddy(sb, group, &e4b);
5107 if (ret) {
5108 ext4_error(sb, "Error in loading buddy "
5109 "information for %u", group);
5110 return ret;
5111 }
5112 bitmap = e4b.bd_bitmap;
5113
5114 ext4_lock_group(sb, group);
5115 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5116 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5117 goto out;
5118
5119 start = (e4b.bd_info->bb_first_free > start) ?
5120 e4b.bd_info->bb_first_free : start; 5121 5122 while (start <= max) { 5123 start = mb_find_next_zero_bit(bitmap, max + 1, start); 5124 if (start > max) 5125 break; 5126 next = mb_find_next_bit(bitmap, max + 1, start); 5127 5128 if ((next - start) >= minblocks) { 5129 ret = ext4_trim_extent(sb, start, 5130 next - start, group, &e4b); 5131 if (ret && ret != -EOPNOTSUPP) 5132 break; 5133 ret = 0; 5134 count += next - start; 5135 } 5136 free_count += next - start; 5137 start = next + 1; 5138 5139 if (fatal_signal_pending(current)) { 5140 count = -ERESTARTSYS; 5141 break; 5142 } 5143 5144 if (need_resched()) { 5145 ext4_unlock_group(sb, group); 5146 cond_resched(); 5147 ext4_lock_group(sb, group); 5148 } 5149 5150 if ((e4b.bd_info->bb_free - free_count) < minblocks) 5151 break; 5152 } 5153 5154 if (!ret) { 5155 ret = count; 5156 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 5157 } 5158 out: 5159 ext4_unlock_group(sb, group); 5160 ext4_mb_unload_buddy(&e4b); 5161 5162 ext4_debug("trimmed %d blocks in the group %d\n", 5163 count, group); 5164 5165 return ret; 5166 } 5167 5168 /** 5169 * ext4_trim_fs() -- trim ioctl handle function 5170 * @sb: superblock for filesystem 5171 * @range: fstrim_range structure 5172 * 5173 * start: First Byte to trim 5174 * len: number of Bytes to trim from start 5175 * minlen: minimum extent length in Bytes 5176 * ext4_trim_fs goes through all allocation groups containing Bytes from 5177 * start to start+len. For each such a group ext4_trim_all_free function 5178 * is invoked to trim all free space. 5179 */ 5180 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 5181 { 5182 struct ext4_group_info *grp; 5183 ext4_group_t group, first_group, last_group; 5184 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 5185 uint64_t start, end, minlen, trimmed = 0; 5186 ext4_fsblk_t first_data_blk = 5187 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 5188 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 5189 int ret = 0; 5190 5191 start = range->start >> sb->s_blocksize_bits; 5192 end = start + (range->len >> sb->s_blocksize_bits) - 1; 5193 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 5194 range->minlen >> sb->s_blocksize_bits); 5195 5196 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 5197 start >= max_blks || 5198 range->len < sb->s_blocksize) 5199 return -EINVAL; 5200 if (end >= max_blks) 5201 end = max_blks - 1; 5202 if (end <= first_data_blk) 5203 goto out; 5204 if (start < first_data_blk) 5205 start = first_data_blk; 5206 5207 /* Determine first and last group to examine based on start and end */ 5208 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 5209 &first_group, &first_cluster); 5210 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 5211 &last_group, &last_cluster); 5212 5213 /* end now represents the last cluster to discard in this group */ 5214 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 5215 5216 for (group = first_group; group <= last_group; group++) { 5217 grp = ext4_get_group_info(sb, group); 5218 /* We only do this if the grp has never been initialized */ 5219 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 5220 ret = ext4_mb_init_group(sb, group); 5221 if (ret) 5222 break; 5223 } 5224 5225 /* 5226 * For all the groups except the last one, last cluster will 5227 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 5228 * change it for the last group, note that last_cluster is 5229 * already computed earlier by ext4_get_group_no_and_offset() 5230 */ 5231 if (group == last_group) 5232 end = last_cluster; 5233 5234 if (grp->bb_free >= 
minlen) { 5235 cnt = ext4_trim_all_free(sb, group, first_cluster, 5236 end, minlen); 5237 if (cnt < 0) { 5238 ret = cnt; 5239 break; 5240 } 5241 trimmed += cnt; 5242 } 5243 5244 /* 5245 * For every group except the first one, we are sure 5246 * that the first cluster to discard will be cluster #0. 5247 */ 5248 first_cluster = 0; 5249 } 5250 5251 if (!ret) 5252 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); 5253 5254 out: 5255 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 5256 return ret; 5257 } 5258
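/*
 * Illustrative note (not part of the original mballoc.c): ext4_trim_fs()
 * above is driven by the FITRIM ioctl, which is how the fstrim(8) utility
 * reaches it. A minimal userspace sketch follows; the mount point "/mnt"
 * and the 1 MiB minlen are hypothetical values. The byte-based
 * start/len/minlen fields of struct fstrim_range are converted to
 * blocks/clusters at the top of ext4_trim_fs(), and on return range.len is
 * rewritten to the number of bytes actually trimmed (see the assignment to
 * range->len just before ext4_trim_fs() returns). FITRIM needs
 * CAP_SYS_ADMIN.
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>			// FITRIM, struct fstrim_range
 *
 *	int main(void)
 *	{
 *		struct fstrim_range range;
 *		int fd = open("/mnt", O_RDONLY);	// any fd on the ext4 fs
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&range, 0, sizeof(range));
 *		range.start = 0;			// first byte to trim
 *		range.len = ULLONG_MAX;			// trim the whole filesystem
 *		range.minlen = 1024 * 1024;		// skip free extents < 1 MiB
 *		if (ioctl(fd, FITRIM, &range) < 0)	// ends up in ext4_trim_fs()
 *			return 1;
 *		printf("trimmed %llu bytes\n",
 *		       (unsigned long long)range.len);	// updated by ext4_trim_fs()
 *		return 0;
 *	}
 */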