/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks
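 *
 * For illustration only (field names as above, all numbers invented): if an
 * inode PA has pa_lstart = 100, pa_pstart = 5000 and pa_len = 16, a request
 * for logical block 104 can be served from it, yielding physical block
 * 5000 + (104 - 100) = 5004, with pa_free shrinking accordingly. A request
 * for logical block 200 would not match this PA even if pa_free were
 * non-zero, because 200 lies outside [pa_lstart, pa_lstart + pa_len).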
 *
 * The important thing to note about inode prealloc space is that
 * we don't modify the values associated with inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks.  So it can have information regarding groups_per_page
 * which is blocks_per_page/2
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
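 *
 * A worked example (numbers purely illustrative): with the defaults above,
 * a group prealloc request is normalized to 512 blocks. Had the file system
 * been mounted with -O stripe=384, the request would instead be normalized
 * to 768, the smallest multiple of 384 that is greater than 512.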
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both types of prealloc space are populated as above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA		on-disk += N; PA -= N
 *  - discard locality group PA		buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
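 *
 *  a purely illustrative trace of the bookkeeping above (counts, not block
 *  numbers; every figure invented): start with on-disk = 100 used blocks
 *  and one PA of 8 blocks, so init buddy gives buddy = 108 used. the owner
 *  then writes 3 blocks from the PA: on-disk += 3 (103), PA -= 3 (5), buddy
 *  stays at 108. discarding the inode PA returns the 5 never-used blocks:
 *  buddy -= 5 (103), PA = 0, and all three structures agree again.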
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set or/and PA covers corresponding
 *     block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - PA is referenced and while it is, no discard is possible
 *  - PA is referenced until the blocks are marked in the on-disk bitmap
 *  - PA changes only after on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
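
/*
 * Worked example for mb_correct_addr_and_bit() (illustrative numbers only):
 * on a 64-bit machine, addr = 0x1005 and *bit = 2 become
 * *bit = 2 + ((0x1005 & 7) << 3) = 42 and addr = 0x1000, i.e. the same bit
 * expressed relative to the containing unsigned-long-aligned word, which is
 * what ext4_test_bit() and friends require on architectures like powerpc.
 */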

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
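
/*
 * Layout sketch for mb_find_buddy() (assuming 4KiB blocks, so
 * bd_blkbits = 12): order 0 is the block bitmap itself, with
 * 1 << (12 + 3) = 32768 bits. The order-k bitmaps (k >= 1) are packed one
 * after another inside the buddy block, so s_mb_offsets[k] is the byte
 * offset of a bitmap holding s_mb_maxs[k] = 32768 >> k bits: 16384 bits at
 * order 1, 8192 at order 2, and so on, one bit per 2^k-block chunk.
 */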

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks starting at @first with length @len into
 * smaller chunks with power of 2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
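
/*
 * Worked example for ext4_mb_mark_free_simple() (values invented): a free
 * run of first = 5, len = 9 is carved into the largest chunks that are both
 * size- and alignment-compatible:
 *
 *	first = 5,  len = 9: max = ffs(5)-1  = 0, min = fls(9)-1 = 3 -> 1 @ 5
 *	first = 6,  len = 8: max = ffs(6)-1  = 1, min = fls(8)-1 = 3 -> 2 @ 6
 *	first = 8,  len = 6: max = ffs(8)-1  = 3, min = fls(6)-1 = 2 -> 4 @ 8
 *	first = 12, len = 2: max = ffs(12)-1 = 2, min = fls(2)-1 = 1 -> 2 @ 12
 *
 * i.e. chunks of order 0, 1, 2 and 1, each recorded in bb_counters[] and,
 * for order > 0, flagged free in the corresponding buddy bitmap.
 */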

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider group descriptor
		 * corrupt and update bb_free using bitmap value
		 */
		grp->bb_free = free;
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
		ext4_set_bits(buddy, 0, count);
	}
	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
		sizeof(*e4b->bd_info->bb_counters) *
		(e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
		e4b->bd_bitmap, e4b->bd_group);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
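
/*
 * A concrete layout example (illustrative, assuming PAGE_SIZE = 4096 and
 * blocksize = 1024): blocks_per_page = 4 and groups_per_page = 2, so page 0
 * holds [g0 bitmap][g0 buddy][g1 bitmap][g1 buddy], page 1 holds groups 2
 * and 3, and in general group g's bitmap lives at block 2g and its buddy at
 * block 2g + 1 of the buddy cache inode. With blocksize == PAGE_SIZE the
 * bitmap and buddy of a group land in two consecutive pages instead.
 */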

static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_page = PAGE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, gfp);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure another parallel
 * init_group on the same buddy page doesn't happen while holding the buddy
 * page lock.
 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		put_page(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		put_page(e4b->bd_buddy_page);
	}
}
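
/*
 * Page arithmetic example for the two helpers above (numbers illustrative):
 * for group 5 with 1KiB blocks and 4KiB pages, blocks_per_page = 4, so the
 * bitmap is block 10 -> page 2, two blocks into the page, and the buddy is
 * block 11 on the same page (bd_buddy_page stays NULL). With 4KiB blocks
 * blocks_per_page = 1, so the bitmap is page 10 and the buddy page 11, and
 * both pages must be locked.
 */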

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize it. So
			 * wait for it to initialize.
			 */
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL, gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
							 gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		put_page(page);
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		put_page(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
			      struct ext4_buddy *e4b)
{
	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		put_page(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	int bb_incr = 1 << (e4b->bd_blkbits - 1);
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = e4b->bd_buddy;
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += bb_incr;
		bb_incr >>= 1;
		order++;
	}
	return 0;
}

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}
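
/*
 * Sketch of the word-at-a-time fast path above (invented numbers): clearing
 * cur = 3, len = 70 covers bits 3..72. Bits 3..31 go through mb_clear_bit()
 * one at a time; at cur = 32 the remaining span is still >= 32 bits, so the
 * word holding bits 32..63 is zeroed with a single store; bits 64..72 are
 * again cleared individually. mb_test_and_clear_bits() and ext4_set_bits()
 * below follow the same pattern.
 */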

/* clear bits in given range
 * will return first found zero bit if any, -1 otherwise
 */
static int mb_test_and_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;
	int zero_bit = -1;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			if (*addr != (__u32)(-1) && zero_bit == -1)
				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
			*addr = 0;
			cur += 32;
			continue;
		}
		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
			zero_bit = cur;
		cur++;
	}

	return zero_bit;
}

void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

/*
 * _________________________________________________________________ */

static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
{
	if (mb_test_bit(*bit + side, bitmap)) {
		mb_clear_bit(*bit, bitmap);
		(*bit) -= side;
		return 1;
	}
	else {
		(*bit) += side;
		mb_set_bit(*bit, bitmap);
		return -1;
	}
}

static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
{
	int max;
	int order = 1;
	void *buddy = mb_find_buddy(e4b, order, &max);

	while (buddy) {
		void *buddy2;

		/* Bits in range [first; last] are known to be set since
		 * corresponding blocks were allocated. Bits in range
		 * (first; last) will stay set because they form buddies on
		 * upper layer. We just deal with borders if they don't
		 * align with upper layer and then go up.
		 * Releasing entire group is all about clearing
		 * single bit of highest order buddy.
		 */

		/* Example:
		 * ---------------------------------
		 * |   1   |   1   |   1   |   1   |
		 * ---------------------------------
		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
		 * ---------------------------------
		 *   0   1   2   3   4   5   6   7
		 *      \_____________________/
		 *
		 * Neither [1] nor [6] is aligned to above layer.
		 * Left neighbour [0] is free, so mark it busy,
		 * decrease bb_counters and extend range to
		 * [0; 6]
		 * Right neighbour [7] is busy. It can't be coalesced with [6],
		 * so mark [6] free, increase bb_counters and shrink range to
		 * [0; 5].
		 * Then shift range to [0; 2], go up and do the same.
		 */


		if (first & 1)
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
		if (!(last & 1))
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
		if (first > last)
			break;
		order++;

		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
			mb_clear_bits(buddy, first, last - first + 1);
			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
			break;
		}
		first >>= 1;
		last >>= 1;
		buddy = buddy2;
	}
}

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int left_is_free = 0;
	int right_is_free = 0;
	int block;
	int last = first + count - 1;
	struct super_block *sb = e4b->bd_sb;

	if (WARN_ON(count == 0))
		return;
	BUG_ON(last >= (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	/* Don't bother if the block group is corrupt. */
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return;

	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* access memory sequentially: check left neighbour,
	 * clear range and then check right neighbour
	 */
	if (first != 0)
		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);

	if (unlikely(block != -1)) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		ext4_fsblk_t blocknr;

		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
		blocknr += EXT4_C2B(EXT4_SB(sb), block);
		ext4_grp_locked_error(sb, e4b->bd_group,
				      inode ? inode->i_ino : 0,
				      blocknr,
				      "freeing already freed block "
				      "(bit %u); block bitmap corrupt.",
				      block);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   e4b->bd_info->bb_free);
		/* Mark the block group as corrupt. */
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
			&e4b->bd_info->bb_state);
		mb_regenerate_buddy(e4b);
		goto done;
	}

	/* let's maintain fragments counter */
	if (left_is_free && right_is_free)
		e4b->bd_info->bb_fragments--;
	else if (!left_is_free && !right_is_free)
		e4b->bd_info->bb_fragments++;

	/* buddy[0] == bd_bitmap is a special case, so handle
	 * it right away and let mb_buddy_mark_free stay free of
	 * zero order checks.
	 * Check if neighbours are to be coalesced,
	 * adjust bitmap bb_counters and borders appropriately.
	 */
	if (first & 1) {
		first += !left_is_free;
		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
	}
	if (!(last & 1)) {
		last -= !right_is_free;
		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
	}

	if (first <= last)
		mb_buddy_mark_free(e4b, first >> 1, last >> 1);

done:
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

static int mb_find_extent(struct ext4_buddy *e4b, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max, order;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);
	block = block >> order;

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
		/* Should never happen! (but apparently sometimes does?!?) */
		WARN_ON(1);
		ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
			   "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
			   block, order, needed, ex->fe_group, ex->fe_start,
			   ex->fe_len, ex->fe_logical);
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
	}
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
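
/*
 * Splitting example for mb_mark_used() above (invented numbers): to
 * allocate start = 8, len = 3 out of a free order-3 chunk covering blocks
 * 8..15, the order-3 buddy bit is marked used and the chunk split into two
 * order-2 halves, then 8..11 into order-1 pairs. 8..9 is taken whole,
 * 10..11 is split once more and block 10 taken, leaving block 11 (order 0)
 * and 12..15 (order 2) free. The first split is recorded in the return
 * value as len | (ord << 16) = 3 | (3 << 16), which the caller stashes in
 * ac_tail/ac_buddy for statistics.
 */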

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}
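
/*
 * The selection policy implemented by ext4_mb_measure_extent() below, in
 * action (invented numbers, goal gex->fe_len = 16): a first find of 12
 * blocks is stored as best-so-far; a later find of 20 replaces it, since
 * while the goal is unmet any larger extent wins. Once the best extent
 * satisfies the goal, preference flips to the smallest extent that still
 * satisfies it, so a subsequent find of 18 replaces the 20. An exact
 * 16-block find is used on the spot via ext4_mb_use_best_found().
 */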
1745 */ 1746 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 1747 struct ext4_free_extent *ex, 1748 struct ext4_buddy *e4b) 1749 { 1750 struct ext4_free_extent *bex = &ac->ac_b_ex; 1751 struct ext4_free_extent *gex = &ac->ac_g_ex; 1752 1753 BUG_ON(ex->fe_len <= 0); 1754 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1755 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1756 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 1757 1758 ac->ac_found++; 1759 1760 /* 1761 * The special case - take what you catch first 1762 */ 1763 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1764 *bex = *ex; 1765 ext4_mb_use_best_found(ac, e4b); 1766 return; 1767 } 1768 1769 /* 1770 * Let's check whether the chuck is good enough 1771 */ 1772 if (ex->fe_len == gex->fe_len) { 1773 *bex = *ex; 1774 ext4_mb_use_best_found(ac, e4b); 1775 return; 1776 } 1777 1778 /* 1779 * If this is first found extent, just store it in the context 1780 */ 1781 if (bex->fe_len == 0) { 1782 *bex = *ex; 1783 return; 1784 } 1785 1786 /* 1787 * If new found extent is better, store it in the context 1788 */ 1789 if (bex->fe_len < gex->fe_len) { 1790 /* if the request isn't satisfied, any found extent 1791 * larger than previous best one is better */ 1792 if (ex->fe_len > bex->fe_len) 1793 *bex = *ex; 1794 } else if (ex->fe_len > gex->fe_len) { 1795 /* if the request is satisfied, then we try to find 1796 * an extent that still satisfy the request, but is 1797 * smaller than previous one */ 1798 if (ex->fe_len < bex->fe_len) 1799 *bex = *ex; 1800 } 1801 1802 ext4_mb_check_limits(ac, e4b, 0); 1803 } 1804 1805 static noinline_for_stack 1806 int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 1807 struct ext4_buddy *e4b) 1808 { 1809 struct ext4_free_extent ex = ac->ac_b_ex; 1810 ext4_group_t group = ex.fe_group; 1811 int max; 1812 int err; 1813 1814 BUG_ON(ex.fe_len <= 0); 1815 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1816 if (err) 1817 return err; 1818 1819 ext4_lock_group(ac->ac_sb, group); 1820 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 1821 1822 if (max > 0) { 1823 ac->ac_b_ex = ex; 1824 ext4_mb_use_best_found(ac, e4b); 1825 } 1826 1827 ext4_unlock_group(ac->ac_sb, group); 1828 ext4_mb_unload_buddy(e4b); 1829 1830 return 0; 1831 } 1832 1833 static noinline_for_stack 1834 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 1835 struct ext4_buddy *e4b) 1836 { 1837 ext4_group_t group = ac->ac_g_ex.fe_group; 1838 int max; 1839 int err; 1840 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1841 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 1842 struct ext4_free_extent ex; 1843 1844 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 1845 return 0; 1846 if (grp->bb_free == 0) 1847 return 0; 1848 1849 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1850 if (err) 1851 return err; 1852 1853 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 1854 ext4_mb_unload_buddy(e4b); 1855 return 0; 1856 } 1857 1858 ext4_lock_group(ac->ac_sb, group); 1859 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 1860 ac->ac_g_ex.fe_len, &ex); 1861 ex.fe_logical = 0xDEADFA11; /* debug value */ 1862 1863 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 1864 ext4_fsblk_t start; 1865 1866 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 1867 ex.fe_start; 1868 /* use do_div to get remainder (would be 64-bit modulo) */ 1869 if (do_div(start, sbi->s_stripe) == 0) { 1870 ac->ac_found++; 1871 ac->ac_b_ex = ex; 1872 
ext4_mb_use_best_found(ac, e4b); 1873 } 1874 } else if (max >= ac->ac_g_ex.fe_len) { 1875 BUG_ON(ex.fe_len <= 0); 1876 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1877 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1878 ac->ac_found++; 1879 ac->ac_b_ex = ex; 1880 ext4_mb_use_best_found(ac, e4b); 1881 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 1882 /* Sometimes, the caller may want to merge even a small 1883 * number of blocks into an existing extent */ 1884 BUG_ON(ex.fe_len <= 0); 1885 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1886 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1887 ac->ac_found++; 1888 ac->ac_b_ex = ex; 1889 ext4_mb_use_best_found(ac, e4b); 1890 } 1891 ext4_unlock_group(ac->ac_sb, group); 1892 ext4_mb_unload_buddy(e4b); 1893 1894 return 0; 1895 } 1896 1897 /* 1898 * The routine scans buddy structures (not the bitmap!) from the given 1899 * order to the max order and tries to find a big enough chunk to satisfy the request 1900 */ 1901 static noinline_for_stack 1902 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1903 struct ext4_buddy *e4b) 1904 { 1905 struct super_block *sb = ac->ac_sb; 1906 struct ext4_group_info *grp = e4b->bd_info; 1907 void *buddy; 1908 int i; 1909 int k; 1910 int max; 1911 1912 BUG_ON(ac->ac_2order <= 0); 1913 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { 1914 if (grp->bb_counters[i] == 0) 1915 continue; 1916 1917 buddy = mb_find_buddy(e4b, i, &max); 1918 BUG_ON(buddy == NULL); 1919 1920 k = mb_find_next_zero_bit(buddy, max, 0); 1921 BUG_ON(k >= max); 1922 1923 ac->ac_found++; 1924 1925 ac->ac_b_ex.fe_len = 1 << i; 1926 ac->ac_b_ex.fe_start = k << i; 1927 ac->ac_b_ex.fe_group = e4b->bd_group; 1928 1929 ext4_mb_use_best_found(ac, e4b); 1930 1931 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); 1932 1933 if (EXT4_SB(sb)->s_mb_stats) 1934 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 1935 1936 break; 1937 } 1938 } 1939 1940 /* 1941 * The routine scans the group and measures all found extents. 1942 * In order to optimize scanning, the caller must pass the number of 1943 * free blocks in the group, so the routine can know the upper limit. 1944 */ 1945 static noinline_for_stack 1946 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 1947 struct ext4_buddy *e4b) 1948 { 1949 struct super_block *sb = ac->ac_sb; 1950 void *bitmap = e4b->bd_bitmap; 1951 struct ext4_free_extent ex; 1952 int i; 1953 int free; 1954 1955 free = e4b->bd_info->bb_free; 1956 BUG_ON(free <= 0); 1957 1958 i = e4b->bd_info->bb_first_free; 1959 1960 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1961 i = mb_find_next_zero_bit(bitmap, 1962 EXT4_CLUSTERS_PER_GROUP(sb), i); 1963 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 1964 /* 1965 * If we have a corrupt bitmap, we won't find any 1966 * free blocks even though group info says we 1967 * have free blocks 1968 */ 1969 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1970 "%d free clusters as per " 1971 "group info. But bitmap says 0", 1972 free); 1973 break; 1974 } 1975 1976 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 1977 BUG_ON(ex.fe_len <= 0); 1978 if (free < ex.fe_len) { 1979 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1980 "%d free clusters as per " 1981 "group info. But got %d blocks", 1982 free, ex.fe_len); 1983 /* 1984 * The number of free blocks differs. This mostly 1985 * indicates that the bitmap is corrupt. So exit 1986 * without claiming the space.
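 * Breaking out here abandons the group without claiming anything; the allocator then carries on with the next candidate group.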
1987 */ 1988 break; 1989 } 1990 ex.fe_logical = 0xDEADC0DE; /* debug value */ 1991 ext4_mb_measure_extent(ac, &ex, e4b); 1992 1993 i += ex.fe_len; 1994 free -= ex.fe_len; 1995 } 1996 1997 ext4_mb_check_limits(ac, e4b, 1); 1998 } 1999 2000 /* 2001 * This is a special case for storage arrays like RAID5: 2002 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2003 */ 2004 static noinline_for_stack 2005 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2006 struct ext4_buddy *e4b) 2007 { 2008 struct super_block *sb = ac->ac_sb; 2009 struct ext4_sb_info *sbi = EXT4_SB(sb); 2010 void *bitmap = e4b->bd_bitmap; 2011 struct ext4_free_extent ex; 2012 ext4_fsblk_t first_group_block; 2013 ext4_fsblk_t a; 2014 ext4_grpblk_t i; 2015 int max; 2016 2017 BUG_ON(sbi->s_stripe == 0); 2018 2019 /* find first stripe-aligned block in group */ 2020 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2021 2022 a = first_group_block + sbi->s_stripe - 1; 2023 do_div(a, sbi->s_stripe); 2024 i = (a * sbi->s_stripe) - first_group_block; 2025 2026 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2027 if (!mb_test_bit(i, bitmap)) { 2028 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2029 if (max >= sbi->s_stripe) { 2030 ac->ac_found++; 2031 ex.fe_logical = 0xDEADF00D; /* debug value */ 2032 ac->ac_b_ex = ex; 2033 ext4_mb_use_best_found(ac, e4b); 2034 break; 2035 } 2036 } 2037 i += sbi->s_stripe; 2038 } 2039 } 2040 2041 /* 2042 * This is now called BEFORE we load the buddy bitmap. 2043 * Returns 1 or 0 indicating whether the group is suitable 2044 * for the allocation. In addition it can also return a negative 2045 * error code when something goes wrong. 2046 */ 2047 static int ext4_mb_good_group(struct ext4_allocation_context *ac, 2048 ext4_group_t group, int cr) 2049 { 2050 unsigned free, fragments; 2051 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2052 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2053 2054 BUG_ON(cr < 0 || cr >= 4); 2055 2056 free = grp->bb_free; 2057 if (free == 0) 2058 return 0; 2059 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2060 return 0; 2061 2062 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2063 return 0; 2064 2065 /* We only do this if the grp has never been initialized */ 2066 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2067 int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); 2068 if (ret) 2069 return ret; 2070 } 2071 2072 fragments = grp->bb_fragments; 2073 if (fragments == 0) 2074 return 0; 2075 2076 switch (cr) { 2077 case 0: 2078 BUG_ON(ac->ac_2order == 0); 2079 2080 /* Avoid using the first bg of a flexgroup for data files */ 2081 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2082 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2083 ((group % flex_size) == 0)) 2084 return 0; 2085 2086 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || 2087 (free / fragments) >= ac->ac_g_ex.fe_len) 2088 return 1; 2089 2090 if (grp->bb_largest_free_order < ac->ac_2order) 2091 return 0; 2092 2093 return 1; 2094 case 1: 2095 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2096 return 1; 2097 break; 2098 case 2: 2099 if (free >= ac->ac_g_ex.fe_len) 2100 return 1; 2101 break; 2102 case 3: 2103 return 1; 2104 default: 2105 BUG(); 2106 } 2107 2108 return 0; 2109 } 2110 2111 static noinline_for_stack int 2112 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2113 { 2114 ext4_group_t ngroups, group, i; 2115 int cr; 2116 int err = 0, first_err = 0; 2117 struct ext4_sb_info *sbi; 2118 struct super_block *sb;
2119 struct ext4_buddy e4b; 2120 2121 sb = ac->ac_sb; 2122 sbi = EXT4_SB(sb); 2123 ngroups = ext4_get_groups_count(sb); 2124 /* non-extent files are limited to low blocks/groups */ 2125 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2126 ngroups = sbi->s_blockfile_groups; 2127 2128 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2129 2130 /* first, try the goal */ 2131 err = ext4_mb_find_by_goal(ac, &e4b); 2132 if (err || ac->ac_status == AC_STATUS_FOUND) 2133 goto out; 2134 2135 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2136 goto out; 2137 2138 /* 2139 * ac->ac_2order is set only if the fe_len is a power of 2; 2140 * if ac_2order is set we also set the criteria to 0 so that we 2141 * try exact allocation using the buddy. 2142 */ 2143 i = fls(ac->ac_g_ex.fe_len); 2144 ac->ac_2order = 0; 2145 /* 2146 * We search using buddy data only if the order of the request 2147 * is greater than or equal to sbi->s_mb_order2_reqs. 2148 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2149 * We also support searching for power-of-two requests only for 2150 * requests up to the maximum buddy size we have constructed. 2151 */ 2152 if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) { 2153 /* 2154 * This should tell if fe_len is exactly a power of 2 2155 */ 2156 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2157 ac->ac_2order = i - 1; 2158 } 2159 2160 /* if stream allocation is enabled, use the global goal */ 2161 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2162 /* TBD: may be a hot spot */ 2163 spin_lock(&sbi->s_md_lock); 2164 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2165 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2166 spin_unlock(&sbi->s_md_lock); 2167 } 2168 2169 /* Let's just scan groups to find more or less suitable blocks */ 2170 cr = ac->ac_2order ? 0 : 1; 2171 /* 2172 * cr == 0 try to get exact allocation, 2173 * cr == 3 try to get anything 2174 */ 2175 repeat: 2176 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2177 ac->ac_criteria = cr; 2178 /* 2179 * searching for the right group start 2180 * from the goal value specified 2181 */ 2182 group = ac->ac_g_ex.fe_group; 2183 2184 for (i = 0; i < ngroups; group++, i++) { 2185 int ret = 0; 2186 cond_resched(); 2187 /* 2188 * Artificially restricted ngroups for non-extent 2189 * files makes group > ngroups possible on first loop.
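 * When that happens we wrap back to group 0, so every usable group is still visited at most once per pass.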
2190 */ 2191 if (group >= ngroups) 2192 group = 0; 2193 2194 /* This now checks without needing the buddy page */ 2195 ret = ext4_mb_good_group(ac, group, cr); 2196 if (ret <= 0) { 2197 if (!first_err) 2198 first_err = ret; 2199 continue; 2200 } 2201 2202 err = ext4_mb_load_buddy(sb, group, &e4b); 2203 if (err) 2204 goto out; 2205 2206 ext4_lock_group(sb, group); 2207 2208 /* 2209 * We need to check again after locking the 2210 * block group 2211 */ 2212 ret = ext4_mb_good_group(ac, group, cr); 2213 if (ret <= 0) { 2214 ext4_unlock_group(sb, group); 2215 ext4_mb_unload_buddy(&e4b); 2216 if (!first_err) 2217 first_err = ret; 2218 continue; 2219 } 2220 2221 ac->ac_groups_scanned++; 2222 if (cr == 0) 2223 ext4_mb_simple_scan_group(ac, &e4b); 2224 else if (cr == 1 && sbi->s_stripe && 2225 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2226 ext4_mb_scan_aligned(ac, &e4b); 2227 else 2228 ext4_mb_complex_scan_group(ac, &e4b); 2229 2230 ext4_unlock_group(sb, group); 2231 ext4_mb_unload_buddy(&e4b); 2232 2233 if (ac->ac_status != AC_STATUS_CONTINUE) 2234 break; 2235 } 2236 } 2237 2238 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2239 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2240 /* 2241 * We've been searching too long. Let's try to allocate 2242 * the best chunk we've found so far 2243 */ 2244 2245 ext4_mb_try_best_found(ac, &e4b); 2246 if (ac->ac_status != AC_STATUS_FOUND) { 2247 /* 2248 * Someone more lucky has already allocated it. 2249 * The only thing we can do is just take first 2250 * found block(s) 2251 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); 2252 */ 2253 ac->ac_b_ex.fe_group = 0; 2254 ac->ac_b_ex.fe_start = 0; 2255 ac->ac_b_ex.fe_len = 0; 2256 ac->ac_status = AC_STATUS_CONTINUE; 2257 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2258 cr = 3; 2259 atomic_inc(&sbi->s_mb_lost_chunks); 2260 goto repeat; 2261 } 2262 } 2263 out: 2264 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2265 err = first_err; 2266 return err; 2267 } 2268 2269 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2270 { 2271 struct super_block *sb = seq->private; 2272 ext4_group_t group; 2273 2274 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2275 return NULL; 2276 group = *pos + 1; 2277 return (void *) ((unsigned long) group); 2278 } 2279 2280 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2281 { 2282 struct super_block *sb = seq->private; 2283 ext4_group_t group; 2284 2285 ++*pos; 2286 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2287 return NULL; 2288 group = *pos + 1; 2289 return (void *) ((unsigned long) group); 2290 } 2291 2292 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2293 { 2294 struct super_block *sb = seq->private; 2295 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2296 int i; 2297 int err, buddy_loaded = 0; 2298 struct ext4_buddy e4b; 2299 struct ext4_group_info *grinfo; 2300 struct sg { 2301 struct ext4_group_info info; 2302 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2303 } sg; 2304 2305 group--; 2306 if (group == 0) 2307 seq_puts(seq, "#group: free frags first [" 2308 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2309 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2310 2311 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2312 sizeof(struct ext4_group_info); 2313 grinfo = ext4_get_group_info(sb, group); 2314 /* Load the group info in memory only if not already loaded. 
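 * (loading the buddy below initializes the group info as a side effect, filling in bb_free, bb_fragments and the per-order counters printed by this handler)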
*/ 2315 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2316 err = ext4_mb_load_buddy(sb, group, &e4b); 2317 if (err) { 2318 seq_printf(seq, "#%-5u: I/O error\n", group); 2319 return 0; 2320 } 2321 buddy_loaded = 1; 2322 } 2323 2324 memcpy(&sg, ext4_get_group_info(sb, group), i); 2325 2326 if (buddy_loaded) 2327 ext4_mb_unload_buddy(&e4b); 2328 2329 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2330 sg.info.bb_fragments, sg.info.bb_first_free); 2331 for (i = 0; i <= 13; i++) 2332 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 2333 sg.info.bb_counters[i] : 0); 2334 seq_printf(seq, " ]\n"); 2335 2336 return 0; 2337 } 2338 2339 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2340 { 2341 } 2342 2343 static const struct seq_operations ext4_mb_seq_groups_ops = { 2344 .start = ext4_mb_seq_groups_start, 2345 .next = ext4_mb_seq_groups_next, 2346 .stop = ext4_mb_seq_groups_stop, 2347 .show = ext4_mb_seq_groups_show, 2348 }; 2349 2350 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) 2351 { 2352 struct super_block *sb = PDE_DATA(inode); 2353 int rc; 2354 2355 rc = seq_open(file, &ext4_mb_seq_groups_ops); 2356 if (rc == 0) { 2357 struct seq_file *m = file->private_data; 2358 m->private = sb; 2359 } 2360 return rc; 2361 2362 } 2363 2364 const struct file_operations ext4_seq_mb_groups_fops = { 2365 .open = ext4_mb_seq_groups_open, 2366 .read = seq_read, 2367 .llseek = seq_lseek, 2368 .release = seq_release, 2369 }; 2370 2371 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 2372 { 2373 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2374 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 2375 2376 BUG_ON(!cachep); 2377 return cachep; 2378 } 2379 2380 /* 2381 * Allocate the top-level s_group_info array for the specified number 2382 * of groups 2383 */ 2384 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 2385 { 2386 struct ext4_sb_info *sbi = EXT4_SB(sb); 2387 unsigned size; 2388 struct ext4_group_info ***new_groupinfo; 2389 2390 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 2391 EXT4_DESC_PER_BLOCK_BITS(sb); 2392 if (size <= sbi->s_group_info_size) 2393 return 0; 2394 2395 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2396 new_groupinfo = kvzalloc(size, GFP_KERNEL); 2397 if (!new_groupinfo) { 2398 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2399 return -ENOMEM; 2400 } 2401 if (sbi->s_group_info) { 2402 memcpy(new_groupinfo, sbi->s_group_info, 2403 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2404 kvfree(sbi->s_group_info); 2405 } 2406 sbi->s_group_info = new_groupinfo; 2407 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2408 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 2409 sbi->s_group_info_size); 2410 return 0; 2411 } 2412 2413 /* Create and initialize ext4_group_info data for the given group. */ 2414 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2415 struct ext4_group_desc *desc) 2416 { 2417 int i; 2418 int metalen = 0; 2419 struct ext4_sb_info *sbi = EXT4_SB(sb); 2420 struct ext4_group_info **meta_group_info; 2421 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2422 2423 /* 2424 * First check if this group is the first of a reserved block. 
* If it's true, we have to allocate a new table of pointers 2426 * to ext4_group_info structures 2427 */ 2428 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2429 metalen = sizeof(*meta_group_info) << 2430 EXT4_DESC_PER_BLOCK_BITS(sb); 2431 meta_group_info = kmalloc(metalen, GFP_NOFS); 2432 if (meta_group_info == NULL) { 2433 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2434 "for a buddy group"); 2435 goto exit_meta_group_info; 2436 } 2437 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = 2438 meta_group_info; 2439 } 2440 2441 meta_group_info = 2442 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2443 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2444 2445 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 2446 if (meta_group_info[i] == NULL) { 2447 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2448 goto exit_group_info; 2449 } 2450 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2451 &(meta_group_info[i]->bb_state)); 2452 2453 /* 2454 * initialize bb_free to be able to skip 2455 * empty groups without initialization 2456 */ 2457 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2458 meta_group_info[i]->bb_free = 2459 ext4_free_clusters_after_init(sb, group, desc); 2460 } else { 2461 meta_group_info[i]->bb_free = 2462 ext4_free_group_clusters(sb, desc); 2463 } 2464 2465 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2466 init_rwsem(&meta_group_info[i]->alloc_sem); 2467 meta_group_info[i]->bb_free_root = RB_ROOT; 2468 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2469 2470 #ifdef DOUBLE_CHECK 2471 { 2472 struct buffer_head *bh; 2473 meta_group_info[i]->bb_bitmap = 2474 kmalloc(sb->s_blocksize, GFP_NOFS); 2475 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2476 bh = ext4_read_block_bitmap(sb, group); 2477 BUG_ON(IS_ERR_OR_NULL(bh)); 2478 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, 2479 sb->s_blocksize); 2480 put_bh(bh); 2481 } 2482 #endif 2483 2484 return 0; 2485 2486 exit_group_info: 2487 /* If a meta_group_info table has been allocated, release it now */ 2488 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2489 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); 2490 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; 2491 } 2492 exit_meta_group_info: 2493 return -ENOMEM; 2494 } /* ext4_mb_add_groupinfo */ 2495 2496 static int ext4_mb_init_backend(struct super_block *sb) 2497 { 2498 ext4_group_t ngroups = ext4_get_groups_count(sb); 2499 ext4_group_t i; 2500 struct ext4_sb_info *sbi = EXT4_SB(sb); 2501 int err; 2502 struct ext4_group_desc *desc; 2503 struct kmem_cache *cachep; 2504 2505 err = ext4_mb_alloc_groupinfo(sb, ngroups); 2506 if (err) 2507 return err; 2508 2509 sbi->s_buddy_cache = new_inode(sb); 2510 if (sbi->s_buddy_cache == NULL) { 2511 ext4_msg(sb, KERN_ERR, "can't get new inode"); 2512 goto err_freesgi; 2513 } 2514 /* To avoid potentially colliding with a valid on-disk inode number, 2515 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 2516 * not in the inode hash, so it should never be found by iget(), but 2517 * this will avoid confusion if it ever shows up during debugging.
*/ 2518 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 2519 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2520 for (i = 0; i < ngroups; i++) { 2521 desc = ext4_get_group_desc(sb, i, NULL); 2522 if (desc == NULL) { 2523 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 2524 goto err_freebuddy; 2525 } 2526 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2527 goto err_freebuddy; 2528 } 2529 2530 return 0; 2531 2532 err_freebuddy: 2533 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2534 while (i-- > 0) 2535 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 2536 i = sbi->s_group_info_size; 2537 while (i-- > 0) 2538 kfree(sbi->s_group_info[i]); 2539 iput(sbi->s_buddy_cache); 2540 err_freesgi: 2541 kvfree(sbi->s_group_info); 2542 return -ENOMEM; 2543 } 2544 2545 static void ext4_groupinfo_destroy_slabs(void) 2546 { 2547 int i; 2548 2549 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 2550 if (ext4_groupinfo_caches[i]) 2551 kmem_cache_destroy(ext4_groupinfo_caches[i]); 2552 ext4_groupinfo_caches[i] = NULL; 2553 } 2554 } 2555 2556 static int ext4_groupinfo_create_slab(size_t size) 2557 { 2558 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 2559 int slab_size; 2560 int blocksize_bits = order_base_2(size); 2561 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2562 struct kmem_cache *cachep; 2563 2564 if (cache_index >= NR_GRPINFO_CACHES) 2565 return -EINVAL; 2566 2567 if (unlikely(cache_index < 0)) 2568 cache_index = 0; 2569 2570 mutex_lock(&ext4_grpinfo_slab_create_mutex); 2571 if (ext4_groupinfo_caches[cache_index]) { 2572 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2573 return 0; /* Already created */ 2574 } 2575 2576 slab_size = offsetof(struct ext4_group_info, 2577 bb_counters[blocksize_bits + 2]); 2578 2579 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 2580 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 2581 NULL); 2582 2583 ext4_groupinfo_caches[cache_index] = cachep; 2584 2585 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2586 if (!cachep) { 2587 printk(KERN_EMERG 2588 "EXT4-fs: no memory for groupinfo slab cache\n"); 2589 return -ENOMEM; 2590 } 2591 2592 return 0; 2593 } 2594 2595 int ext4_mb_init(struct super_block *sb) 2596 { 2597 struct ext4_sb_info *sbi = EXT4_SB(sb); 2598 unsigned i, j; 2599 unsigned offset, offset_incr; 2600 unsigned max; 2601 int ret; 2602 2603 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2604 2605 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2606 if (sbi->s_mb_offsets == NULL) { 2607 ret = -ENOMEM; 2608 goto out; 2609 } 2610 2611 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 2612 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2613 if (sbi->s_mb_maxs == NULL) { 2614 ret = -ENOMEM; 2615 goto out; 2616 } 2617 2618 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 2619 if (ret < 0) 2620 goto out; 2621 2622 /* order 0 is regular bitmap */ 2623 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2624 sbi->s_mb_offsets[0] = 0; 2625 2626 i = 1; 2627 offset = 0; 2628 offset_incr = 1 << (sb->s_blocksize_bits - 1); 2629 max = sb->s_blocksize << 2; 2630 do { 2631 sbi->s_mb_offsets[i] = offset; 2632 sbi->s_mb_maxs[i] = max; 2633 offset += offset_incr; 2634 offset_incr = offset_incr >> 1; 2635 max = max >> 1; 2636 i++; 2637 } while (i <= sb->s_blocksize_bits + 1); 2638 2639 spin_lock_init(&sbi->s_md_lock); 2640 spin_lock_init(&sbi->s_bal_lock); 2641 sbi->s_mb_free_pending = 0; 2642 2643 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2644 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2645 sbi->s_mb_stats = MB_DEFAULT_STATS; 2646 
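/* like most mballoc tunables, the scan limits above are exported via sysfs, e.g. mb_max_to_scan, mb_min_to_scan and mb_stats under /sys/fs/ext4/<partition>/ */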
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 2647 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2648 /* 2649 * The default group preallocation is 512, which for 4k block 2650 * sizes translates to 2 megabytes. However for bigalloc file 2651 * systems, this is probably too big (i.e., if the cluster size 2652 * is 1 megabyte, then group preallocation size becomes half a 2653 * gigabyte!). As a default, we will keep a two megabyte 2654 * group prealloc size for cluster sizes up to 64k, and after 2655 * that, we will force a minimum group preallocation size of 2656 * 32 clusters. This translates to 8 megs when the cluster 2657 * size is 256k, and 32 megs when the cluster size is 1 meg, 2658 * which seems reasonable as a default. 2659 */ 2660 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 2661 sbi->s_cluster_bits, 32); 2662 /* 2663 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 2664 * to the lowest multiple of s_stripe which is bigger than 2665 * the s_mb_group_prealloc as determined above. We want 2666 * the preallocation size to be an exact multiple of the 2667 * RAID stripe size so that preallocations don't fragment 2668 * the stripes. 2669 */ 2670 if (sbi->s_stripe > 1) { 2671 sbi->s_mb_group_prealloc = roundup( 2672 sbi->s_mb_group_prealloc, sbi->s_stripe); 2673 } 2674 2675 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2676 if (sbi->s_locality_groups == NULL) { 2677 ret = -ENOMEM; 2678 goto out; 2679 } 2680 for_each_possible_cpu(i) { 2681 struct ext4_locality_group *lg; 2682 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2683 mutex_init(&lg->lg_mutex); 2684 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2685 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2686 spin_lock_init(&lg->lg_prealloc_lock); 2687 } 2688 2689 /* init file for buddy data */ 2690 ret = ext4_mb_init_backend(sb); 2691 if (ret != 0) 2692 goto out_free_locality_groups; 2693 2694 return 0; 2695 2696 out_free_locality_groups: 2697 free_percpu(sbi->s_locality_groups); 2698 sbi->s_locality_groups = NULL; 2699 out: 2700 kfree(sbi->s_mb_offsets); 2701 sbi->s_mb_offsets = NULL; 2702 kfree(sbi->s_mb_maxs); 2703 sbi->s_mb_maxs = NULL; 2704 return ret; 2705 } 2706 2707 /* needs to be called with the ext4 group lock held */ 2708 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2709 { 2710 struct ext4_prealloc_space *pa; 2711 struct list_head *cur, *tmp; 2712 int count = 0; 2713 2714 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2715 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2716 list_del(&pa->pa_group_list); 2717 count++; 2718 kmem_cache_free(ext4_pspace_cachep, pa); 2719 } 2720 if (count) 2721 mb_debug(1, "mballoc: %u PAs left\n", count); 2722 2723 } 2724 2725 int ext4_mb_release(struct super_block *sb) 2726 { 2727 ext4_group_t ngroups = ext4_get_groups_count(sb); 2728 ext4_group_t i; 2729 int num_meta_group_infos; 2730 struct ext4_group_info *grinfo; 2731 struct ext4_sb_info *sbi = EXT4_SB(sb); 2732 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2733 2734 if (sbi->s_group_info) { 2735 for (i = 0; i < ngroups; i++) { 2736 grinfo = ext4_get_group_info(sb, i); 2737 #ifdef DOUBLE_CHECK 2738 kfree(grinfo->bb_bitmap); 2739 #endif 2740 ext4_lock_group(sb, i); 2741 ext4_mb_cleanup_pa(grinfo); 2742 ext4_unlock_group(sb, i); 2743 kmem_cache_free(cachep, grinfo); 2744 } 2745 num_meta_group_infos = (ngroups + 2746 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2747 EXT4_DESC_PER_BLOCK_BITS(sb); 2748 for (i = 0; i < num_meta_group_infos; i++) 2749
kfree(sbi->s_group_info[i]); 2750 kvfree(sbi->s_group_info); 2751 } 2752 kfree(sbi->s_mb_offsets); 2753 kfree(sbi->s_mb_maxs); 2754 iput(sbi->s_buddy_cache); 2755 if (sbi->s_mb_stats) { 2756 ext4_msg(sb, KERN_INFO, 2757 "mballoc: %u blocks %u reqs (%u success)", 2758 atomic_read(&sbi->s_bal_allocated), 2759 atomic_read(&sbi->s_bal_reqs), 2760 atomic_read(&sbi->s_bal_success)); 2761 ext4_msg(sb, KERN_INFO, 2762 "mballoc: %u extents scanned, %u goal hits, " 2763 "%u 2^N hits, %u breaks, %u lost", 2764 atomic_read(&sbi->s_bal_ex_scanned), 2765 atomic_read(&sbi->s_bal_goals), 2766 atomic_read(&sbi->s_bal_2orders), 2767 atomic_read(&sbi->s_bal_breaks), 2768 atomic_read(&sbi->s_mb_lost_chunks)); 2769 ext4_msg(sb, KERN_INFO, 2770 "mballoc: %lu generated and it took %Lu", 2771 sbi->s_mb_buddies_generated, 2772 sbi->s_mb_generation_time); 2773 ext4_msg(sb, KERN_INFO, 2774 "mballoc: %u preallocated, %u discarded", 2775 atomic_read(&sbi->s_mb_preallocated), 2776 atomic_read(&sbi->s_mb_discarded)); 2777 } 2778 2779 free_percpu(sbi->s_locality_groups); 2780 2781 return 0; 2782 } 2783 2784 static inline int ext4_issue_discard(struct super_block *sb, 2785 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 2786 { 2787 ext4_fsblk_t discard_block; 2788 2789 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 2790 ext4_group_first_block_no(sb, block_group)); 2791 count = EXT4_C2B(EXT4_SB(sb), count); 2792 trace_ext4_discard_blocks(sb, 2793 (unsigned long long) discard_block, count); 2794 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 2795 } 2796 2797 /* 2798 * This function is called by the jbd2 layer once the commit has finished, 2799 * so we know we can free the blocks that were released with that commit. 2800 */ 2801 static void ext4_free_data_callback(struct super_block *sb, 2802 struct ext4_journal_cb_entry *jce, 2803 int rc) 2804 { 2805 struct ext4_free_data *entry = (struct ext4_free_data *)jce; 2806 struct ext4_buddy e4b; 2807 struct ext4_group_info *db; 2808 int err, count = 0, count2 = 0; 2809 2810 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2811 entry->efd_count, entry->efd_group, entry); 2812 2813 if (test_opt(sb, DISCARD)) { 2814 err = ext4_issue_discard(sb, entry->efd_group, 2815 entry->efd_start_cluster, 2816 entry->efd_count); 2817 if (err && err != -EOPNOTSUPP) 2818 ext4_msg(sb, KERN_WARNING, "discard request in" 2819 " group:%d block:%d count:%d failed" 2820 " with %d", entry->efd_group, 2821 entry->efd_start_cluster, 2822 entry->efd_count, err); 2823 } 2824 2825 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 2826 /* we expect to find existing buddy because it's pinned */ 2827 BUG_ON(err != 0); 2828 2829 spin_lock(&EXT4_SB(sb)->s_md_lock); 2830 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 2831 spin_unlock(&EXT4_SB(sb)->s_md_lock); 2832 2833 db = e4b.bd_info; 2834 /* there are blocks to put in buddy to make them really free */ 2835 count += entry->efd_count; 2836 count2++; 2837 ext4_lock_group(sb, entry->efd_group); 2838 /* Take it out of per group rb tree */ 2839 rb_erase(&entry->efd_node, &(db->bb_free_root)); 2840 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 2841 2842 /* 2843 * Clear the trimmed flag for the group so that the next 2844 * ext4_trim_fs can trim it. 2845 * If the volume is mounted with -o discard, online discard 2846 * is supported and the free blocks will be trimmed online. 
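 * (with -o discard, ext4_issue_discard() above has already submitted a discard request for this range)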
2847 */ 2848 if (!test_opt(sb, DISCARD)) 2849 EXT4_MB_GRP_CLEAR_TRIMMED(db); 2850 2851 if (!db->bb_free_root.rb_node) { 2852 /* No more items in the per group rb tree 2853 * balance refcounts from ext4_mb_free_metadata() 2854 */ 2855 put_page(e4b.bd_buddy_page); 2856 put_page(e4b.bd_bitmap_page); 2857 } 2858 ext4_unlock_group(sb, entry->efd_group); 2859 kmem_cache_free(ext4_free_data_cachep, entry); 2860 ext4_mb_unload_buddy(&e4b); 2861 2862 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2863 } 2864 2865 int __init ext4_init_mballoc(void) 2866 { 2867 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 2868 SLAB_RECLAIM_ACCOUNT); 2869 if (ext4_pspace_cachep == NULL) 2870 return -ENOMEM; 2871 2872 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 2873 SLAB_RECLAIM_ACCOUNT); 2874 if (ext4_ac_cachep == NULL) { 2875 kmem_cache_destroy(ext4_pspace_cachep); 2876 return -ENOMEM; 2877 } 2878 2879 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 2880 SLAB_RECLAIM_ACCOUNT); 2881 if (ext4_free_data_cachep == NULL) { 2882 kmem_cache_destroy(ext4_pspace_cachep); 2883 kmem_cache_destroy(ext4_ac_cachep); 2884 return -ENOMEM; 2885 } 2886 return 0; 2887 } 2888 2889 void ext4_exit_mballoc(void) 2890 { 2891 /* 2892 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2893 * before destroying the slab cache. 2894 */ 2895 rcu_barrier(); 2896 kmem_cache_destroy(ext4_pspace_cachep); 2897 kmem_cache_destroy(ext4_ac_cachep); 2898 kmem_cache_destroy(ext4_free_data_cachep); 2899 ext4_groupinfo_destroy_slabs(); 2900 } 2901 2902 2903 /* 2904 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 2905 * Returns 0 if success or error code 2906 */ 2907 static noinline_for_stack int 2908 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2909 handle_t *handle, unsigned int reserv_clstrs) 2910 { 2911 struct buffer_head *bitmap_bh = NULL; 2912 struct ext4_group_desc *gdp; 2913 struct buffer_head *gdp_bh; 2914 struct ext4_sb_info *sbi; 2915 struct super_block *sb; 2916 ext4_fsblk_t block; 2917 int err, len; 2918 2919 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 2920 BUG_ON(ac->ac_b_ex.fe_len <= 0); 2921 2922 sb = ac->ac_sb; 2923 sbi = EXT4_SB(sb); 2924 2925 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 2926 if (IS_ERR(bitmap_bh)) { 2927 err = PTR_ERR(bitmap_bh); 2928 bitmap_bh = NULL; 2929 goto out_err; 2930 } 2931 2932 BUFFER_TRACE(bitmap_bh, "getting write access"); 2933 err = ext4_journal_get_write_access(handle, bitmap_bh); 2934 if (err) 2935 goto out_err; 2936 2937 err = -EIO; 2938 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 2939 if (!gdp) 2940 goto out_err; 2941 2942 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 2943 ext4_free_group_clusters(sb, gdp)); 2944 2945 BUFFER_TRACE(gdp_bh, "get_write_access"); 2946 err = ext4_journal_get_write_access(handle, gdp_bh); 2947 if (err) 2948 goto out_err; 2949 2950 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 2951 2952 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 2953 if (!ext4_data_block_valid(sbi, block, len)) { 2954 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2955 "fs metadata", block, block+len); 2956 /* File system mounted not to panic on error 2957 * Fix the bitmap and return EFSCORRUPTED 2958 * We leak some of the blocks here. 
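 * Marking the whole range as in use in the on-disk bitmap at least keeps the blocks from being handed out again; e2fsck can reclaim them later.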
2959 */ 2960 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2961 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2962 ac->ac_b_ex.fe_len); 2963 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2964 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2965 if (!err) 2966 err = -EFSCORRUPTED; 2967 goto out_err; 2968 } 2969 2970 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2971 #ifdef AGGRESSIVE_CHECK 2972 { 2973 int i; 2974 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 2975 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 2976 bitmap_bh->b_data)); 2977 } 2978 } 2979 #endif 2980 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2981 ac->ac_b_ex.fe_len); 2982 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2983 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2984 ext4_free_group_clusters_set(sb, gdp, 2985 ext4_free_clusters_after_init(sb, 2986 ac->ac_b_ex.fe_group, gdp)); 2987 } 2988 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2989 ext4_free_group_clusters_set(sb, gdp, len); 2990 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 2991 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 2992 2993 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2994 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 2995 /* 2996 * Now reduce the dirty block count also. Should not go negative 2997 */ 2998 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 2999 /* release all the reserved blocks if non delalloc */ 3000 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 3001 reserv_clstrs); 3002 3003 if (sbi->s_log_groups_per_flex) { 3004 ext4_group_t flex_group = ext4_flex_group(sbi, 3005 ac->ac_b_ex.fe_group); 3006 atomic64_sub(ac->ac_b_ex.fe_len, 3007 &sbi->s_flex_groups[flex_group].free_clusters); 3008 } 3009 3010 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3011 if (err) 3012 goto out_err; 3013 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 3014 3015 out_err: 3016 brelse(bitmap_bh); 3017 return err; 3018 } 3019 3020 /* 3021 * here we normalize the request for the locality group 3022 * Group requests are normalized to s_mb_group_prealloc, which is aligned 3023 * to s_stripe if a stripe size is set via the mount option. 3024 * s_mb_group_prealloc can be configured via 3025 * /sys/fs/ext4/<partition>/mb_group_prealloc 3026 * 3027 * XXX: should we try to preallocate more than the group has now?
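 * For example, with the defaults set up in ext4_mb_init() every small-file request in a locality group is normalized to a 512-block goal, i.e. 2MB at a 4k block size.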
3028 */ 3029 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 3030 { 3031 struct super_block *sb = ac->ac_sb; 3032 struct ext4_locality_group *lg = ac->ac_lg; 3033 3034 BUG_ON(lg == NULL); 3035 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 3036 mb_debug(1, "#%u: goal %u blocks for locality group\n", 3037 current->pid, ac->ac_g_ex.fe_len); 3038 } 3039 3040 /* 3041 * Normalization means making the request better in terms of 3042 * size and alignment 3043 */ 3044 static noinline_for_stack void 3045 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 3046 struct ext4_allocation_request *ar) 3047 { 3048 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3049 int bsbits, max; 3050 ext4_lblk_t end; 3051 loff_t size, start_off; 3052 loff_t orig_size __maybe_unused; 3053 ext4_lblk_t start; 3054 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3055 struct ext4_prealloc_space *pa; 3056 3057 /* only normalize data requests; metadata requests 3058 do not need preallocation */ 3059 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3060 return; 3061 3062 /* sometimes the caller may want exact blocks */ 3063 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3064 return; 3065 3066 /* the caller may indicate that preallocation isn't 3067 * required (it's a tail, for example) */ 3068 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 3069 return; 3070 3071 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 3072 ext4_mb_normalize_group_request(ac); 3073 return ; 3074 } 3075 3076 bsbits = ac->ac_sb->s_blocksize_bits; 3077 3078 /* first, let's learn the actual file size 3079 * assuming the current request is allocated */ 3080 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 3081 size = size << bsbits; 3082 if (size < i_size_read(ac->ac_inode)) 3083 size = i_size_read(ac->ac_inode); 3084 orig_size = size; 3085 3086 /* max size of free chunks */ 3087 max = 2 << bsbits; 3088 3089 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 3090 (req <= (size) || max <= (chunk_size)) 3091 3092 /* first, try to predict filesize */ 3093 /* XXX: should this table be tunable?
*/ 3094 start_off = 0; 3095 if (size <= 16 * 1024) { 3096 size = 16 * 1024; 3097 } else if (size <= 32 * 1024) { 3098 size = 32 * 1024; 3099 } else if (size <= 64 * 1024) { 3100 size = 64 * 1024; 3101 } else if (size <= 128 * 1024) { 3102 size = 128 * 1024; 3103 } else if (size <= 256 * 1024) { 3104 size = 256 * 1024; 3105 } else if (size <= 512 * 1024) { 3106 size = 512 * 1024; 3107 } else if (size <= 1024 * 1024) { 3108 size = 1024 * 1024; 3109 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 3110 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3111 (21 - bsbits)) << 21; 3112 size = 2 * 1024 * 1024; 3113 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 3114 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3115 (22 - bsbits)) << 22; 3116 size = 4 * 1024 * 1024; 3117 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 3118 (8<<20)>>bsbits, max, 8 * 1024)) { 3119 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3120 (23 - bsbits)) << 23; 3121 size = 8 * 1024 * 1024; 3122 } else { 3123 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 3124 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 3125 ac->ac_o_ex.fe_len) << bsbits; 3126 } 3127 size = size >> bsbits; 3128 start = start_off >> bsbits; 3129 3130 /* don't cover already allocated blocks in selected range */ 3131 if (ar->pleft && start <= ar->lleft) { 3132 size -= ar->lleft + 1 - start; 3133 start = ar->lleft + 1; 3134 } 3135 if (ar->pright && start + size - 1 >= ar->lright) 3136 size -= start + size - ar->lright; 3137 3138 /* 3139 * Trim allocation request for filesystems with artificially small 3140 * groups. 3141 */ 3142 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 3143 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 3144 3145 end = start + size; 3146 3147 /* check we don't cross already preallocated blocks */ 3148 rcu_read_lock(); 3149 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3150 ext4_lblk_t pa_end; 3151 3152 if (pa->pa_deleted) 3153 continue; 3154 spin_lock(&pa->pa_lock); 3155 if (pa->pa_deleted) { 3156 spin_unlock(&pa->pa_lock); 3157 continue; 3158 } 3159 3160 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3161 pa->pa_len); 3162 3163 /* PA must not overlap original request */ 3164 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 3165 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 3166 3167 /* skip PAs this normalized request doesn't overlap with */ 3168 if (pa->pa_lstart >= end || pa_end <= start) { 3169 spin_unlock(&pa->pa_lock); 3170 continue; 3171 } 3172 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 3173 3174 /* adjust start or end to be adjacent to this pa */ 3175 if (pa_end <= ac->ac_o_ex.fe_logical) { 3176 BUG_ON(pa_end < start); 3177 start = pa_end; 3178 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 3179 BUG_ON(pa->pa_lstart > end); 3180 end = pa->pa_lstart; 3181 } 3182 spin_unlock(&pa->pa_lock); 3183 } 3184 rcu_read_unlock(); 3185 size = end - start; 3186 3187 /* XXX: extra loop to check we really don't overlap preallocations */ 3188 rcu_read_lock(); 3189 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3190 ext4_lblk_t pa_end; 3191 3192 spin_lock(&pa->pa_lock); 3193 if (pa->pa_deleted == 0) { 3194 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3195 pa->pa_len); 3196 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 3197 } 3198 spin_unlock(&pa->pa_lock); 3199 } 3200 rcu_read_unlock(); 3201 3202 if (start + size <= ac->ac_o_ex.fe_logical && 3203 start > ac->ac_o_ex.fe_logical) { 3204 ext4_msg(ac->ac_sb, KERN_ERR, 3205 "start %lu, size %lu, 
fe_logical %lu", 3206 (unsigned long) start, (unsigned long) size, 3207 (unsigned long) ac->ac_o_ex.fe_logical); 3208 BUG(); 3209 } 3210 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3211 3212 /* now prepare goal request */ 3213 3214 /* XXX: is it better to align blocks WRT to logical 3215 * placement or satisfy big request as is */ 3216 ac->ac_g_ex.fe_logical = start; 3217 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 3218 3219 /* define goal start in order to merge */ 3220 if (ar->pright && (ar->lright == (start + size))) { 3221 /* merge to the right */ 3222 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 3223 &ac->ac_f_ex.fe_group, 3224 &ac->ac_f_ex.fe_start); 3225 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3226 } 3227 if (ar->pleft && (ar->lleft + 1 == start)) { 3228 /* merge to the left */ 3229 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 3230 &ac->ac_f_ex.fe_group, 3231 &ac->ac_f_ex.fe_start); 3232 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3233 } 3234 3235 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, 3236 (unsigned) orig_size, (unsigned) start); 3237 } 3238 3239 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3240 { 3241 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3242 3243 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3244 atomic_inc(&sbi->s_bal_reqs); 3245 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3246 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3247 atomic_inc(&sbi->s_bal_success); 3248 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3249 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3250 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3251 atomic_inc(&sbi->s_bal_goals); 3252 if (ac->ac_found > sbi->s_mb_max_to_scan) 3253 atomic_inc(&sbi->s_bal_breaks); 3254 } 3255 3256 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3257 trace_ext4_mballoc_alloc(ac); 3258 else 3259 trace_ext4_mballoc_prealloc(ac); 3260 } 3261 3262 /* 3263 * Called on failure; free up any blocks from the inode PA for this 3264 * context. We don't need this for MB_GROUP_PA because we only change 3265 * pa_free in ext4_mb_release_context(), but on failure, we've already 3266 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 3267 */ 3268 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3269 { 3270 struct ext4_prealloc_space *pa = ac->ac_pa; 3271 struct ext4_buddy e4b; 3272 int err; 3273 3274 if (pa == NULL) { 3275 if (ac->ac_f_ex.fe_len == 0) 3276 return; 3277 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 3278 if (err) { 3279 /* 3280 * This should never happen since we pin the 3281 * pages in the ext4_allocation_context so 3282 * ext4_mb_load_buddy() should never fail. 
3283 */ 3284 WARN(1, "mb_load_buddy failed (%d)", err); 3285 return; 3286 } 3287 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3288 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 3289 ac->ac_f_ex.fe_len); 3290 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3291 ext4_mb_unload_buddy(&e4b); 3292 return; 3293 } 3294 if (pa->pa_type == MB_INODE_PA) 3295 pa->pa_free += ac->ac_b_ex.fe_len; 3296 } 3297 3298 /* 3299 * use blocks preallocated to inode 3300 */ 3301 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3302 struct ext4_prealloc_space *pa) 3303 { 3304 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3305 ext4_fsblk_t start; 3306 ext4_fsblk_t end; 3307 int len; 3308 3309 /* found preallocated blocks, use them */ 3310 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3311 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 3312 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 3313 len = EXT4_NUM_B2C(sbi, end - start); 3314 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3315 &ac->ac_b_ex.fe_start); 3316 ac->ac_b_ex.fe_len = len; 3317 ac->ac_status = AC_STATUS_FOUND; 3318 ac->ac_pa = pa; 3319 3320 BUG_ON(start < pa->pa_pstart); 3321 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 3322 BUG_ON(pa->pa_free < len); 3323 pa->pa_free -= len; 3324 3325 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); 3326 } 3327 3328 /* 3329 * use blocks preallocated to locality group 3330 */ 3331 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3332 struct ext4_prealloc_space *pa) 3333 { 3334 unsigned int len = ac->ac_o_ex.fe_len; 3335 3336 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3337 &ac->ac_b_ex.fe_group, 3338 &ac->ac_b_ex.fe_start); 3339 ac->ac_b_ex.fe_len = len; 3340 ac->ac_status = AC_STATUS_FOUND; 3341 ac->ac_pa = pa; 3342 3343 /* we don't correct pa_pstart or pa_plen here to avoid 3344 * possible race when the group is being loaded concurrently 3345 * instead we correct pa later, after blocks are marked 3346 * in on-disk bitmap -- see ext4_mb_release_context() 3347 * Other CPUs are prevented from allocating from this pa by lg_mutex 3348 */ 3349 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3350 } 3351 3352 /* 3353 * Return the prealloc space that has the minimal distance 3354 * from the goal block. @cpa is the prealloc 3355 * space with the currently known minimal distance 3356 * from the goal block.
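 * For example, with a goal block of 1000, a pa starting at block 1040 (distance 40) replaces a cpa starting at block 1100 (distance 100), moving the pa_count reference along with it.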
3357 */ 3358 static struct ext4_prealloc_space * 3359 ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3360 struct ext4_prealloc_space *pa, 3361 struct ext4_prealloc_space *cpa) 3362 { 3363 ext4_fsblk_t cur_distance, new_distance; 3364 3365 if (cpa == NULL) { 3366 atomic_inc(&pa->pa_count); 3367 return pa; 3368 } 3369 cur_distance = abs(goal_block - cpa->pa_pstart); 3370 new_distance = abs(goal_block - pa->pa_pstart); 3371 3372 if (cur_distance <= new_distance) 3373 return cpa; 3374 3375 /* drop the previous reference */ 3376 atomic_dec(&cpa->pa_count); 3377 atomic_inc(&pa->pa_count); 3378 return pa; 3379 } 3380 3381 /* 3382 * search goal blocks in preallocated space 3383 */ 3384 static noinline_for_stack int 3385 ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3386 { 3387 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3388 int order, i; 3389 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3390 struct ext4_locality_group *lg; 3391 struct ext4_prealloc_space *pa, *cpa = NULL; 3392 ext4_fsblk_t goal_block; 3393 3394 /* only data can be preallocated */ 3395 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3396 return 0; 3397 3398 /* first, try per-file preallocation */ 3399 rcu_read_lock(); 3400 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3401 3402 /* all fields in this condition don't change, 3403 * so we can skip locking for them */ 3404 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3405 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 3406 EXT4_C2B(sbi, pa->pa_len))) 3407 continue; 3408 3409 /* non-extent files can't have physical blocks past 2^32 */ 3410 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3411 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 3412 EXT4_MAX_BLOCK_FILE_PHYS)) 3413 continue; 3414 3415 /* found preallocated blocks, use them */ 3416 spin_lock(&pa->pa_lock); 3417 if (pa->pa_deleted == 0 && pa->pa_free) { 3418 atomic_inc(&pa->pa_count); 3419 ext4_mb_use_inode_pa(ac, pa); 3420 spin_unlock(&pa->pa_lock); 3421 ac->ac_criteria = 10; 3422 rcu_read_unlock(); 3423 return 1; 3424 } 3425 spin_unlock(&pa->pa_lock); 3426 } 3427 rcu_read_unlock(); 3428 3429 /* can we use group allocation? */ 3430 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3431 return 0; 3432 3433 /* inode may have no locality group for some reason */ 3434 lg = ac->ac_lg; 3435 if (lg == NULL) 3436 return 0; 3437 order = fls(ac->ac_o_ex.fe_len) - 1; 3438 if (order > PREALLOC_TB_SIZE - 1) 3439 /* The max size of hash table is PREALLOC_TB_SIZE */ 3440 order = PREALLOC_TB_SIZE - 1; 3441 3442 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3443 /* 3444 * search for the prealloc space with the 3445 * minimal distance from the goal block. 3446 */ 3447 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3448 rcu_read_lock(); 3449 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3450 pa_inode_list) { 3451 spin_lock(&pa->pa_lock); 3452 if (pa->pa_deleted == 0 && 3453 pa->pa_free >= ac->ac_o_ex.fe_len) { 3454 3455 cpa = ext4_mb_check_group_pa(goal_block, 3456 pa, cpa); 3457 } 3458 spin_unlock(&pa->pa_lock); 3459 } 3460 rcu_read_unlock(); 3461 } 3462 if (cpa) { 3463 ext4_mb_use_group_pa(ac, cpa); 3464 ac->ac_criteria = 20; 3465 return 1; 3466 } 3467 return 0; 3468 } 3469 3470 /* 3471 * the function goes through all blocks freed in the group 3472 * but not yet committed and marks them used in the in-core bitmap.
3473 * buddy must be generated from this bitmap 3474 * Needs to be called with the ext4 group lock held 3475 */ 3476 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 3477 ext4_group_t group) 3478 { 3479 struct rb_node *n; 3480 struct ext4_group_info *grp; 3481 struct ext4_free_data *entry; 3482 3483 grp = ext4_get_group_info(sb, group); 3484 n = rb_first(&(grp->bb_free_root)); 3485 3486 while (n) { 3487 entry = rb_entry(n, struct ext4_free_data, efd_node); 3488 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 3489 n = rb_next(n); 3490 } 3491 return; 3492 } 3493 3494 /* 3495 * the function goes through all preallocations in this group and marks them 3496 * used in the in-core bitmap. buddy must be generated from this bitmap 3497 * Needs to be called with the ext4 group lock held 3498 */ 3499 static noinline_for_stack 3500 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 3501 ext4_group_t group) 3502 { 3503 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3504 struct ext4_prealloc_space *pa; 3505 struct list_head *cur; 3506 ext4_group_t groupnr; 3507 ext4_grpblk_t start; 3508 int preallocated = 0; 3509 int len; 3510 3511 /* all forms of preallocation discard first load the group, 3512 * so the only competing code is preallocation use. 3513 * we don't need any locking here 3514 * notice we do NOT ignore preallocations with pa_deleted 3515 * otherwise we could leave used blocks available for 3516 * allocation in buddy when concurrent ext4_mb_put_pa() 3517 * is dropping preallocation 3518 */ 3519 list_for_each(cur, &grp->bb_prealloc_list) { 3520 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3521 spin_lock(&pa->pa_lock); 3522 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3523 &groupnr, &start); 3524 len = pa->pa_len; 3525 spin_unlock(&pa->pa_lock); 3526 if (unlikely(len == 0)) 3527 continue; 3528 BUG_ON(groupnr != group); 3529 ext4_set_bits(bitmap, start, len); 3530 preallocated += len; 3531 } 3532 mb_debug(1, "preallocated %u for group %u\n", preallocated, group); 3533 } 3534 3535 static void ext4_mb_pa_callback(struct rcu_head *head) 3536 { 3537 struct ext4_prealloc_space *pa; 3538 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3539 3540 BUG_ON(atomic_read(&pa->pa_count)); 3541 BUG_ON(pa->pa_deleted == 0); 3542 kmem_cache_free(ext4_pspace_cachep, pa); 3543 } 3544 3545 /* 3546 * drops a reference to preallocated space descriptor 3547 * if this was the last reference and the space is consumed 3548 */ 3549 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3550 struct super_block *sb, struct ext4_prealloc_space *pa) 3551 { 3552 ext4_group_t grp; 3553 ext4_fsblk_t grp_blk; 3554 3555 /* in this short window concurrent discard can set pa_deleted */ 3556 spin_lock(&pa->pa_lock); 3557 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 3558 spin_unlock(&pa->pa_lock); 3559 return; 3560 } 3561 3562 if (pa->pa_deleted == 1) { 3563 spin_unlock(&pa->pa_lock); 3564 return; 3565 } 3566 3567 pa->pa_deleted = 1; 3568 spin_unlock(&pa->pa_lock); 3569 3570 grp_blk = pa->pa_pstart; 3571 /* 3572 * If doing group-based preallocation, pa_pstart may be in the 3573 * next group when pa is used up 3574 */ 3575 if (pa->pa_type == MB_GROUP_PA) 3576 grp_blk--; 3577 3578 grp = ext4_get_group_number(sb, grp_blk); 3579 3580 /* 3581 * possible race: 3582 * 3583 * P1 (buddy init) P2 (regular allocation) 3584 * find block B in PA 3585 * copy on-disk bitmap to buddy 3586 * mark B in on-disk bitmap 3587 *
drop PA from group 3588 * mark all PAs in buddy 3589 * 3590 * thus, P1 initializes buddy with B available. to prevent this 3591 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 3592 * against that pair 3593 */ 3594 ext4_lock_group(sb, grp); 3595 list_del(&pa->pa_group_list); 3596 ext4_unlock_group(sb, grp); 3597 3598 spin_lock(pa->pa_obj_lock); 3599 list_del_rcu(&pa->pa_inode_list); 3600 spin_unlock(pa->pa_obj_lock); 3601 3602 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3603 } 3604 3605 /* 3606 * creates new preallocated space for given inode 3607 */ 3608 static noinline_for_stack int 3609 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 3610 { 3611 struct super_block *sb = ac->ac_sb; 3612 struct ext4_sb_info *sbi = EXT4_SB(sb); 3613 struct ext4_prealloc_space *pa; 3614 struct ext4_group_info *grp; 3615 struct ext4_inode_info *ei; 3616 3617 /* preallocate only when found space is larger than requested */ 3618 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3619 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3620 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3621 3622 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3623 if (pa == NULL) 3624 return -ENOMEM; 3625 3626 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 3627 int winl; 3628 int wins; 3629 int win; 3630 int offs; 3631 3632 /* we can't allocate as much as the normalizer wants, 3633 * so the found space must get a proper lstart 3634 * to cover the original request */ 3635 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3636 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 3637 3638 /* we're limited by the original request in that 3639 * the logical block must be covered anyway 3640 * winl is the window we can move our chunk within */ 3641 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 3642 3643 /* also, we should cover the whole original request */ 3644 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 3645 3646 /* the smallest one defines the real window */ 3647 win = min(winl, wins); 3648 3649 offs = ac->ac_o_ex.fe_logical % 3650 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3651 if (offs && offs < win) 3652 win = offs; 3653 3654 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 3655 EXT4_NUM_B2C(sbi, win); 3656 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 3657 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 3658 } 3659 3660 /* preallocation can change ac_b_ex, thus we store the actually 3661 * allocated blocks for history */ 3662 ac->ac_f_ex = ac->ac_b_ex; 3663 3664 pa->pa_lstart = ac->ac_b_ex.fe_logical; 3665 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3666 pa->pa_len = ac->ac_b_ex.fe_len; 3667 pa->pa_free = pa->pa_len; 3668 atomic_set(&pa->pa_count, 1); 3669 spin_lock_init(&pa->pa_lock); 3670 INIT_LIST_HEAD(&pa->pa_inode_list); 3671 INIT_LIST_HEAD(&pa->pa_group_list); 3672 pa->pa_deleted = 0; 3673 pa->pa_type = MB_INODE_PA; 3674 3675 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, 3676 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3677 trace_ext4_mb_new_inode_pa(ac, pa); 3678 3679 ext4_mb_use_inode_pa(ac, pa); 3680 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 3681 3682 ei = EXT4_I(ac->ac_inode); 3683 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3684 3685 pa->pa_obj_lock = &ei->i_prealloc_lock; 3686 pa->pa_inode = ac->ac_inode; 3687 3688 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3689 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3690 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3691 3692 spin_lock(pa->pa_obj_lock); 3693 list_add_rcu(&pa->pa_inode_list,
&ei->i_prealloc_list); 3694 spin_unlock(pa->pa_obj_lock); 3695 3696 return 0; 3697 } 3698 3699 /* 3700 * creates new preallocated space for the locality group the inode belongs to 3701 */ 3702 static noinline_for_stack int 3703 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 3704 { 3705 struct super_block *sb = ac->ac_sb; 3706 struct ext4_locality_group *lg; 3707 struct ext4_prealloc_space *pa; 3708 struct ext4_group_info *grp; 3709 3710 /* preallocate only when found space is larger than requested */ 3711 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3712 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3713 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3714 3715 BUG_ON(ext4_pspace_cachep == NULL); 3716 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3717 if (pa == NULL) 3718 return -ENOMEM; 3719 3720 /* preallocation can change ac_b_ex, thus we store the actually 3721 * allocated blocks for history */ 3722 ac->ac_f_ex = ac->ac_b_ex; 3723 3724 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3725 pa->pa_lstart = pa->pa_pstart; 3726 pa->pa_len = ac->ac_b_ex.fe_len; 3727 pa->pa_free = pa->pa_len; 3728 atomic_set(&pa->pa_count, 1); 3729 spin_lock_init(&pa->pa_lock); 3730 INIT_LIST_HEAD(&pa->pa_inode_list); 3731 INIT_LIST_HEAD(&pa->pa_group_list); 3732 pa->pa_deleted = 0; 3733 pa->pa_type = MB_GROUP_PA; 3734 3735 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, 3736 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3737 trace_ext4_mb_new_group_pa(ac, pa); 3738 3739 ext4_mb_use_group_pa(ac, pa); 3740 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3741 3742 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3743 lg = ac->ac_lg; 3744 BUG_ON(lg == NULL); 3745 3746 pa->pa_obj_lock = &lg->lg_prealloc_lock; 3747 pa->pa_inode = NULL; 3748 3749 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3750 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3751 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3752 3753 /* 3754 * We will later add the new pa to the right bucket 3755 * after updating the pa_free in ext4_mb_release_context 3756 */ 3757 return 0; 3758 } 3759 3760 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 3761 { 3762 int err; 3763 3764 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 3765 err = ext4_mb_new_group_pa(ac); 3766 else 3767 err = ext4_mb_new_inode_pa(ac); 3768 return err; 3769 } 3770 3771 /* 3772 * finds all unused blocks in on-disk bitmap, frees them in 3773 * in-core bitmap and buddy. 3774 * @pa must be unlinked from inode and group lists, so that 3775 * nobody else can find/use it. 3776 * the caller MUST hold group/inode locks.
3777 * TODO: optimize the case when there are no in-core structures yet 3778 */ 3779 static noinline_for_stack int 3780 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 3781 struct ext4_prealloc_space *pa) 3782 { 3783 struct super_block *sb = e4b->bd_sb; 3784 struct ext4_sb_info *sbi = EXT4_SB(sb); 3785 unsigned int end; 3786 unsigned int next; 3787 ext4_group_t group; 3788 ext4_grpblk_t bit; 3789 unsigned long long grp_blk_start; 3790 int err = 0; 3791 int free = 0; 3792 3793 BUG_ON(pa->pa_deleted == 0); 3794 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3795 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 3796 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3797 end = bit + pa->pa_len; 3798 3799 while (bit < end) { 3800 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3801 if (bit >= end) 3802 break; 3803 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3804 mb_debug(1, " free preallocated %u/%u in group %u\n", 3805 (unsigned) ext4_group_first_block_no(sb, group) + bit, 3806 (unsigned) next - bit, (unsigned) group); 3807 free += next - bit; 3808 3809 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3810 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 3811 EXT4_C2B(sbi, bit)), 3812 next - bit); 3813 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3814 bit = next + 1; 3815 } 3816 if (free != pa->pa_free) { 3817 ext4_msg(e4b->bd_sb, KERN_CRIT, 3818 "pa %p: logic %lu, phys. %lu, len %lu", 3819 pa, (unsigned long) pa->pa_lstart, 3820 (unsigned long) pa->pa_pstart, 3821 (unsigned long) pa->pa_len); 3822 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 3823 free, pa->pa_free); 3824 /* 3825 * pa is already deleted so we use the value obtained 3826 * from the bitmap and continue. 
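 * Only the s_mb_discarded statistic below depends on this choice.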
3827 */ 3828 } 3829 atomic_add(free, &sbi->s_mb_discarded); 3830 3831 return err; 3832 } 3833 3834 static noinline_for_stack int 3835 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3836 struct ext4_prealloc_space *pa) 3837 { 3838 struct super_block *sb = e4b->bd_sb; 3839 ext4_group_t group; 3840 ext4_grpblk_t bit; 3841 3842 trace_ext4_mb_release_group_pa(sb, pa); 3843 BUG_ON(pa->pa_deleted == 0); 3844 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3845 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3846 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3847 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3848 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 3849 3850 return 0; 3851 } 3852 3853 /* 3854 * releases all preallocations in given group 3855 * 3856 * first, we need to decide discard policy: 3857 * - when do we discard 3858 * 1) ENOSPC 3859 * - how many do we discard 3860 * 1) how many requested 3861 */ 3862 static noinline_for_stack int 3863 ext4_mb_discard_group_preallocations(struct super_block *sb, 3864 ext4_group_t group, int needed) 3865 { 3866 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3867 struct buffer_head *bitmap_bh = NULL; 3868 struct ext4_prealloc_space *pa, *tmp; 3869 struct list_head list; 3870 struct ext4_buddy e4b; 3871 int err; 3872 int busy = 0; 3873 int free = 0; 3874 3875 mb_debug(1, "discard preallocation for group %u\n", group); 3876 3877 if (list_empty(&grp->bb_prealloc_list)) 3878 return 0; 3879 3880 bitmap_bh = ext4_read_block_bitmap(sb, group); 3881 if (IS_ERR(bitmap_bh)) { 3882 err = PTR_ERR(bitmap_bh); 3883 ext4_error(sb, "Error %d reading block bitmap for %u", 3884 err, group); 3885 return 0; 3886 } 3887 3888 err = ext4_mb_load_buddy(sb, group, &e4b); 3889 if (err) { 3890 ext4_error(sb, "Error loading buddy information for %u", group); 3891 put_bh(bitmap_bh); 3892 return 0; 3893 } 3894 3895 if (needed == 0) 3896 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 3897 3898 INIT_LIST_HEAD(&list); 3899 repeat: 3900 ext4_lock_group(sb, group); 3901 list_for_each_entry_safe(pa, tmp, 3902 &grp->bb_prealloc_list, pa_group_list) { 3903 spin_lock(&pa->pa_lock); 3904 if (atomic_read(&pa->pa_count)) { 3905 spin_unlock(&pa->pa_lock); 3906 busy = 1; 3907 continue; 3908 } 3909 if (pa->pa_deleted) { 3910 spin_unlock(&pa->pa_lock); 3911 continue; 3912 } 3913 3914 /* seems this one can be freed ... */ 3915 pa->pa_deleted = 1; 3916 3917 /* we can trust pa_free ... */ 3918 free += pa->pa_free; 3919 3920 spin_unlock(&pa->pa_lock); 3921 3922 list_del(&pa->pa_group_list); 3923 list_add(&pa->u.pa_tmp_list, &list); 3924 } 3925 3926 /* if we still need more blocks and some PAs were used, try again */ 3927 if (free < needed && busy) { 3928 busy = 0; 3929 ext4_unlock_group(sb, group); 3930 cond_resched(); 3931 goto repeat; 3932 } 3933 3934 /* found anything to free? 
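 * (an empty list implies free == 0, which the BUG_ON below asserts)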
*/ 3935 if (list_empty(&list)) { 3936 BUG_ON(free != 0); 3937 goto out; 3938 } 3939 3940 /* now free all selected PAs */ 3941 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3942 3943 /* remove from object (inode or locality group) */ 3944 spin_lock(pa->pa_obj_lock); 3945 list_del_rcu(&pa->pa_inode_list); 3946 spin_unlock(pa->pa_obj_lock); 3947 3948 if (pa->pa_type == MB_GROUP_PA) 3949 ext4_mb_release_group_pa(&e4b, pa); 3950 else 3951 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3952 3953 list_del(&pa->u.pa_tmp_list); 3954 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3955 } 3956 3957 out: 3958 ext4_unlock_group(sb, group); 3959 ext4_mb_unload_buddy(&e4b); 3960 put_bh(bitmap_bh); 3961 return free; 3962 } 3963 3964 /* 3965 * releases all non-used preallocated blocks for given inode 3966 * 3967 * It's important to discard preallocations under i_data_sem 3968 * We don't want another block to be served from the prealloc 3969 * space when we are discarding the inode prealloc space. 3970 * 3971 * FIXME!! Make sure it is valid at all the call sites 3972 */ 3973 void ext4_discard_preallocations(struct inode *inode) 3974 { 3975 struct ext4_inode_info *ei = EXT4_I(inode); 3976 struct super_block *sb = inode->i_sb; 3977 struct buffer_head *bitmap_bh = NULL; 3978 struct ext4_prealloc_space *pa, *tmp; 3979 ext4_group_t group = 0; 3980 struct list_head list; 3981 struct ext4_buddy e4b; 3982 int err; 3983 3984 if (!S_ISREG(inode->i_mode)) { 3985 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 3986 return; 3987 } 3988 3989 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); 3990 trace_ext4_discard_preallocations(inode); 3991 3992 INIT_LIST_HEAD(&list); 3993 3994 repeat: 3995 /* first, collect all pa's in the inode */ 3996 spin_lock(&ei->i_prealloc_lock); 3997 while (!list_empty(&ei->i_prealloc_list)) { 3998 pa = list_entry(ei->i_prealloc_list.next, 3999 struct ext4_prealloc_space, pa_inode_list); 4000 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 4001 spin_lock(&pa->pa_lock); 4002 if (atomic_read(&pa->pa_count)) { 4003 /* this shouldn't happen often - nobody should 4004 * use preallocation while we're discarding it */ 4005 spin_unlock(&pa->pa_lock); 4006 spin_unlock(&ei->i_prealloc_lock); 4007 ext4_msg(sb, KERN_ERR, 4008 "uh-oh! used pa while discarding"); 4009 WARN_ON(1); 4010 schedule_timeout_uninterruptible(HZ); 4011 goto repeat; 4012 4013 } 4014 if (pa->pa_deleted == 0) { 4015 pa->pa_deleted = 1; 4016 spin_unlock(&pa->pa_lock); 4017 list_del_rcu(&pa->pa_inode_list); 4018 list_add(&pa->u.pa_tmp_list, &list); 4019 continue; 4020 } 4021 4022 /* someone is deleting pa right now */ 4023 spin_unlock(&pa->pa_lock); 4024 spin_unlock(&ei->i_prealloc_lock); 4025 4026 /* we have to wait here because pa_deleted 4027 * doesn't mean pa is already unlinked from 4028 * the list. 
as we might be called from 4029 * ->clear_inode() the inode will get freed 4030 * and concurrent thread which is unlinking 4031 * pa from inode's list may access already 4032 * freed memory, bad-bad-bad */ 4033 4034 /* XXX: if this happens too often, we can 4035 * add a flag to force wait only in case 4036 * of ->clear_inode(), but not in case of 4037 * regular truncate */ 4038 schedule_timeout_uninterruptible(HZ); 4039 goto repeat; 4040 } 4041 spin_unlock(&ei->i_prealloc_lock); 4042 4043 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4044 BUG_ON(pa->pa_type != MB_INODE_PA); 4045 group = ext4_get_group_number(sb, pa->pa_pstart); 4046 4047 err = ext4_mb_load_buddy(sb, group, &e4b); 4048 if (err) { 4049 ext4_error(sb, "Error loading buddy information for %u", 4050 group); 4051 continue; 4052 } 4053 4054 bitmap_bh = ext4_read_block_bitmap(sb, group); 4055 if (IS_ERR(bitmap_bh)) { 4056 err = PTR_ERR(bitmap_bh); 4057 ext4_error(sb, "Error %d reading block bitmap for %u", 4058 err, group); 4059 ext4_mb_unload_buddy(&e4b); 4060 continue; 4061 } 4062 4063 ext4_lock_group(sb, group); 4064 list_del(&pa->pa_group_list); 4065 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4066 ext4_unlock_group(sb, group); 4067 4068 ext4_mb_unload_buddy(&e4b); 4069 put_bh(bitmap_bh); 4070 4071 list_del(&pa->u.pa_tmp_list); 4072 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4073 } 4074 } 4075 4076 #ifdef CONFIG_EXT4_DEBUG 4077 static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4078 { 4079 struct super_block *sb = ac->ac_sb; 4080 ext4_group_t ngroups, i; 4081 4082 if (!ext4_mballoc_debug || 4083 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) 4084 return; 4085 4086 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" 4087 " Allocation context details:"); 4088 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", 4089 ac->ac_status, ac->ac_flags); 4090 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " 4091 "goal %lu/%lu/%lu@%lu, " 4092 "best %lu/%lu/%lu@%lu cr %d", 4093 (unsigned long)ac->ac_o_ex.fe_group, 4094 (unsigned long)ac->ac_o_ex.fe_start, 4095 (unsigned long)ac->ac_o_ex.fe_len, 4096 (unsigned long)ac->ac_o_ex.fe_logical, 4097 (unsigned long)ac->ac_g_ex.fe_group, 4098 (unsigned long)ac->ac_g_ex.fe_start, 4099 (unsigned long)ac->ac_g_ex.fe_len, 4100 (unsigned long)ac->ac_g_ex.fe_logical, 4101 (unsigned long)ac->ac_b_ex.fe_group, 4102 (unsigned long)ac->ac_b_ex.fe_start, 4103 (unsigned long)ac->ac_b_ex.fe_len, 4104 (unsigned long)ac->ac_b_ex.fe_logical, 4105 (int)ac->ac_criteria); 4106 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found); 4107 ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); 4108 ngroups = ext4_get_groups_count(sb); 4109 for (i = 0; i < ngroups; i++) { 4110 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 4111 struct ext4_prealloc_space *pa; 4112 ext4_grpblk_t start; 4113 struct list_head *cur; 4114 ext4_lock_group(sb, i); 4115 list_for_each(cur, &grp->bb_prealloc_list) { 4116 pa = list_entry(cur, struct ext4_prealloc_space, 4117 pa_group_list); 4118 spin_lock(&pa->pa_lock); 4119 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4120 NULL, &start); 4121 spin_unlock(&pa->pa_lock); 4122 printk(KERN_ERR "PA:%u:%d:%u \n", i, 4123 start, pa->pa_len); 4124 } 4125 ext4_unlock_group(sb, i); 4126 4127 if (grp->bb_free == 0) 4128 continue; 4129 printk(KERN_ERR "%u: %d/%d \n", 4130 i, grp->bb_free, grp->bb_fragments); 4131 } 4132 printk(KERN_ERR "\n"); 4133 } 4134 #else 4135 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4136 { 4137 return; 4138 } 4139 
#endif
4140
4141 /*
4142 * We use locality group preallocation for small files. The size of the
4143 * file is the larger of the current size and the resulting size after
4144 * allocation.
4145 *
4146 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4147 */
4148 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4149 {
4150 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4151 int bsbits = ac->ac_sb->s_blocksize_bits;
4152 loff_t size, isize;
4153
4154 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4155 return;
4156
4157 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4158 return;
4159
4160 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4161 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4162 >> bsbits;
4163
4164 if ((size == isize) &&
4165 !ext4_fs_is_busy(sbi) &&
4166 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4167 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4168 return;
4169 }
4170
4171 if (sbi->s_mb_group_prealloc <= 0) {
4172 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4173 return;
4174 }
4175
4176 /* don't use group allocation for large files */
4177 size = max(size, isize);
4178 if (size > sbi->s_mb_stream_request) {
4179 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4180 return;
4181 }
4182
4183 BUG_ON(ac->ac_lg != NULL);
4184 /*
4185 * locality group prealloc space is per-CPU. The reason for having
4186 * per-CPU locality groups is to reduce the contention between block
4187 * requests from multiple CPUs.
4188 */
4189 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4190
4191 /* we're going to use group allocation */
4192 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4193
4194 /* serialize all allocations in the group */
4195 mutex_lock(&ac->ac_lg->lg_mutex);
4196 }
4197
4198 static noinline_for_stack int
4199 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4200 struct ext4_allocation_request *ar)
4201 {
4202 struct super_block *sb = ar->inode->i_sb;
4203 struct ext4_sb_info *sbi = EXT4_SB(sb);
4204 struct ext4_super_block *es = sbi->s_es;
4205 ext4_group_t group;
4206 unsigned int len;
4207 ext4_fsblk_t goal;
4208 ext4_grpblk_t block;
4209
4210 /* we can't allocate > group size */
4211 len = ar->len;
4212
4213 /* just a dirty hack to filter too big requests */
4214 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4215 len = EXT4_CLUSTERS_PER_GROUP(sb);
4216
4217 /* start searching from the goal */
4218 goal = ar->goal;
4219 if (goal < le32_to_cpu(es->s_first_data_block) ||
4220 goal >= ext4_blocks_count(es))
4221 goal = le32_to_cpu(es->s_first_data_block);
4222 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4223
4224 /* set up allocation goals */
4225 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4226 ac->ac_status = AC_STATUS_CONTINUE;
4227 ac->ac_sb = sb;
4228 ac->ac_inode = ar->inode;
4229 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4230 ac->ac_o_ex.fe_group = group;
4231 ac->ac_o_ex.fe_start = block;
4232 ac->ac_o_ex.fe_len = len;
4233 ac->ac_g_ex = ac->ac_o_ex;
4234 ac->ac_flags = ar->flags;
4235
4236 /* we have to define the context: we'll work with a file or a
4237 * locality group.
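 * In sketch form, the policy implemented by ext4_mb_group_or_file()
 * above is:
 *
 *	max(request end, i_size) <= mb_stream_req  ->  group (lg) prealloc
 *	max(request end, i_size) >  mb_stream_req  ->  stream (inode) alloc
 *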
this is a policy, actually */
4238 ext4_mb_group_or_file(ac);
4239
4240 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4241 "left: %u/%u, right %u/%u to %swritable\n",
4242 (unsigned) ar->len, (unsigned) ar->logical,
4243 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4244 (unsigned) ar->lleft, (unsigned) ar->pleft,
4245 (unsigned) ar->lright, (unsigned) ar->pright,
4246 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4247 return 0;
4248
4249 }
4250
4251 static noinline_for_stack void
4252 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4253 struct ext4_locality_group *lg,
4254 int order, int total_entries)
4255 {
4256 ext4_group_t group = 0;
4257 struct ext4_buddy e4b;
4258 struct list_head discard_list;
4259 struct ext4_prealloc_space *pa, *tmp;
4260
4261 mb_debug(1, "discard locality group preallocation\n");
4262
4263 INIT_LIST_HEAD(&discard_list);
4264
4265 spin_lock(&lg->lg_prealloc_lock);
4266 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4267 pa_inode_list) {
4268 spin_lock(&pa->pa_lock);
4269 if (atomic_read(&pa->pa_count)) {
4270 /*
4271 * This is the pa that we just used
4272 * for block allocation, so don't
4273 * free it
4274 */
4275 spin_unlock(&pa->pa_lock);
4276 continue;
4277 }
4278 if (pa->pa_deleted) {
4279 spin_unlock(&pa->pa_lock);
4280 continue;
4281 }
4282 /* only lg prealloc space */
4283 BUG_ON(pa->pa_type != MB_GROUP_PA);
4284
4285 /* seems this one can be freed ... */
4286 pa->pa_deleted = 1;
4287 spin_unlock(&pa->pa_lock);
4288
4289 list_del_rcu(&pa->pa_inode_list);
4290 list_add(&pa->u.pa_tmp_list, &discard_list);
4291
4292 total_entries--;
4293 if (total_entries <= 5) {
4294 /*
4295 * we want to keep only 5 entries,
4296 * allowing the list to grow to 8. This
4297 * makes sure we don't call discard
4298 * soon for this list.
4299 */
4300 break;
4301 }
4302 }
4303 spin_unlock(&lg->lg_prealloc_lock);
4304
4305 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4306
4307 group = ext4_get_group_number(sb, pa->pa_pstart);
4308 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4309 ext4_error(sb, "Error loading buddy information for %u",
4310 group);
4311 continue;
4312 }
4313 ext4_lock_group(sb, group);
4314 list_del(&pa->pa_group_list);
4315 ext4_mb_release_group_pa(&e4b, pa);
4316 ext4_unlock_group(sb, group);
4317
4318 ext4_mb_unload_buddy(&e4b);
4319 list_del(&pa->u.pa_tmp_list);
4320 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4321 }
4322 }
4323
4324 /*
4325 * We have incremented pa_count, so it cannot be freed at this
4326 * point. We also hold lg_mutex, so no parallel allocation is
4327 * possible from this lg. That means pa_free cannot be updated.
4328 *
4329 * A parallel ext4_mb_discard_group_preallocations is possible,
4330 * which can cause the lg_prealloc_list to be updated.
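 *
 * The bucket is picked by the highest set bit of pa_free; e.g. with an
 * illustrative pa_free of 12 clusters, order = fls(12) - 1 = 3, so the
 * pa is filed under lg_prealloc_list[3].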
4331 */ 4332 4333 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4334 { 4335 int order, added = 0, lg_prealloc_count = 1; 4336 struct super_block *sb = ac->ac_sb; 4337 struct ext4_locality_group *lg = ac->ac_lg; 4338 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4339 4340 order = fls(pa->pa_free) - 1; 4341 if (order > PREALLOC_TB_SIZE - 1) 4342 /* The max size of hash table is PREALLOC_TB_SIZE */ 4343 order = PREALLOC_TB_SIZE - 1; 4344 /* Add the prealloc space to lg */ 4345 spin_lock(&lg->lg_prealloc_lock); 4346 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4347 pa_inode_list) { 4348 spin_lock(&tmp_pa->pa_lock); 4349 if (tmp_pa->pa_deleted) { 4350 spin_unlock(&tmp_pa->pa_lock); 4351 continue; 4352 } 4353 if (!added && pa->pa_free < tmp_pa->pa_free) { 4354 /* Add to the tail of the previous entry */ 4355 list_add_tail_rcu(&pa->pa_inode_list, 4356 &tmp_pa->pa_inode_list); 4357 added = 1; 4358 /* 4359 * we want to count the total 4360 * number of entries in the list 4361 */ 4362 } 4363 spin_unlock(&tmp_pa->pa_lock); 4364 lg_prealloc_count++; 4365 } 4366 if (!added) 4367 list_add_tail_rcu(&pa->pa_inode_list, 4368 &lg->lg_prealloc_list[order]); 4369 spin_unlock(&lg->lg_prealloc_lock); 4370 4371 /* Now trim the list to be not more than 8 elements */ 4372 if (lg_prealloc_count > 8) { 4373 ext4_mb_discard_lg_preallocations(sb, lg, 4374 order, lg_prealloc_count); 4375 return; 4376 } 4377 return ; 4378 } 4379 4380 /* 4381 * release all resource we used in allocation 4382 */ 4383 static int ext4_mb_release_context(struct ext4_allocation_context *ac) 4384 { 4385 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4386 struct ext4_prealloc_space *pa = ac->ac_pa; 4387 if (pa) { 4388 if (pa->pa_type == MB_GROUP_PA) { 4389 /* see comment in ext4_mb_use_group_pa() */ 4390 spin_lock(&pa->pa_lock); 4391 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4392 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4393 pa->pa_free -= ac->ac_b_ex.fe_len; 4394 pa->pa_len -= ac->ac_b_ex.fe_len; 4395 spin_unlock(&pa->pa_lock); 4396 } 4397 } 4398 if (pa) { 4399 /* 4400 * We want to add the pa to the right bucket. 4401 * Remove it from the list and while adding 4402 * make sure the list to which we are adding 4403 * doesn't grow big. 
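 * (ext4_mb_add_n_trim() above bounds each bucket at 8 entries and,
 * once over that, discards it back down to about 5.)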
4404 */ 4405 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { 4406 spin_lock(pa->pa_obj_lock); 4407 list_del_rcu(&pa->pa_inode_list); 4408 spin_unlock(pa->pa_obj_lock); 4409 ext4_mb_add_n_trim(ac); 4410 } 4411 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4412 } 4413 if (ac->ac_bitmap_page) 4414 put_page(ac->ac_bitmap_page); 4415 if (ac->ac_buddy_page) 4416 put_page(ac->ac_buddy_page); 4417 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4418 mutex_unlock(&ac->ac_lg->lg_mutex); 4419 ext4_mb_collect_stats(ac); 4420 return 0; 4421 } 4422 4423 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 4424 { 4425 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4426 int ret; 4427 int freed = 0; 4428 4429 trace_ext4_mb_discard_preallocations(sb, needed); 4430 for (i = 0; i < ngroups && needed > 0; i++) { 4431 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4432 freed += ret; 4433 needed -= ret; 4434 } 4435 4436 return freed; 4437 } 4438 4439 /* 4440 * Main entry point into mballoc to allocate blocks 4441 * it tries to use preallocation first, then falls back 4442 * to usual allocation 4443 */ 4444 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4445 struct ext4_allocation_request *ar, int *errp) 4446 { 4447 int freed; 4448 struct ext4_allocation_context *ac = NULL; 4449 struct ext4_sb_info *sbi; 4450 struct super_block *sb; 4451 ext4_fsblk_t block = 0; 4452 unsigned int inquota = 0; 4453 unsigned int reserv_clstrs = 0; 4454 4455 might_sleep(); 4456 sb = ar->inode->i_sb; 4457 sbi = EXT4_SB(sb); 4458 4459 trace_ext4_request_blocks(ar); 4460 4461 /* Allow to use superuser reservation for quota file */ 4462 if (IS_NOQUOTA(ar->inode)) 4463 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 4464 4465 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 4466 /* Without delayed allocation we need to verify 4467 * there is enough free blocks to do block allocation 4468 * and verify allocation doesn't exceed the quota limits. 
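 * The claim loop below halves the request on each failure; e.g. a
 * 64-cluster request degrades 64 -> 32 -> 16 -> ... and only when
 * ar->len reaches zero do we give up with -ENOSPC.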
4469 */ 4470 while (ar->len && 4471 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 4472 4473 /* let others to free the space */ 4474 cond_resched(); 4475 ar->len = ar->len >> 1; 4476 } 4477 if (!ar->len) { 4478 *errp = -ENOSPC; 4479 return 0; 4480 } 4481 reserv_clstrs = ar->len; 4482 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 4483 dquot_alloc_block_nofail(ar->inode, 4484 EXT4_C2B(sbi, ar->len)); 4485 } else { 4486 while (ar->len && 4487 dquot_alloc_block(ar->inode, 4488 EXT4_C2B(sbi, ar->len))) { 4489 4490 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4491 ar->len--; 4492 } 4493 } 4494 inquota = ar->len; 4495 if (ar->len == 0) { 4496 *errp = -EDQUOT; 4497 goto out; 4498 } 4499 } 4500 4501 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 4502 if (!ac) { 4503 ar->len = 0; 4504 *errp = -ENOMEM; 4505 goto out; 4506 } 4507 4508 *errp = ext4_mb_initialize_context(ac, ar); 4509 if (*errp) { 4510 ar->len = 0; 4511 goto out; 4512 } 4513 4514 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 4515 if (!ext4_mb_use_preallocated(ac)) { 4516 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 4517 ext4_mb_normalize_request(ac, ar); 4518 repeat: 4519 /* allocate space in core */ 4520 *errp = ext4_mb_regular_allocator(ac); 4521 if (*errp) 4522 goto discard_and_exit; 4523 4524 /* as we've just preallocated more space than 4525 * user requested originally, we store allocated 4526 * space in a special descriptor */ 4527 if (ac->ac_status == AC_STATUS_FOUND && 4528 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 4529 *errp = ext4_mb_new_preallocation(ac); 4530 if (*errp) { 4531 discard_and_exit: 4532 ext4_discard_allocated_blocks(ac); 4533 goto errout; 4534 } 4535 } 4536 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4537 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 4538 if (*errp) { 4539 ext4_discard_allocated_blocks(ac); 4540 goto errout; 4541 } else { 4542 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4543 ar->len = ac->ac_b_ex.fe_len; 4544 } 4545 } else { 4546 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 4547 if (freed) 4548 goto repeat; 4549 *errp = -ENOSPC; 4550 } 4551 4552 errout: 4553 if (*errp) { 4554 ac->ac_b_ex.fe_len = 0; 4555 ar->len = 0; 4556 ext4_mb_show_ac(ac); 4557 } 4558 ext4_mb_release_context(ac); 4559 out: 4560 if (ac) 4561 kmem_cache_free(ext4_ac_cachep, ac); 4562 if (inquota && ar->len < inquota) 4563 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 4564 if (!ar->len) { 4565 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 4566 /* release all the reserved blocks if non delalloc */ 4567 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4568 reserv_clstrs); 4569 } 4570 4571 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 4572 4573 return block; 4574 } 4575 4576 /* 4577 * We can merge two free data extents only if the physical blocks 4578 * are contiguous, AND the extents were freed by the same transaction, 4579 * AND the blocks are associated with the same group. 
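 * E.g. (illustrative numbers) entries {tid 7, group 2, clusters
 * 100..149} and {tid 7, group 2, clusters 150..199} merge into one
 * 100..199 extent; a differing tid or a gap between the ranges
 * prevents the merge.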
4580 */
4581 static int can_merge(struct ext4_free_data *entry1,
4582 struct ext4_free_data *entry2)
4583 {
4584 if ((entry1->efd_tid == entry2->efd_tid) &&
4585 (entry1->efd_group == entry2->efd_group) &&
4586 ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
4587 return 1;
4588 return 0;
4589 }
4590
4591 static noinline_for_stack int
4592 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4593 struct ext4_free_data *new_entry)
4594 {
4595 ext4_group_t group = e4b->bd_group;
4596 ext4_grpblk_t cluster;
4597 ext4_grpblk_t clusters = new_entry->efd_count;
4598 struct ext4_free_data *entry;
4599 struct ext4_group_info *db = e4b->bd_info;
4600 struct super_block *sb = e4b->bd_sb;
4601 struct ext4_sb_info *sbi = EXT4_SB(sb);
4602 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4603 struct rb_node *parent = NULL, *new_node;
4604
4605 BUG_ON(!ext4_handle_valid(handle));
4606 BUG_ON(e4b->bd_bitmap_page == NULL);
4607 BUG_ON(e4b->bd_buddy_page == NULL);
4608
4609 new_node = &new_entry->efd_node;
4610 cluster = new_entry->efd_start_cluster;
4611
4612 if (!*n) {
4613 /* first free block extent. We need to
4614 * protect the buddy cache from being freed,
4615 * otherwise we'll refresh it from the
4616 * on-disk bitmap and lose not-yet-available
4617 * blocks */
4618 get_page(e4b->bd_buddy_page);
4619 get_page(e4b->bd_bitmap_page);
4620 }
4621 while (*n) {
4622 parent = *n;
4623 entry = rb_entry(parent, struct ext4_free_data, efd_node);
4624 if (cluster < entry->efd_start_cluster)
4625 n = &(*n)->rb_left;
4626 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
4627 n = &(*n)->rb_right;
4628 else {
4629 ext4_grp_locked_error(sb, group, 0,
4630 ext4_group_first_block_no(sb, group) +
4631 EXT4_C2B(sbi, cluster),
4632 "Block already on to-be-freed list");
4633 return 0;
4634 }
4635 }
4636
4637 rb_link_node(new_node, parent, n);
4638 rb_insert_color(new_node, &db->bb_free_root);
4639
4640 /* Now try to see if the extent can be merged to the left and right */
4641 node = rb_prev(new_node);
4642 if (node) {
4643 entry = rb_entry(node, struct ext4_free_data, efd_node);
4644 if (can_merge(entry, new_entry) &&
4645 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4646 new_entry->efd_start_cluster = entry->efd_start_cluster;
4647 new_entry->efd_count += entry->efd_count;
4648 rb_erase(node, &(db->bb_free_root));
4649 kmem_cache_free(ext4_free_data_cachep, entry);
4650 }
4651 }
4652
4653 node = rb_next(new_node);
4654 if (node) {
4655 entry = rb_entry(node, struct ext4_free_data, efd_node);
4656 if (can_merge(new_entry, entry) &&
4657 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
4658 new_entry->efd_count += entry->efd_count;
4659 rb_erase(node, &(db->bb_free_root));
4660 kmem_cache_free(ext4_free_data_cachep, entry);
4661 }
4662 }
4663 /* Add the extent to the transaction's private list */
4664 new_entry->efd_jce.jce_func = ext4_free_data_callback;
4665 spin_lock(&sbi->s_md_lock);
4666 _ext4_journal_callback_add(handle, &new_entry->efd_jce);
4667 sbi->s_mb_free_pending += clusters;
4668 spin_unlock(&sbi->s_md_lock);
4669 return 0;
4670 }
4671
4672 /**
4673 * ext4_free_blocks() -- Free given blocks and update quota
4674 * @handle: handle for this transaction
4675 * @inode: inode
4676 * @block: start physical block to free
4677 * @count: number of blocks to free
4678 * @flags: flags used by ext4_free_blocks
4679 */
4680 void ext4_free_blocks(handle_t *handle, struct inode *inode,
4681 struct buffer_head *bh, ext4_fsblk_t block,
4682 unsigned long
count, int flags) 4683 { 4684 struct buffer_head *bitmap_bh = NULL; 4685 struct super_block *sb = inode->i_sb; 4686 struct ext4_group_desc *gdp; 4687 unsigned int overflow; 4688 ext4_grpblk_t bit; 4689 struct buffer_head *gd_bh; 4690 ext4_group_t block_group; 4691 struct ext4_sb_info *sbi; 4692 struct ext4_buddy e4b; 4693 unsigned int count_clusters; 4694 int err = 0; 4695 int ret; 4696 4697 might_sleep(); 4698 if (bh) { 4699 if (block) 4700 BUG_ON(block != bh->b_blocknr); 4701 else 4702 block = bh->b_blocknr; 4703 } 4704 4705 sbi = EXT4_SB(sb); 4706 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 4707 !ext4_data_block_valid(sbi, block, count)) { 4708 ext4_error(sb, "Freeing blocks not in datazone - " 4709 "block = %llu, count = %lu", block, count); 4710 goto error_return; 4711 } 4712 4713 ext4_debug("freeing block %llu\n", block); 4714 trace_ext4_free_blocks(inode, block, count, flags); 4715 4716 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 4717 BUG_ON(count > 1); 4718 4719 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4720 inode, bh, block); 4721 } 4722 4723 /* 4724 * If the extent to be freed does not begin on a cluster 4725 * boundary, we need to deal with partial clusters at the 4726 * beginning and end of the extent. Normally we will free 4727 * blocks at the beginning or the end unless we are explicitly 4728 * requested to avoid doing so. 4729 */ 4730 overflow = EXT4_PBLK_COFF(sbi, block); 4731 if (overflow) { 4732 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 4733 overflow = sbi->s_cluster_ratio - overflow; 4734 block += overflow; 4735 if (count > overflow) 4736 count -= overflow; 4737 else 4738 return; 4739 } else { 4740 block -= overflow; 4741 count += overflow; 4742 } 4743 } 4744 overflow = EXT4_LBLK_COFF(sbi, count); 4745 if (overflow) { 4746 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 4747 if (count > overflow) 4748 count -= overflow; 4749 else 4750 return; 4751 } else 4752 count += sbi->s_cluster_ratio - overflow; 4753 } 4754 4755 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 4756 int i; 4757 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 4758 4759 for (i = 0; i < count; i++) { 4760 cond_resched(); 4761 if (is_metadata) 4762 bh = sb_find_get_block(inode->i_sb, block + i); 4763 ext4_forget(handle, is_metadata, inode, bh, block + i); 4764 } 4765 } 4766 4767 do_more: 4768 overflow = 0; 4769 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4770 4771 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 4772 ext4_get_group_info(sb, block_group)))) 4773 return; 4774 4775 /* 4776 * Check to see if we are freeing blocks across a group 4777 * boundary. 
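 * E.g. with 32768 blocks per group and a 1:1 block/cluster ratio
 * (assumed here only for illustration), freeing 16 blocks at in-group
 * offset 32760 frees 8 in this group and loops back via do_more below
 * for the remaining 8 in the next group.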
4778 */ 4779 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4780 overflow = EXT4_C2B(sbi, bit) + count - 4781 EXT4_BLOCKS_PER_GROUP(sb); 4782 count -= overflow; 4783 } 4784 count_clusters = EXT4_NUM_B2C(sbi, count); 4785 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4786 if (IS_ERR(bitmap_bh)) { 4787 err = PTR_ERR(bitmap_bh); 4788 bitmap_bh = NULL; 4789 goto error_return; 4790 } 4791 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 4792 if (!gdp) { 4793 err = -EIO; 4794 goto error_return; 4795 } 4796 4797 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 4798 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 4799 in_range(block, ext4_inode_table(sb, gdp), 4800 EXT4_SB(sb)->s_itb_per_group) || 4801 in_range(block + count - 1, ext4_inode_table(sb, gdp), 4802 EXT4_SB(sb)->s_itb_per_group)) { 4803 4804 ext4_error(sb, "Freeing blocks in system zone - " 4805 "Block = %llu, count = %lu", block, count); 4806 /* err = 0. ext4_std_error should be a no op */ 4807 goto error_return; 4808 } 4809 4810 BUFFER_TRACE(bitmap_bh, "getting write access"); 4811 err = ext4_journal_get_write_access(handle, bitmap_bh); 4812 if (err) 4813 goto error_return; 4814 4815 /* 4816 * We are about to modify some metadata. Call the journal APIs 4817 * to unshare ->b_data if a currently-committing transaction is 4818 * using it 4819 */ 4820 BUFFER_TRACE(gd_bh, "get_write_access"); 4821 err = ext4_journal_get_write_access(handle, gd_bh); 4822 if (err) 4823 goto error_return; 4824 #ifdef AGGRESSIVE_CHECK 4825 { 4826 int i; 4827 for (i = 0; i < count_clusters; i++) 4828 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4829 } 4830 #endif 4831 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 4832 4833 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 4834 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 4835 GFP_NOFS|__GFP_NOFAIL); 4836 if (err) 4837 goto error_return; 4838 4839 /* 4840 * We need to make sure we don't reuse the freed block until after the 4841 * transaction is committed. We make an exception if the inode is to be 4842 * written in writeback mode since writeback mode has weak data 4843 * consistency guarantees. 4844 */ 4845 if (ext4_handle_valid(handle) && 4846 ((flags & EXT4_FREE_BLOCKS_METADATA) || 4847 !ext4_should_writeback_data(inode))) { 4848 struct ext4_free_data *new_entry; 4849 /* 4850 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 4851 * to fail. 4852 */ 4853 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 4854 GFP_NOFS|__GFP_NOFAIL); 4855 new_entry->efd_start_cluster = bit; 4856 new_entry->efd_group = block_group; 4857 new_entry->efd_count = count_clusters; 4858 new_entry->efd_tid = handle->h_transaction->t_tid; 4859 4860 ext4_lock_group(sb, block_group); 4861 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4862 ext4_mb_free_metadata(handle, &e4b, new_entry); 4863 } else { 4864 /* need to update group_info->bb_free and bitmap 4865 * with group lock held. 
generate_buddy looks at
4866 * them with the group lock held
4867 */
4868 if (test_opt(sb, DISCARD)) {
4869 err = ext4_issue_discard(sb, block_group, bit, count);
4870 if (err && err != -EOPNOTSUPP)
4871 ext4_msg(sb, KERN_WARNING, "discard request in"
4872 " group:%d block:%d count:%lu failed"
4873 " with %d", block_group, bit, count,
4874 err);
4875 } else
4876 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
4877
4878 ext4_lock_group(sb, block_group);
4879 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4880 mb_free_blocks(inode, &e4b, bit, count_clusters);
4881 }
4882
4883 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4884 ext4_free_group_clusters_set(sb, gdp, ret);
4885 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
4886 ext4_group_desc_csum_set(sb, block_group, gdp);
4887 ext4_unlock_group(sb, block_group);
4888
4889 if (sbi->s_log_groups_per_flex) {
4890 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4891 atomic64_add(count_clusters,
4892 &sbi->s_flex_groups[flex_group].free_clusters);
4893 }
4894
4895 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
4896 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
4897 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4898
4899 ext4_mb_unload_buddy(&e4b);
4900
4901 /* We dirtied the bitmap block */
4902 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4903 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4904
4905 /* And the group descriptor block */
4906 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4907 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4908 if (!err)
4909 err = ret;
4910
4911 if (overflow && !err) {
4912 block += count;
4913 count = overflow;
4914 put_bh(bitmap_bh);
4915 goto do_more;
4916 }
4917 error_return:
4918 brelse(bitmap_bh);
4919 ext4_std_error(sb, err);
4920 return;
4921 }
4922
4923 /**
4924 * ext4_group_add_blocks() -- Add given blocks to an existing group
4925 * @handle: handle to this transaction
4926 * @sb: super block
4927 * @block: start physical block to add to the block group
4928 * @count: number of blocks to add
4929 *
4930 * This marks the blocks as free in the bitmap and buddy.
4931 */
4932 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4933 ext4_fsblk_t block, unsigned long count)
4934 {
4935 struct buffer_head *bitmap_bh = NULL;
4936 struct buffer_head *gd_bh;
4937 ext4_group_t block_group;
4938 ext4_grpblk_t bit;
4939 unsigned int i;
4940 struct ext4_group_desc *desc;
4941 struct ext4_sb_info *sbi = EXT4_SB(sb);
4942 struct ext4_buddy e4b;
4943 int err = 0, ret, blk_free_count;
4944 ext4_grpblk_t blocks_freed;
4945
4946 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4947
4948 if (count == 0)
4949 return 0;
4950
4951 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4952 /*
4953 * Check to see if we are adding blocks across a group
4954 * boundary.
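 * Unlike ext4_free_blocks() above there is no do_more loop here: a
 * range that crosses the group boundary is rejected with -EINVAL.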
4955 */
4956 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4957 ext4_warning(sb, "too many blocks added to group %u",
4958 block_group);
4959 err = -EINVAL;
4960 goto error_return;
4961 }
4962
4963 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4964 if (IS_ERR(bitmap_bh)) {
4965 err = PTR_ERR(bitmap_bh);
4966 bitmap_bh = NULL;
4967 goto error_return;
4968 }
4969
4970 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4971 if (!desc) {
4972 err = -EIO;
4973 goto error_return;
4974 }
4975
4976 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4977 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4978 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4979 in_range(block + count - 1, ext4_inode_table(sb, desc),
4980 sbi->s_itb_per_group)) {
4981 ext4_error(sb, "Adding blocks in system zones - "
4982 "Block = %llu, count = %lu",
4983 block, count);
4984 err = -EINVAL;
4985 goto error_return;
4986 }
4987
4988 BUFFER_TRACE(bitmap_bh, "getting write access");
4989 err = ext4_journal_get_write_access(handle, bitmap_bh);
4990 if (err)
4991 goto error_return;
4992
4993 /*
4994 * We are about to modify some metadata. Call the journal APIs
4995 * to unshare ->b_data if a currently-committing transaction is
4996 * using it
4997 */
4998 BUFFER_TRACE(gd_bh, "get_write_access");
4999 err = ext4_journal_get_write_access(handle, gd_bh);
5000 if (err)
5001 goto error_return;
5002
5003 for (i = 0, blocks_freed = 0; i < count; i++) {
5004 BUFFER_TRACE(bitmap_bh, "clear bit");
5005 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
5006 ext4_error(sb, "bit already cleared for block %llu",
5007 (ext4_fsblk_t)(block + i));
5008 BUFFER_TRACE(bitmap_bh, "bit already cleared");
5009 } else {
5010 blocks_freed++;
5011 }
5012 }
5013
5014 err = ext4_mb_load_buddy(sb, block_group, &e4b);
5015 if (err)
5016 goto error_return;
5017
5018 /*
5019 * need to update group_info->bb_free and bitmap
5020 * with the group lock held. generate_buddy looks at
5021 * them with the group lock held
5022 */
5023 ext4_lock_group(sb, block_group);
5024 mb_clear_bits(bitmap_bh->b_data, bit, count);
5025 mb_free_blocks(NULL, &e4b, bit, count);
5026 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
5027 ext4_free_group_clusters_set(sb, desc, blk_free_count);
5028 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5029 ext4_group_desc_csum_set(sb, block_group, desc);
5030 ext4_unlock_group(sb, block_group);
5031 percpu_counter_add(&sbi->s_freeclusters_counter,
5032 EXT4_NUM_B2C(sbi, blocks_freed));
5033
5034 if (sbi->s_log_groups_per_flex) {
5035 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5036 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
5037 &sbi->s_flex_groups[flex_group].free_clusters);
5038 }
5039
5040 ext4_mb_unload_buddy(&e4b);
5041
5042 /* We dirtied the bitmap block */
5043 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5044 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5045
5046 /* And the group descriptor block */
5047 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5048 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5049 if (!err)
5050 err = ret;
5051
5052 error_return:
5053 brelse(bitmap_bh);
5054 ext4_std_error(sb, err);
5055 return err;
5056 }
5057
5058 /**
5059 * ext4_trim_extent -- function to TRIM a single free extent in the group
5060 * @sb: super block for the file system
5061 * @start: starting block of the free extent in the alloc. group
5062 * @count: number of blocks to TRIM
5063 * @group: alloc.
group we are working with
5064 * @e4b: ext4 buddy for the group
5065 *
5066 * Trim "count" blocks starting at "start" in the "group". To ensure that
5067 * no one will allocate those blocks, mark them as used in the buddy
5068 * bitmap. This must be called under the group lock.
5069 */
5070 static int ext4_trim_extent(struct super_block *sb, int start, int count,
5071 ext4_group_t group, struct ext4_buddy *e4b)
5072 __releases(bitlock)
5073 __acquires(bitlock)
5074 {
5075 struct ext4_free_extent ex;
5076 int ret = 0;
5077
5078 trace_ext4_trim_extent(sb, group, start, count);
5079
5080 assert_spin_locked(ext4_group_lock_ptr(sb, group));
5081
5082 ex.fe_start = start;
5083 ex.fe_group = group;
5084 ex.fe_len = count;
5085
5086 /*
5087 * Mark blocks used, so no one can reuse them while
5088 * being trimmed.
5089 */
5090 mb_mark_used(e4b, &ex);
5091 ext4_unlock_group(sb, group);
5092 ret = ext4_issue_discard(sb, group, start, count);
5093 ext4_lock_group(sb, group);
5094 mb_free_blocks(NULL, e4b, start, ex.fe_len);
5095 return ret;
5096 }
5097
5098 /**
5099 * ext4_trim_all_free -- function to trim all free space in alloc. group
5100 * @sb: super block for file system
5101 * @group: group to be trimmed
5102 * @start: first group block to examine
5103 * @max: last group block to examine
5104 * @minblocks: minimum extent block count
5105 *
5106 * ext4_trim_all_free walks through the group's block bitmap searching
5107 * for free extents. When a free extent of at least minblocks blocks is
5108 * found, it is marked as used in the group buddy bitmap so that nobody
5109 * can reuse it while it is being trimmed; a TRIM command is then issued
5110 * on the extent, and the extent is freed again in the group buddy
5111 * bitmap. This is repeated until the whole group has been scanned.
5112 *
5113 * Returns the number of blocks trimmed, or a negative error code on
5114 * failure.
5115 */
5116 static ext4_grpblk_t
5117 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5118 ext4_grpblk_t start, ext4_grpblk_t max,
5119 ext4_grpblk_t minblocks)
5120 {
5121 void *bitmap;
5122 ext4_grpblk_t next, count = 0, free_count = 0;
5123 struct ext4_buddy e4b;
5124 int ret = 0;
5125
5126 trace_ext4_trim_all_free(sb, group, start, max);
5127
5128 ret = ext4_mb_load_buddy(sb, group, &e4b);
5129 if (ret) {
5130 ext4_error(sb, "Error in loading buddy "
5131 "information for %u", group);
5132 return ret;
5133 }
5134 bitmap = e4b.bd_bitmap;
5135
5136 ext4_lock_group(sb, group);
5137 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5138 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5139 goto out;
5140
5141 start = (e4b.bd_info->bb_first_free > start) ?
5142 e4b.bd_info->bb_first_free : start; 5143 5144 while (start <= max) { 5145 start = mb_find_next_zero_bit(bitmap, max + 1, start); 5146 if (start > max) 5147 break; 5148 next = mb_find_next_bit(bitmap, max + 1, start); 5149 5150 if ((next - start) >= minblocks) { 5151 ret = ext4_trim_extent(sb, start, 5152 next - start, group, &e4b); 5153 if (ret && ret != -EOPNOTSUPP) 5154 break; 5155 ret = 0; 5156 count += next - start; 5157 } 5158 free_count += next - start; 5159 start = next + 1; 5160 5161 if (fatal_signal_pending(current)) { 5162 count = -ERESTARTSYS; 5163 break; 5164 } 5165 5166 if (need_resched()) { 5167 ext4_unlock_group(sb, group); 5168 cond_resched(); 5169 ext4_lock_group(sb, group); 5170 } 5171 5172 if ((e4b.bd_info->bb_free - free_count) < minblocks) 5173 break; 5174 } 5175 5176 if (!ret) { 5177 ret = count; 5178 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 5179 } 5180 out: 5181 ext4_unlock_group(sb, group); 5182 ext4_mb_unload_buddy(&e4b); 5183 5184 ext4_debug("trimmed %d blocks in the group %d\n", 5185 count, group); 5186 5187 return ret; 5188 } 5189 5190 /** 5191 * ext4_trim_fs() -- trim ioctl handle function 5192 * @sb: superblock for filesystem 5193 * @range: fstrim_range structure 5194 * 5195 * start: First Byte to trim 5196 * len: number of Bytes to trim from start 5197 * minlen: minimum extent length in Bytes 5198 * ext4_trim_fs goes through all allocation groups containing Bytes from 5199 * start to start+len. For each such a group ext4_trim_all_free function 5200 * is invoked to trim all free space. 5201 */ 5202 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 5203 { 5204 struct ext4_group_info *grp; 5205 ext4_group_t group, first_group, last_group; 5206 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 5207 uint64_t start, end, minlen, trimmed = 0; 5208 ext4_fsblk_t first_data_blk = 5209 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 5210 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 5211 int ret = 0; 5212 5213 start = range->start >> sb->s_blocksize_bits; 5214 end = start + (range->len >> sb->s_blocksize_bits) - 1; 5215 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 5216 range->minlen >> sb->s_blocksize_bits); 5217 5218 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 5219 start >= max_blks || 5220 range->len < sb->s_blocksize) 5221 return -EINVAL; 5222 if (end >= max_blks) 5223 end = max_blks - 1; 5224 if (end <= first_data_blk) 5225 goto out; 5226 if (start < first_data_blk) 5227 start = first_data_blk; 5228 5229 /* Determine first and last group to examine based on start and end */ 5230 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 5231 &first_group, &first_cluster); 5232 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 5233 &last_group, &last_cluster); 5234 5235 /* end now represents the last cluster to discard in this group */ 5236 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 5237 5238 for (group = first_group; group <= last_group; group++) { 5239 grp = ext4_get_group_info(sb, group); 5240 /* We only do this if the grp has never been initialized */ 5241 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 5242 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 5243 if (ret) 5244 break; 5245 } 5246 5247 /* 5248 * For all the groups except the last one, last cluster will 5249 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 5250 * change it for the last group, note that last_cluster is 5251 * already computed earlier by ext4_get_group_no_and_offset() 5252 */ 5253 if (group == last_group) 5254 end = last_cluster; 5255 5256 if 
(grp->bb_free >= minlen) { 5257 cnt = ext4_trim_all_free(sb, group, first_cluster, 5258 end, minlen); 5259 if (cnt < 0) { 5260 ret = cnt; 5261 break; 5262 } 5263 trimmed += cnt; 5264 } 5265 5266 /* 5267 * For every group except the first one, we are sure 5268 * that the first cluster to discard will be cluster #0. 5269 */ 5270 first_cluster = 0; 5271 } 5272 5273 if (!ret) 5274 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); 5275 5276 out: 5277 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 5278 return ret; 5279 } 5280 5281 /* Iterate all the free extents in the group. */ 5282 int 5283 ext4_mballoc_query_range( 5284 struct super_block *sb, 5285 ext4_group_t group, 5286 ext4_grpblk_t start, 5287 ext4_grpblk_t end, 5288 ext4_mballoc_query_range_fn formatter, 5289 void *priv) 5290 { 5291 void *bitmap; 5292 ext4_grpblk_t next; 5293 struct ext4_buddy e4b; 5294 int error; 5295 5296 error = ext4_mb_load_buddy(sb, group, &e4b); 5297 if (error) 5298 return error; 5299 bitmap = e4b.bd_bitmap; 5300 5301 ext4_lock_group(sb, group); 5302 5303 start = (e4b.bd_info->bb_first_free > start) ? 5304 e4b.bd_info->bb_first_free : start; 5305 if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 5306 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 5307 5308 while (start <= end) { 5309 start = mb_find_next_zero_bit(bitmap, end + 1, start); 5310 if (start > end) 5311 break; 5312 next = mb_find_next_bit(bitmap, end + 1, start); 5313 5314 ext4_unlock_group(sb, group); 5315 error = formatter(sb, group, start, next - start, priv); 5316 if (error) 5317 goto out_unload; 5318 ext4_lock_group(sb, group); 5319 5320 start = next + 1; 5321 } 5322 5323 ext4_unlock_group(sb, group); 5324 out_unload: 5325 ext4_mb_unload_buddy(&e4b); 5326 5327 return error; 5328 } 5329
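/*
 * For illustration only: a minimal formatter callback for
 * ext4_mballoc_query_range(), matching how the formatter is invoked
 * above; the name and the cluster-summing use are hypothetical.
 * Returning non-zero from the callback stops the walk with that error.
 *
 *	static int sum_free_fn(struct super_block *sb, ext4_group_t group,
 *			       ext4_grpblk_t start, ext4_grpblk_t len,
 *			       void *priv)
 *	{
 *		*(ext4_grpblk_t *)priv += len;
 *		return 0;
 *	}
 */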