/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * BSD ufs-inspired inode and directory allocation by
 * Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "xattr.h"
#include "acl.h"
#include "group.h"

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __FUNCTION__, "Checksum bad for group %lu\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
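/*
 * For example, on a filesystem with 8192 inodes per group and 4096-byte
 * blocks (32768 bits per bitmap block), ext4_init_inode_bitmap() above
 * zeroes the first 1024 bytes and mark_bitmap_end() then sets bits 8192
 * and up, so the unused tail of the bitmap block can never be handed out
 * as a free inode.  Only the bits between start_bit and the next byte
 * boundary need the atomic ext4_set_bit(); the remaining whole bytes are
 * filled with a single memset().
 */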
/*
 * Read the inode allocation bitmap for a given block_group.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		goto error_out;
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
		if (bh && !buffer_uptodate(bh)) {
			lock_buffer(bh);
			if (!buffer_uptodate(bh)) {
				ext4_init_inode_bitmap(sb, bh, block_group,
						       desc);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
		}
	} else {
		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
	}
	if (!bh)
		ext4_error(sb, "read_inode_bitmap",
			   "Cannot read inode bitmap - "
			   "block_group = %lu, inode_bitmap = %llu",
			   block_group, ext4_inode_bitmap(sb, desc));
error_out:
	return bh;
}
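/*
 * Note the two paths above: for a group still flagged
 * EXT4_BG_INODE_UNINIT (the EXT4_FEATURE_RO_COMPAT_GDT_CSUM
 * lazy-initialization feature used elsewhere in this file) there is
 * nothing valid on disk yet, so we only sb_getblk() the buffer and
 * construct the bitmap in memory; for an initialized group we must
 * sb_bread() the real on-disk bitmap.  The double buffer_uptodate()
 * check around lock_buffer() keeps two racing readers from
 * initializing the same buffer twice.
 */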
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc * gdp;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext4_free_inode: inode has count=%d\n",
			atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext4_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext4_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error (sb, "ext4_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
				   bit, bitmap_bh->b_data))
		ext4_error (sb, "ext4_free_inode",
			    "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			gdp->bg_free_inodes_count = cpu_to_le16(
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
					le16_to_cpu(gdp->bg_used_dirs_count) - 1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static ext4_group_t find_group_dir(struct super_block *sb, struct inode *parent)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group, best_group = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			best_group = group;
			best_desc = desc;
		}
	}
	return best_group;
}
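/*
 * find_group_dir() implements the pre-Orlov directory placement policy,
 * still selectable with the "oldalloc" mount option (see the test_opt()
 * in ext4_new_inode() below): scan every group once and pick, among the
 * groups with at least the average number of free inodes, the one with
 * the most free blocks.  It returns (ext4_group_t) -1 if no group
 * qualifies.
 */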
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the other directories, the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it is already running too large a debt (max_debt).
 * Parent's group is preferred, if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256

static ext4_group_t find_group_orlov(struct super_block *sb,
				     struct inode *parent)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t group = -1, i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t best_group = -1;

		get_random_bytes(&group, sizeof(group));
		parent_group = (unsigned)group % ngroups;
		for (i = 0; i < ngroups; i++) {
			group = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc (sb, group, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			best_group = group;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (best_group != (ext4_group_t) -1)
			return best_group;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return group;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return group;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
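/*
 * A worked example of the max_debt computation above, assuming a
 * filesystem with 32768 blocks and 8192 inodes per group where each
 * existing directory owns about 512 blocks: blocks_per_dir = 512, so
 * max_debt = 32768 / max(512, BLOCK_COST) = 64; 64 * INODE_COST = 4096
 * does not exceed inodes_per_group, and 64 lies within 1..255, so
 * max_debt stays 64.  Note that this version computes the debt ceiling
 * but keeps no per-group debt array, so only max_dirs, min_inodes and
 * min_blocks actually filter groups in the search loop above.
 */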
static ext4_group_t find_group_other(struct super_block *sb,
				     struct inode *parent)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t group, i;

	/*
	 * Try to place the inode in its parent directory
	 */
	group = parent_group;
	desc = ext4_get_group_desc (sb, group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
	    le16_to_cpu(desc->bg_free_blocks_count))
		return group;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	group = (group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
		    le16_to_cpu(desc->bg_free_blocks_count))
			return group;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++group >= ngroups)
			group = 0;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return group;
	}

	return -1;
}
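/*
 * The probe sequence above doubles the stride on each pass, so starting
 * from hash group h it examines h+1, h+3, h+7, h+15, ... (mod ngroups),
 * touching at most log2(ngroups) groups before falling back to the
 * linear scan.  For example, with ngroups = 16 and h = 5 the candidate
 * groups are 6, 8, 12 and 4.
 */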
/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t group;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext4_group_desc * gdp = NULL;
	struct ext4_super_block * es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i, free = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt (sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						 ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								  bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}
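		/*
		 * ext4_set_bit_atomic() returns the old value of the bit,
		 * so a zero return above means this CPU claimed the inode
		 * first; a non-zero return means a concurrent allocator
		 * beat us to it, in which case we release our journal
		 * write access to the bitmap and retry from the next bit.
		 */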
		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __FUNCTION__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %lu, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err) goto fail;

	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}

	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */

			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}
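		/*
		 * "free" is now the count of inodes known to be initialized
		 * in this group's inode table.  For example, with 8192
		 * inodes per group and bg_itable_unused = 6192, the first
		 * 2000 table slots are initialized; allocating relative
		 * inode 2001 must therefore extend the initialized region
		 * and shrink bg_itable_unused to 8192 - 2001 = 6191.
		 */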
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}

	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL;
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}
	if (test_opt(sb, EXTENTS)) {
		EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
		ext4_ext_tree_init(handle, inode);
		if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
			err = ext4_journal_get_write_access(handle,
							    EXT4_SB(sb)->s_sbh);
			if (err) goto fail;
			EXT4_SET_INCOMPAT_FEATURE(sb,
						  EXT4_FEATURE_INCOMPAT_EXTENTS);
			BUFFER_TRACE(EXT4_SB(sb)->s_sbh,
				     "call ext4_journal_dirty_metadata");
			err = ext4_journal_dirty_metadata(handle,
							  EXT4_SB(sb)->s_sbh);
		}
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
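/*
 * A sketch of the expected calling convention (cf. ext4_create() in
 * namei.c): ext4_new_inode() never returns NULL, so callers test the
 * result with IS_ERR() rather than a NULL check:
 *
 *	inode = ext4_new_inode(handle, dir, mode);
 *	err = PTR_ERR(inode);
 *	if (!IS_ERR(inode)) {
 *		inode->i_op = &ext4_file_inode_operations;
 *		...
 *	}
 */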
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto out;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __FUNCTION__,
			     "inode bitmap error for orphan %lu", ino);
		goto out;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data) ||
	    !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
	    NEXT_ORPHAN(inode) > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan inode %lu!  e2fsck was run?", ino);
		printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
		printk(KERN_NOTICE "inode=%p\n", inode);
		if (inode) {
			printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
			       is_bad_inode(inode));
			printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
			       NEXT_ORPHAN(inode));
			printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		}
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode && inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
		inode = NULL;
	}
out:
	brelse(bitmap_bh);
	return inode;
}

unsigned long ext4_count_free_inodes (struct super_block * sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk("group %lu: stored = %d, counted = %lu\n",
		       i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		cond_resched();
	}
	return desc_count;
#endif
}
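/*
 * Like ext4_count_free_inodes(), the directory count below sums the
 * per-group descriptors; at mount time it seeds the s_dirs_counter
 * percpu counter that find_group_orlov() consults when spreading new
 * directories.
 */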
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs (struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}