1 /* 2 * linux/fs/ext4/ialloc.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * BSD ufs-inspired inode and directory allocation by 10 * Stephen Tweedie (sct@redhat.com), 1993 11 * Big-endian to little-endian byte-swapping/bitmaps by 12 * David S. Miller (davem@caip.rutgers.edu), 1995 13 */ 14 15 #include <linux/time.h> 16 #include <linux/fs.h> 17 #include <linux/jbd2.h> 18 #include <linux/stat.h> 19 #include <linux/string.h> 20 #include <linux/quotaops.h> 21 #include <linux/buffer_head.h> 22 #include <linux/random.h> 23 #include <linux/bitops.h> 24 #include <linux/blkdev.h> 25 #include <asm/byteorder.h> 26 #include "ext4.h" 27 #include "ext4_jbd2.h" 28 #include "xattr.h" 29 #include "acl.h" 30 #include "group.h" 31 32 /* 33 * ialloc.c contains the inodes allocation and deallocation routines 34 */ 35 36 /* 37 * The free inodes are managed by bitmaps. A file system contains several 38 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 39 * block for inodes, N blocks for the inode table and data blocks. 40 * 41 * The file system contains group descriptors which are located after the 42 * super block. Each descriptor contains the number of the bitmap block and 43 * the free blocks count in the block. 44 */ 45 46 /* 47 * To avoid calling the atomic setbit hundreds or thousands of times, we only 48 * need to use it within a single byte (to ensure we get endianness right). 49 * We can use memset for the rest of the bitmap as there are no other users. 50 */ 51 void mark_bitmap_end(int start_bit, int end_bit, char *bitmap) 52 { 53 int i; 54 55 if (start_bit >= end_bit) 56 return; 57 58 ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit); 59 for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) 60 ext4_set_bit(i, bitmap); 61 if (i < end_bit) 62 memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); 63 } 64 65 /* Initializes an uninitialized inode bitmap */ 66 unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh, 67 ext4_group_t block_group, 68 struct ext4_group_desc *gdp) 69 { 70 struct ext4_sb_info *sbi = EXT4_SB(sb); 71 72 J_ASSERT_BH(bh, buffer_locked(bh)); 73 74 /* If checksum is bad mark all blocks and inodes use to prevent 75 * allocation, essentially implementing a per-group read-only flag. */ 76 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 77 ext4_error(sb, __func__, "Checksum bad for group %lu\n", 78 block_group); 79 gdp->bg_free_blocks_count = 0; 80 gdp->bg_free_inodes_count = 0; 81 gdp->bg_itable_unused = 0; 82 memset(bh->b_data, 0xff, sb->s_blocksize); 83 return 0; 84 } 85 86 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); 87 mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb), 88 bh->b_data); 89 90 return EXT4_INODES_PER_GROUP(sb); 91 } 92 93 /* 94 * Read the inode allocation bitmap for a given block_group, reading 95 * into the specified slot in the superblock's bitmap cache. 96 * 97 * Return buffer_head of bitmap on success or NULL. 98 */ 99 static struct buffer_head * 100 read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) 101 { 102 struct ext4_group_desc *desc; 103 struct buffer_head *bh = NULL; 104 105 desc = ext4_get_group_desc(sb, block_group, NULL); 106 if (!desc) 107 goto error_out; 108 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { 109 bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc)); 110 if (!buffer_uptodate(bh)) { 111 lock_buffer(bh); 112 if (!buffer_uptodate(bh)) { 113 ext4_init_inode_bitmap(sb, bh, block_group, 114 desc); 115 set_buffer_uptodate(bh); 116 } 117 unlock_buffer(bh); 118 } 119 } else { 120 bh = sb_bread(sb, ext4_inode_bitmap(sb, desc)); 121 } 122 if (!bh) 123 ext4_error(sb, "read_inode_bitmap", 124 "Cannot read inode bitmap - " 125 "block_group = %lu, inode_bitmap = %llu", 126 block_group, ext4_inode_bitmap(sb, desc)); 127 error_out: 128 return bh; 129 } 130 131 /* 132 * NOTE! When we get the inode, we're the only people 133 * that have access to it, and as such there are no 134 * race conditions we have to worry about. The inode 135 * is not on the hash-lists, and it cannot be reached 136 * through the filesystem because the directory entry 137 * has been deleted earlier. 138 * 139 * HOWEVER: we must make sure that we get no aliases, 140 * which means that we have to call "clear_inode()" 141 * _before_ we mark the inode not in use in the inode 142 * bitmaps. Otherwise a newly created file might use 143 * the same inode number (not actually the same pointer 144 * though), and then we'd have two inodes sharing the 145 * same inode number and space on the harddisk. 146 */ 147 void ext4_free_inode (handle_t *handle, struct inode * inode) 148 { 149 struct super_block * sb = inode->i_sb; 150 int is_directory; 151 unsigned long ino; 152 struct buffer_head *bitmap_bh = NULL; 153 struct buffer_head *bh2; 154 ext4_group_t block_group; 155 unsigned long bit; 156 struct ext4_group_desc * gdp; 157 struct ext4_super_block * es; 158 struct ext4_sb_info *sbi; 159 int fatal = 0, err; 160 161 if (atomic_read(&inode->i_count) > 1) { 162 printk ("ext4_free_inode: inode has count=%d\n", 163 atomic_read(&inode->i_count)); 164 return; 165 } 166 if (inode->i_nlink) { 167 printk ("ext4_free_inode: inode has nlink=%d\n", 168 inode->i_nlink); 169 return; 170 } 171 if (!sb) { 172 printk("ext4_free_inode: inode on nonexistent device\n"); 173 return; 174 } 175 sbi = EXT4_SB(sb); 176 177 ino = inode->i_ino; 178 ext4_debug ("freeing inode %lu\n", ino); 179 180 /* 181 * Note: we must free any quota before locking the superblock, 182 * as writing the quota to disk may need the lock as well. 183 */ 184 DQUOT_INIT(inode); 185 ext4_xattr_delete_inode(handle, inode); 186 DQUOT_FREE_INODE(inode); 187 DQUOT_DROP(inode); 188 189 is_directory = S_ISDIR(inode->i_mode); 190 191 /* Do this BEFORE marking the inode not in use or returning an error */ 192 clear_inode (inode); 193 194 es = EXT4_SB(sb)->s_es; 195 if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { 196 ext4_error (sb, "ext4_free_inode", 197 "reserved or nonexistent inode %lu", ino); 198 goto error_return; 199 } 200 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 201 bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); 202 bitmap_bh = read_inode_bitmap(sb, block_group); 203 if (!bitmap_bh) 204 goto error_return; 205 206 BUFFER_TRACE(bitmap_bh, "get_write_access"); 207 fatal = ext4_journal_get_write_access(handle, bitmap_bh); 208 if (fatal) 209 goto error_return; 210 211 /* Ok, now we can actually update the inode bitmaps.. */ 212 if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group), 213 bit, bitmap_bh->b_data)) 214 ext4_error (sb, "ext4_free_inode", 215 "bit already cleared for inode %lu", ino); 216 else { 217 gdp = ext4_get_group_desc (sb, block_group, &bh2); 218 219 BUFFER_TRACE(bh2, "get_write_access"); 220 fatal = ext4_journal_get_write_access(handle, bh2); 221 if (fatal) goto error_return; 222 223 if (gdp) { 224 spin_lock(sb_bgl_lock(sbi, block_group)); 225 le16_add_cpu(&gdp->bg_free_inodes_count, 1); 226 if (is_directory) 227 le16_add_cpu(&gdp->bg_used_dirs_count, -1); 228 gdp->bg_checksum = ext4_group_desc_csum(sbi, 229 block_group, gdp); 230 spin_unlock(sb_bgl_lock(sbi, block_group)); 231 percpu_counter_inc(&sbi->s_freeinodes_counter); 232 if (is_directory) 233 percpu_counter_dec(&sbi->s_dirs_counter); 234 235 } 236 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata"); 237 err = ext4_journal_dirty_metadata(handle, bh2); 238 if (!fatal) fatal = err; 239 } 240 BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata"); 241 err = ext4_journal_dirty_metadata(handle, bitmap_bh); 242 if (!fatal) 243 fatal = err; 244 sb->s_dirt = 1; 245 error_return: 246 brelse(bitmap_bh); 247 ext4_std_error(sb, fatal); 248 } 249 250 /* 251 * There are two policies for allocating an inode. If the new inode is 252 * a directory, then a forward search is made for a block group with both 253 * free space and a low directory-to-inode ratio; if that fails, then of 254 * the groups with above-average free space, that group with the fewest 255 * directories already is chosen. 256 * 257 * For other inodes, search forward from the parent directory\'s block 258 * group to find a free inode. 259 */ 260 static int find_group_dir(struct super_block *sb, struct inode *parent, 261 ext4_group_t *best_group) 262 { 263 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 264 unsigned int freei, avefreei; 265 struct ext4_group_desc *desc, *best_desc = NULL; 266 ext4_group_t group; 267 int ret = -1; 268 269 freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter); 270 avefreei = freei / ngroups; 271 272 for (group = 0; group < ngroups; group++) { 273 desc = ext4_get_group_desc (sb, group, NULL); 274 if (!desc || !desc->bg_free_inodes_count) 275 continue; 276 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) 277 continue; 278 if (!best_desc || 279 (le16_to_cpu(desc->bg_free_blocks_count) > 280 le16_to_cpu(best_desc->bg_free_blocks_count))) { 281 *best_group = group; 282 best_desc = desc; 283 ret = 0; 284 } 285 } 286 return ret; 287 } 288 289 /* 290 * Orlov's allocator for directories. 291 * 292 * We always try to spread first-level directories. 293 * 294 * If there are blockgroups with both free inodes and free blocks counts 295 * not worse than average we return one with smallest directory count. 296 * Otherwise we simply return a random group. 297 * 298 * For the rest rules look so: 299 * 300 * It's OK to put directory into a group unless 301 * it has too many directories already (max_dirs) or 302 * it has too few free inodes left (min_inodes) or 303 * it has too few free blocks left (min_blocks) or 304 * it's already running too large debt (max_debt). 305 * Parent's group is preferred, if it doesn't satisfy these 306 * conditions we search cyclically through the rest. If none 307 * of the groups look good we just look for a group with more 308 * free inodes than average (starting at parent's group). 309 * 310 * Debt is incremented each time we allocate a directory and decremented 311 * when we allocate an inode, within 0--255. 312 */ 313 314 #define INODE_COST 64 315 #define BLOCK_COST 256 316 317 static int find_group_orlov(struct super_block *sb, struct inode *parent, 318 ext4_group_t *group) 319 { 320 ext4_group_t parent_group = EXT4_I(parent)->i_block_group; 321 struct ext4_sb_info *sbi = EXT4_SB(sb); 322 struct ext4_super_block *es = sbi->s_es; 323 ext4_group_t ngroups = sbi->s_groups_count; 324 int inodes_per_group = EXT4_INODES_PER_GROUP(sb); 325 unsigned int freei, avefreei; 326 ext4_fsblk_t freeb, avefreeb; 327 ext4_fsblk_t blocks_per_dir; 328 unsigned int ndirs; 329 int max_debt, max_dirs, min_inodes; 330 ext4_grpblk_t min_blocks; 331 ext4_group_t i; 332 struct ext4_group_desc *desc; 333 334 freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); 335 avefreei = freei / ngroups; 336 freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 337 avefreeb = freeb; 338 do_div(avefreeb, ngroups); 339 ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); 340 341 if ((parent == sb->s_root->d_inode) || 342 (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) { 343 int best_ndir = inodes_per_group; 344 ext4_group_t grp; 345 int ret = -1; 346 347 get_random_bytes(&grp, sizeof(grp)); 348 parent_group = (unsigned)grp % ngroups; 349 for (i = 0; i < ngroups; i++) { 350 grp = (parent_group + i) % ngroups; 351 desc = ext4_get_group_desc(sb, grp, NULL); 352 if (!desc || !desc->bg_free_inodes_count) 353 continue; 354 if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir) 355 continue; 356 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) 357 continue; 358 if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb) 359 continue; 360 *group = grp; 361 ret = 0; 362 best_ndir = le16_to_cpu(desc->bg_used_dirs_count); 363 } 364 if (ret == 0) 365 return ret; 366 goto fallback; 367 } 368 369 blocks_per_dir = ext4_blocks_count(es) - freeb; 370 do_div(blocks_per_dir, ndirs); 371 372 max_dirs = ndirs / ngroups + inodes_per_group / 16; 373 min_inodes = avefreei - inodes_per_group / 4; 374 min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4; 375 376 max_debt = EXT4_BLOCKS_PER_GROUP(sb); 377 max_debt /= max_t(int, blocks_per_dir, BLOCK_COST); 378 if (max_debt * INODE_COST > inodes_per_group) 379 max_debt = inodes_per_group / INODE_COST; 380 if (max_debt > 255) 381 max_debt = 255; 382 if (max_debt == 0) 383 max_debt = 1; 384 385 for (i = 0; i < ngroups; i++) { 386 *group = (parent_group + i) % ngroups; 387 desc = ext4_get_group_desc(sb, *group, NULL); 388 if (!desc || !desc->bg_free_inodes_count) 389 continue; 390 if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs) 391 continue; 392 if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes) 393 continue; 394 if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks) 395 continue; 396 return 0; 397 } 398 399 fallback: 400 for (i = 0; i < ngroups; i++) { 401 *group = (parent_group + i) % ngroups; 402 desc = ext4_get_group_desc(sb, *group, NULL); 403 if (desc && desc->bg_free_inodes_count && 404 le16_to_cpu(desc->bg_free_inodes_count) >= avefreei) 405 return 0; 406 } 407 408 if (avefreei) { 409 /* 410 * The free-inodes counter is approximate, and for really small 411 * filesystems the above test can fail to find any blockgroups 412 */ 413 avefreei = 0; 414 goto fallback; 415 } 416 417 return -1; 418 } 419 420 static int find_group_other(struct super_block *sb, struct inode *parent, 421 ext4_group_t *group) 422 { 423 ext4_group_t parent_group = EXT4_I(parent)->i_block_group; 424 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 425 struct ext4_group_desc *desc; 426 ext4_group_t i; 427 428 /* 429 * Try to place the inode in its parent directory 430 */ 431 *group = parent_group; 432 desc = ext4_get_group_desc(sb, *group, NULL); 433 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && 434 le16_to_cpu(desc->bg_free_blocks_count)) 435 return 0; 436 437 /* 438 * We're going to place this inode in a different blockgroup from its 439 * parent. We want to cause files in a common directory to all land in 440 * the same blockgroup. But we want files which are in a different 441 * directory which shares a blockgroup with our parent to land in a 442 * different blockgroup. 443 * 444 * So add our directory's i_ino into the starting point for the hash. 445 */ 446 *group = (*group + parent->i_ino) % ngroups; 447 448 /* 449 * Use a quadratic hash to find a group with a free inode and some free 450 * blocks. 451 */ 452 for (i = 1; i < ngroups; i <<= 1) { 453 *group += i; 454 if (*group >= ngroups) 455 *group -= ngroups; 456 desc = ext4_get_group_desc(sb, *group, NULL); 457 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && 458 le16_to_cpu(desc->bg_free_blocks_count)) 459 return 0; 460 } 461 462 /* 463 * That failed: try linear search for a free inode, even if that group 464 * has no free blocks. 465 */ 466 *group = parent_group; 467 for (i = 0; i < ngroups; i++) { 468 if (++*group >= ngroups) 469 *group = 0; 470 desc = ext4_get_group_desc(sb, *group, NULL); 471 if (desc && le16_to_cpu(desc->bg_free_inodes_count)) 472 return 0; 473 } 474 475 return -1; 476 } 477 478 /* 479 * There are two policies for allocating an inode. If the new inode is 480 * a directory, then a forward search is made for a block group with both 481 * free space and a low directory-to-inode ratio; if that fails, then of 482 * the groups with above-average free space, that group with the fewest 483 * directories already is chosen. 484 * 485 * For other inodes, search forward from the parent directory's block 486 * group to find a free inode. 487 */ 488 struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode) 489 { 490 struct super_block *sb; 491 struct buffer_head *bitmap_bh = NULL; 492 struct buffer_head *bh2; 493 ext4_group_t group = 0; 494 unsigned long ino = 0; 495 struct inode * inode; 496 struct ext4_group_desc * gdp = NULL; 497 struct ext4_super_block * es; 498 struct ext4_inode_info *ei; 499 struct ext4_sb_info *sbi; 500 int ret2, err = 0; 501 struct inode *ret; 502 ext4_group_t i; 503 int free = 0; 504 505 /* Cannot create files in a deleted directory */ 506 if (!dir || !dir->i_nlink) 507 return ERR_PTR(-EPERM); 508 509 sb = dir->i_sb; 510 inode = new_inode(sb); 511 if (!inode) 512 return ERR_PTR(-ENOMEM); 513 ei = EXT4_I(inode); 514 515 sbi = EXT4_SB(sb); 516 es = sbi->s_es; 517 if (S_ISDIR(mode)) { 518 if (test_opt (sb, OLDALLOC)) 519 ret2 = find_group_dir(sb, dir, &group); 520 else 521 ret2 = find_group_orlov(sb, dir, &group); 522 } else 523 ret2 = find_group_other(sb, dir, &group); 524 525 err = -ENOSPC; 526 if (ret2 == -1) 527 goto out; 528 529 for (i = 0; i < sbi->s_groups_count; i++) { 530 err = -EIO; 531 532 gdp = ext4_get_group_desc(sb, group, &bh2); 533 if (!gdp) 534 goto fail; 535 536 brelse(bitmap_bh); 537 bitmap_bh = read_inode_bitmap(sb, group); 538 if (!bitmap_bh) 539 goto fail; 540 541 ino = 0; 542 543 repeat_in_this_group: 544 ino = ext4_find_next_zero_bit((unsigned long *) 545 bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino); 546 if (ino < EXT4_INODES_PER_GROUP(sb)) { 547 548 BUFFER_TRACE(bitmap_bh, "get_write_access"); 549 err = ext4_journal_get_write_access(handle, bitmap_bh); 550 if (err) 551 goto fail; 552 553 if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group), 554 ino, bitmap_bh->b_data)) { 555 /* we won it */ 556 BUFFER_TRACE(bitmap_bh, 557 "call ext4_journal_dirty_metadata"); 558 err = ext4_journal_dirty_metadata(handle, 559 bitmap_bh); 560 if (err) 561 goto fail; 562 goto got; 563 } 564 /* we lost it */ 565 jbd2_journal_release_buffer(handle, bitmap_bh); 566 567 if (++ino < EXT4_INODES_PER_GROUP(sb)) 568 goto repeat_in_this_group; 569 } 570 571 /* 572 * This case is possible in concurrent environment. It is very 573 * rare. We cannot repeat the find_group_xxx() call because 574 * that will simply return the same blockgroup, because the 575 * group descriptor metadata has not yet been updated. 576 * So we just go onto the next blockgroup. 577 */ 578 if (++group == sbi->s_groups_count) 579 group = 0; 580 } 581 err = -ENOSPC; 582 goto out; 583 584 got: 585 ino++; 586 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) || 587 ino > EXT4_INODES_PER_GROUP(sb)) { 588 ext4_error(sb, __func__, 589 "reserved inode or inode > inodes count - " 590 "block_group = %lu, inode=%lu", group, 591 ino + group * EXT4_INODES_PER_GROUP(sb)); 592 err = -EIO; 593 goto fail; 594 } 595 596 BUFFER_TRACE(bh2, "get_write_access"); 597 err = ext4_journal_get_write_access(handle, bh2); 598 if (err) goto fail; 599 600 /* We may have to initialize the block bitmap if it isn't already */ 601 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && 602 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 603 struct buffer_head *block_bh = read_block_bitmap(sb, group); 604 605 BUFFER_TRACE(block_bh, "get block bitmap access"); 606 err = ext4_journal_get_write_access(handle, block_bh); 607 if (err) { 608 brelse(block_bh); 609 goto fail; 610 } 611 612 free = 0; 613 spin_lock(sb_bgl_lock(sbi, group)); 614 /* recheck and clear flag under lock if we still need to */ 615 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 616 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 617 free = ext4_free_blocks_after_init(sb, group, gdp); 618 gdp->bg_free_blocks_count = cpu_to_le16(free); 619 } 620 spin_unlock(sb_bgl_lock(sbi, group)); 621 622 /* Don't need to dirty bitmap block if we didn't change it */ 623 if (free) { 624 BUFFER_TRACE(block_bh, "dirty block bitmap"); 625 err = ext4_journal_dirty_metadata(handle, block_bh); 626 } 627 628 brelse(block_bh); 629 if (err) 630 goto fail; 631 } 632 633 spin_lock(sb_bgl_lock(sbi, group)); 634 /* If we didn't allocate from within the initialized part of the inode 635 * table then we need to initialize up to this inode. */ 636 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { 637 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { 638 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); 639 640 /* When marking the block group with 641 * ~EXT4_BG_INODE_UNINIT we don't want to depend 642 * on the value of bg_itable_unsed even though 643 * mke2fs could have initialized the same for us. 644 * Instead we calculated the value below 645 */ 646 647 free = 0; 648 } else { 649 free = EXT4_INODES_PER_GROUP(sb) - 650 le16_to_cpu(gdp->bg_itable_unused); 651 } 652 653 /* 654 * Check the relative inode number against the last used 655 * relative inode number in this group. if it is greater 656 * we need to update the bg_itable_unused count 657 * 658 */ 659 if (ino > free) 660 gdp->bg_itable_unused = 661 cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino); 662 } 663 664 le16_add_cpu(&gdp->bg_free_inodes_count, -1); 665 if (S_ISDIR(mode)) { 666 le16_add_cpu(&gdp->bg_used_dirs_count, 1); 667 } 668 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); 669 spin_unlock(sb_bgl_lock(sbi, group)); 670 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata"); 671 err = ext4_journal_dirty_metadata(handle, bh2); 672 if (err) goto fail; 673 674 percpu_counter_dec(&sbi->s_freeinodes_counter); 675 if (S_ISDIR(mode)) 676 percpu_counter_inc(&sbi->s_dirs_counter); 677 sb->s_dirt = 1; 678 679 inode->i_uid = current->fsuid; 680 if (test_opt (sb, GRPID)) 681 inode->i_gid = dir->i_gid; 682 else if (dir->i_mode & S_ISGID) { 683 inode->i_gid = dir->i_gid; 684 if (S_ISDIR(mode)) 685 mode |= S_ISGID; 686 } else 687 inode->i_gid = current->fsgid; 688 inode->i_mode = mode; 689 690 inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); 691 /* This is the optimal IO size (for stat), not the fs block size */ 692 inode->i_blocks = 0; 693 inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = 694 ext4_current_time(inode); 695 696 memset(ei->i_data, 0, sizeof(ei->i_data)); 697 ei->i_dir_start_lookup = 0; 698 ei->i_disksize = 0; 699 700 /* 701 * Don't inherit extent flag from directory. We set extent flag on 702 * newly created directory and file only if -o extent mount option is 703 * specified 704 */ 705 ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL); 706 if (S_ISLNK(mode)) 707 ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL); 708 /* dirsync only applies to directories */ 709 if (!S_ISDIR(mode)) 710 ei->i_flags &= ~EXT4_DIRSYNC_FL; 711 ei->i_file_acl = 0; 712 ei->i_dtime = 0; 713 ei->i_block_alloc_info = NULL; 714 ei->i_block_group = group; 715 716 ext4_set_inode_flags(inode); 717 if (IS_DIRSYNC(inode)) 718 handle->h_sync = 1; 719 insert_inode_hash(inode); 720 spin_lock(&sbi->s_next_gen_lock); 721 inode->i_generation = sbi->s_next_generation++; 722 spin_unlock(&sbi->s_next_gen_lock); 723 724 ei->i_state = EXT4_STATE_NEW; 725 726 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; 727 728 ret = inode; 729 if(DQUOT_ALLOC_INODE(inode)) { 730 err = -EDQUOT; 731 goto fail_drop; 732 } 733 734 err = ext4_init_acl(handle, inode, dir); 735 if (err) 736 goto fail_free_drop; 737 738 err = ext4_init_security(handle,inode, dir); 739 if (err) 740 goto fail_free_drop; 741 742 if (test_opt(sb, EXTENTS)) { 743 /* set extent flag only for diretory, file and normal symlink*/ 744 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { 745 EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; 746 ext4_ext_tree_init(handle, inode); 747 err = ext4_update_incompat_feature(handle, sb, 748 EXT4_FEATURE_INCOMPAT_EXTENTS); 749 if (err) 750 goto fail_free_drop; 751 } 752 } 753 754 err = ext4_mark_inode_dirty(handle, inode); 755 if (err) { 756 ext4_std_error(sb, err); 757 goto fail_free_drop; 758 } 759 760 ext4_debug("allocating inode %lu\n", inode->i_ino); 761 goto really_out; 762 fail: 763 ext4_std_error(sb, err); 764 out: 765 iput(inode); 766 ret = ERR_PTR(err); 767 really_out: 768 brelse(bitmap_bh); 769 return ret; 770 771 fail_free_drop: 772 DQUOT_FREE_INODE(inode); 773 774 fail_drop: 775 DQUOT_DROP(inode); 776 inode->i_flags |= S_NOQUOTA; 777 inode->i_nlink = 0; 778 iput(inode); 779 brelse(bitmap_bh); 780 return ERR_PTR(err); 781 } 782 783 /* Verify that we are loading a valid orphan from disk */ 784 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) 785 { 786 unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); 787 ext4_group_t block_group; 788 int bit; 789 struct buffer_head *bitmap_bh; 790 struct inode *inode = NULL; 791 long err = -EIO; 792 793 /* Error cases - e2fsck has already cleaned up for us */ 794 if (ino > max_ino) { 795 ext4_warning(sb, __func__, 796 "bad orphan ino %lu! e2fsck was run?", ino); 797 goto error; 798 } 799 800 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 801 bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); 802 bitmap_bh = read_inode_bitmap(sb, block_group); 803 if (!bitmap_bh) { 804 ext4_warning(sb, __func__, 805 "inode bitmap error for orphan %lu", ino); 806 goto error; 807 } 808 809 /* Having the inode bit set should be a 100% indicator that this 810 * is a valid orphan (no e2fsck run on fs). Orphans also include 811 * inodes that were being truncated, so we can't check i_nlink==0. 812 */ 813 if (!ext4_test_bit(bit, bitmap_bh->b_data)) 814 goto bad_orphan; 815 816 inode = ext4_iget(sb, ino); 817 if (IS_ERR(inode)) 818 goto iget_failed; 819 820 if (NEXT_ORPHAN(inode) > max_ino) 821 goto bad_orphan; 822 brelse(bitmap_bh); 823 return inode; 824 825 iget_failed: 826 err = PTR_ERR(inode); 827 inode = NULL; 828 bad_orphan: 829 ext4_warning(sb, __func__, 830 "bad orphan inode %lu! e2fsck was run?", ino); 831 printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", 832 bit, (unsigned long long)bitmap_bh->b_blocknr, 833 ext4_test_bit(bit, bitmap_bh->b_data)); 834 printk(KERN_NOTICE "inode=%p\n", inode); 835 if (inode) { 836 printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", 837 is_bad_inode(inode)); 838 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", 839 NEXT_ORPHAN(inode)); 840 printk(KERN_NOTICE "max_ino=%lu\n", max_ino); 841 /* Avoid freeing blocks if we got a bad deleted inode */ 842 if (inode->i_nlink == 0) 843 inode->i_blocks = 0; 844 iput(inode); 845 } 846 brelse(bitmap_bh); 847 error: 848 return ERR_PTR(err); 849 } 850 851 unsigned long ext4_count_free_inodes (struct super_block * sb) 852 { 853 unsigned long desc_count; 854 struct ext4_group_desc *gdp; 855 ext4_group_t i; 856 #ifdef EXT4FS_DEBUG 857 struct ext4_super_block *es; 858 unsigned long bitmap_count, x; 859 struct buffer_head *bitmap_bh = NULL; 860 861 es = EXT4_SB(sb)->s_es; 862 desc_count = 0; 863 bitmap_count = 0; 864 gdp = NULL; 865 for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) { 866 gdp = ext4_get_group_desc (sb, i, NULL); 867 if (!gdp) 868 continue; 869 desc_count += le16_to_cpu(gdp->bg_free_inodes_count); 870 brelse(bitmap_bh); 871 bitmap_bh = read_inode_bitmap(sb, i); 872 if (!bitmap_bh) 873 continue; 874 875 x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8); 876 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", 877 i, le16_to_cpu(gdp->bg_free_inodes_count), x); 878 bitmap_count += x; 879 } 880 brelse(bitmap_bh); 881 printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n", 882 le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count); 883 return desc_count; 884 #else 885 desc_count = 0; 886 for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) { 887 gdp = ext4_get_group_desc (sb, i, NULL); 888 if (!gdp) 889 continue; 890 desc_count += le16_to_cpu(gdp->bg_free_inodes_count); 891 cond_resched(); 892 } 893 return desc_count; 894 #endif 895 } 896 897 /* Called at mount-time, super-block is locked */ 898 unsigned long ext4_count_dirs (struct super_block * sb) 899 { 900 unsigned long count = 0; 901 ext4_group_t i; 902 903 for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) { 904 struct ext4_group_desc *gdp = ext4_get_group_desc (sb, i, NULL); 905 if (!gdp) 906 continue; 907 count += le16_to_cpu(gdp->bg_used_dirs_count); 908 } 909 return count; 910 } 911 912