1 /* 2 * linux/fs/ext4/ialloc.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * BSD ufs-inspired inode and directory allocation by 10 * Stephen Tweedie (sct@redhat.com), 1993 11 * Big-endian to little-endian byte-swapping/bitmaps by 12 * David S. Miller (davem@caip.rutgers.edu), 1995 13 */ 14 15 #include <linux/time.h> 16 #include <linux/fs.h> 17 #include <linux/jbd2.h> 18 #include <linux/stat.h> 19 #include <linux/string.h> 20 #include <linux/quotaops.h> 21 #include <linux/buffer_head.h> 22 #include <linux/random.h> 23 #include <linux/bitops.h> 24 #include <linux/blkdev.h> 25 #include <asm/byteorder.h> 26 27 #include "ext4.h" 28 #include "ext4_jbd2.h" 29 #include "xattr.h" 30 #include "acl.h" 31 32 #include <trace/events/ext4.h> 33 34 /* 35 * ialloc.c contains the inodes allocation and deallocation routines 36 */ 37 38 /* 39 * The free inodes are managed by bitmaps. A file system contains several 40 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 41 * block for inodes, N blocks for the inode table and data blocks. 42 * 43 * The file system contains group descriptors which are located after the 44 * super block. Each descriptor contains the number of the bitmap block and 45 * the free blocks count in the block. 46 */ 47 48 /* 49 * To avoid calling the atomic setbit hundreds or thousands of times, we only 50 * need to use it within a single byte (to ensure we get endianness right). 51 * We can use memset for the rest of the bitmap as there are no other users. 52 */ 53 void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) 54 { 55 int i; 56 57 if (start_bit >= end_bit) 58 return; 59 60 ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit); 61 for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) 62 ext4_set_bit(i, bitmap); 63 if (i < end_bit) 64 memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); 65 } 66 67 /* Initializes an uninitialized inode bitmap */ 68 static unsigned ext4_init_inode_bitmap(struct super_block *sb, 69 struct buffer_head *bh, 70 ext4_group_t block_group, 71 struct ext4_group_desc *gdp) 72 { 73 J_ASSERT_BH(bh, buffer_locked(bh)); 74 75 /* If checksum is bad mark all blocks and inodes use to prevent 76 * allocation, essentially implementing a per-group read-only flag. */ 77 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 78 ext4_error(sb, "Checksum bad for group %u", block_group); 79 ext4_free_group_clusters_set(sb, gdp, 0); 80 ext4_free_inodes_set(sb, gdp, 0); 81 ext4_itable_unused_set(sb, gdp, 0); 82 memset(bh->b_data, 0xff, sb->s_blocksize); 83 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh, 84 EXT4_INODES_PER_GROUP(sb) / 8); 85 return 0; 86 } 87 88 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); 89 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, 90 bh->b_data); 91 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh, 92 EXT4_INODES_PER_GROUP(sb) / 8); 93 ext4_group_desc_csum_set(sb, block_group, gdp); 94 95 return EXT4_INODES_PER_GROUP(sb); 96 } 97 98 void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate) 99 { 100 if (uptodate) { 101 set_buffer_uptodate(bh); 102 set_bitmap_uptodate(bh); 103 } 104 unlock_buffer(bh); 105 put_bh(bh); 106 } 107 108 /* 109 * Read the inode allocation bitmap for a given block_group, reading 110 * into the specified slot in the superblock's bitmap cache. 111 * 112 * Return buffer_head of bitmap on success or NULL. 113 */ 114 static struct buffer_head * 115 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) 116 { 117 struct ext4_group_desc *desc; 118 struct buffer_head *bh = NULL; 119 ext4_fsblk_t bitmap_blk; 120 121 desc = ext4_get_group_desc(sb, block_group, NULL); 122 if (!desc) 123 return NULL; 124 125 bitmap_blk = ext4_inode_bitmap(sb, desc); 126 bh = sb_getblk(sb, bitmap_blk); 127 if (unlikely(!bh)) { 128 ext4_error(sb, "Cannot read inode bitmap - " 129 "block_group = %u, inode_bitmap = %llu", 130 block_group, bitmap_blk); 131 return NULL; 132 } 133 if (bitmap_uptodate(bh)) 134 goto verify; 135 136 lock_buffer(bh); 137 if (bitmap_uptodate(bh)) { 138 unlock_buffer(bh); 139 goto verify; 140 } 141 142 ext4_lock_group(sb, block_group); 143 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { 144 ext4_init_inode_bitmap(sb, bh, block_group, desc); 145 set_bitmap_uptodate(bh); 146 set_buffer_uptodate(bh); 147 set_buffer_verified(bh); 148 ext4_unlock_group(sb, block_group); 149 unlock_buffer(bh); 150 return bh; 151 } 152 ext4_unlock_group(sb, block_group); 153 154 if (buffer_uptodate(bh)) { 155 /* 156 * if not uninit if bh is uptodate, 157 * bitmap is also uptodate 158 */ 159 set_bitmap_uptodate(bh); 160 unlock_buffer(bh); 161 goto verify; 162 } 163 /* 164 * submit the buffer_head for reading 165 */ 166 trace_ext4_load_inode_bitmap(sb, block_group); 167 bh->b_end_io = ext4_end_bitmap_read; 168 get_bh(bh); 169 submit_bh(READ, bh); 170 wait_on_buffer(bh); 171 if (!buffer_uptodate(bh)) { 172 put_bh(bh); 173 ext4_error(sb, "Cannot read inode bitmap - " 174 "block_group = %u, inode_bitmap = %llu", 175 block_group, bitmap_blk); 176 return NULL; 177 } 178 179 verify: 180 ext4_lock_group(sb, block_group); 181 if (!buffer_verified(bh) && 182 !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, 183 EXT4_INODES_PER_GROUP(sb) / 8)) { 184 ext4_unlock_group(sb, block_group); 185 put_bh(bh); 186 ext4_error(sb, "Corrupt inode bitmap - block_group = %u, " 187 "inode_bitmap = %llu", block_group, bitmap_blk); 188 return NULL; 189 } 190 ext4_unlock_group(sb, block_group); 191 set_buffer_verified(bh); 192 return bh; 193 } 194 195 /* 196 * NOTE! When we get the inode, we're the only people 197 * that have access to it, and as such there are no 198 * race conditions we have to worry about. The inode 199 * is not on the hash-lists, and it cannot be reached 200 * through the filesystem because the directory entry 201 * has been deleted earlier. 202 * 203 * HOWEVER: we must make sure that we get no aliases, 204 * which means that we have to call "clear_inode()" 205 * _before_ we mark the inode not in use in the inode 206 * bitmaps. Otherwise a newly created file might use 207 * the same inode number (not actually the same pointer 208 * though), and then we'd have two inodes sharing the 209 * same inode number and space on the harddisk. 210 */ 211 void ext4_free_inode(handle_t *handle, struct inode *inode) 212 { 213 struct super_block *sb = inode->i_sb; 214 int is_directory; 215 unsigned long ino; 216 struct buffer_head *bitmap_bh = NULL; 217 struct buffer_head *bh2; 218 ext4_group_t block_group; 219 unsigned long bit; 220 struct ext4_group_desc *gdp; 221 struct ext4_super_block *es; 222 struct ext4_sb_info *sbi; 223 int fatal = 0, err, count, cleared; 224 225 if (!sb) { 226 printk(KERN_ERR "EXT4-fs: %s:%d: inode on " 227 "nonexistent device\n", __func__, __LINE__); 228 return; 229 } 230 if (atomic_read(&inode->i_count) > 1) { 231 ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d", 232 __func__, __LINE__, inode->i_ino, 233 atomic_read(&inode->i_count)); 234 return; 235 } 236 if (inode->i_nlink) { 237 ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n", 238 __func__, __LINE__, inode->i_ino, inode->i_nlink); 239 return; 240 } 241 sbi = EXT4_SB(sb); 242 243 ino = inode->i_ino; 244 ext4_debug("freeing inode %lu\n", ino); 245 trace_ext4_free_inode(inode); 246 247 /* 248 * Note: we must free any quota before locking the superblock, 249 * as writing the quota to disk may need the lock as well. 250 */ 251 dquot_initialize(inode); 252 ext4_xattr_delete_inode(handle, inode); 253 dquot_free_inode(inode); 254 dquot_drop(inode); 255 256 is_directory = S_ISDIR(inode->i_mode); 257 258 /* Do this BEFORE marking the inode not in use or returning an error */ 259 ext4_clear_inode(inode); 260 261 es = EXT4_SB(sb)->s_es; 262 if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { 263 ext4_error(sb, "reserved or nonexistent inode %lu", ino); 264 goto error_return; 265 } 266 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 267 bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); 268 bitmap_bh = ext4_read_inode_bitmap(sb, block_group); 269 if (!bitmap_bh) 270 goto error_return; 271 272 BUFFER_TRACE(bitmap_bh, "get_write_access"); 273 fatal = ext4_journal_get_write_access(handle, bitmap_bh); 274 if (fatal) 275 goto error_return; 276 277 fatal = -ESRCH; 278 gdp = ext4_get_group_desc(sb, block_group, &bh2); 279 if (gdp) { 280 BUFFER_TRACE(bh2, "get_write_access"); 281 fatal = ext4_journal_get_write_access(handle, bh2); 282 } 283 ext4_lock_group(sb, block_group); 284 cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data); 285 if (fatal || !cleared) { 286 ext4_unlock_group(sb, block_group); 287 goto out; 288 } 289 290 count = ext4_free_inodes_count(sb, gdp) + 1; 291 ext4_free_inodes_set(sb, gdp, count); 292 if (is_directory) { 293 count = ext4_used_dirs_count(sb, gdp) - 1; 294 ext4_used_dirs_set(sb, gdp, count); 295 percpu_counter_dec(&sbi->s_dirs_counter); 296 } 297 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, 298 EXT4_INODES_PER_GROUP(sb) / 8); 299 ext4_group_desc_csum_set(sb, block_group, gdp); 300 ext4_unlock_group(sb, block_group); 301 302 percpu_counter_inc(&sbi->s_freeinodes_counter); 303 if (sbi->s_log_groups_per_flex) { 304 ext4_group_t f = ext4_flex_group(sbi, block_group); 305 306 atomic_inc(&sbi->s_flex_groups[f].free_inodes); 307 if (is_directory) 308 atomic_dec(&sbi->s_flex_groups[f].used_dirs); 309 } 310 BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); 311 fatal = ext4_handle_dirty_metadata(handle, NULL, bh2); 312 out: 313 if (cleared) { 314 BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata"); 315 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 316 if (!fatal) 317 fatal = err; 318 } else 319 ext4_error(sb, "bit already cleared for inode %lu", ino); 320 321 error_return: 322 brelse(bitmap_bh); 323 ext4_std_error(sb, fatal); 324 } 325 326 struct orlov_stats { 327 __u32 free_inodes; 328 __u32 free_clusters; 329 __u32 used_dirs; 330 }; 331 332 /* 333 * Helper function for Orlov's allocator; returns critical information 334 * for a particular block group or flex_bg. If flex_size is 1, then g 335 * is a block group number; otherwise it is flex_bg number. 336 */ 337 static void get_orlov_stats(struct super_block *sb, ext4_group_t g, 338 int flex_size, struct orlov_stats *stats) 339 { 340 struct ext4_group_desc *desc; 341 struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups; 342 343 if (flex_size > 1) { 344 stats->free_inodes = atomic_read(&flex_group[g].free_inodes); 345 stats->free_clusters = atomic_read(&flex_group[g].free_clusters); 346 stats->used_dirs = atomic_read(&flex_group[g].used_dirs); 347 return; 348 } 349 350 desc = ext4_get_group_desc(sb, g, NULL); 351 if (desc) { 352 stats->free_inodes = ext4_free_inodes_count(sb, desc); 353 stats->free_clusters = ext4_free_group_clusters(sb, desc); 354 stats->used_dirs = ext4_used_dirs_count(sb, desc); 355 } else { 356 stats->free_inodes = 0; 357 stats->free_clusters = 0; 358 stats->used_dirs = 0; 359 } 360 } 361 362 /* 363 * Orlov's allocator for directories. 364 * 365 * We always try to spread first-level directories. 366 * 367 * If there are blockgroups with both free inodes and free blocks counts 368 * not worse than average we return one with smallest directory count. 369 * Otherwise we simply return a random group. 370 * 371 * For the rest rules look so: 372 * 373 * It's OK to put directory into a group unless 374 * it has too many directories already (max_dirs) or 375 * it has too few free inodes left (min_inodes) or 376 * it has too few free blocks left (min_blocks) or 377 * Parent's group is preferred, if it doesn't satisfy these 378 * conditions we search cyclically through the rest. If none 379 * of the groups look good we just look for a group with more 380 * free inodes than average (starting at parent's group). 381 */ 382 383 static int find_group_orlov(struct super_block *sb, struct inode *parent, 384 ext4_group_t *group, umode_t mode, 385 const struct qstr *qstr) 386 { 387 ext4_group_t parent_group = EXT4_I(parent)->i_block_group; 388 struct ext4_sb_info *sbi = EXT4_SB(sb); 389 ext4_group_t real_ngroups = ext4_get_groups_count(sb); 390 int inodes_per_group = EXT4_INODES_PER_GROUP(sb); 391 unsigned int freei, avefreei, grp_free; 392 ext4_fsblk_t freeb, avefreec; 393 unsigned int ndirs; 394 int max_dirs, min_inodes; 395 ext4_grpblk_t min_clusters; 396 ext4_group_t i, grp, g, ngroups; 397 struct ext4_group_desc *desc; 398 struct orlov_stats stats; 399 int flex_size = ext4_flex_bg_size(sbi); 400 struct dx_hash_info hinfo; 401 402 ngroups = real_ngroups; 403 if (flex_size > 1) { 404 ngroups = (real_ngroups + flex_size - 1) >> 405 sbi->s_log_groups_per_flex; 406 parent_group >>= sbi->s_log_groups_per_flex; 407 } 408 409 freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); 410 avefreei = freei / ngroups; 411 freeb = EXT4_C2B(sbi, 412 percpu_counter_read_positive(&sbi->s_freeclusters_counter)); 413 avefreec = freeb; 414 do_div(avefreec, ngroups); 415 ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); 416 417 if (S_ISDIR(mode) && 418 ((parent == sb->s_root->d_inode) || 419 (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) { 420 int best_ndir = inodes_per_group; 421 int ret = -1; 422 423 if (qstr) { 424 hinfo.hash_version = DX_HASH_HALF_MD4; 425 hinfo.seed = sbi->s_hash_seed; 426 ext4fs_dirhash(qstr->name, qstr->len, &hinfo); 427 grp = hinfo.hash; 428 } else 429 get_random_bytes(&grp, sizeof(grp)); 430 parent_group = (unsigned)grp % ngroups; 431 for (i = 0; i < ngroups; i++) { 432 g = (parent_group + i) % ngroups; 433 get_orlov_stats(sb, g, flex_size, &stats); 434 if (!stats.free_inodes) 435 continue; 436 if (stats.used_dirs >= best_ndir) 437 continue; 438 if (stats.free_inodes < avefreei) 439 continue; 440 if (stats.free_clusters < avefreec) 441 continue; 442 grp = g; 443 ret = 0; 444 best_ndir = stats.used_dirs; 445 } 446 if (ret) 447 goto fallback; 448 found_flex_bg: 449 if (flex_size == 1) { 450 *group = grp; 451 return 0; 452 } 453 454 /* 455 * We pack inodes at the beginning of the flexgroup's 456 * inode tables. Block allocation decisions will do 457 * something similar, although regular files will 458 * start at 2nd block group of the flexgroup. See 459 * ext4_ext_find_goal() and ext4_find_near(). 460 */ 461 grp *= flex_size; 462 for (i = 0; i < flex_size; i++) { 463 if (grp+i >= real_ngroups) 464 break; 465 desc = ext4_get_group_desc(sb, grp+i, NULL); 466 if (desc && ext4_free_inodes_count(sb, desc)) { 467 *group = grp+i; 468 return 0; 469 } 470 } 471 goto fallback; 472 } 473 474 max_dirs = ndirs / ngroups + inodes_per_group / 16; 475 min_inodes = avefreei - inodes_per_group*flex_size / 4; 476 if (min_inodes < 1) 477 min_inodes = 1; 478 min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4; 479 480 /* 481 * Start looking in the flex group where we last allocated an 482 * inode for this parent directory 483 */ 484 if (EXT4_I(parent)->i_last_alloc_group != ~0) { 485 parent_group = EXT4_I(parent)->i_last_alloc_group; 486 if (flex_size > 1) 487 parent_group >>= sbi->s_log_groups_per_flex; 488 } 489 490 for (i = 0; i < ngroups; i++) { 491 grp = (parent_group + i) % ngroups; 492 get_orlov_stats(sb, grp, flex_size, &stats); 493 if (stats.used_dirs >= max_dirs) 494 continue; 495 if (stats.free_inodes < min_inodes) 496 continue; 497 if (stats.free_clusters < min_clusters) 498 continue; 499 goto found_flex_bg; 500 } 501 502 fallback: 503 ngroups = real_ngroups; 504 avefreei = freei / ngroups; 505 fallback_retry: 506 parent_group = EXT4_I(parent)->i_block_group; 507 for (i = 0; i < ngroups; i++) { 508 grp = (parent_group + i) % ngroups; 509 desc = ext4_get_group_desc(sb, grp, NULL); 510 if (desc) { 511 grp_free = ext4_free_inodes_count(sb, desc); 512 if (grp_free && grp_free >= avefreei) { 513 *group = grp; 514 return 0; 515 } 516 } 517 } 518 519 if (avefreei) { 520 /* 521 * The free-inodes counter is approximate, and for really small 522 * filesystems the above test can fail to find any blockgroups 523 */ 524 avefreei = 0; 525 goto fallback_retry; 526 } 527 528 return -1; 529 } 530 531 static int find_group_other(struct super_block *sb, struct inode *parent, 532 ext4_group_t *group, umode_t mode) 533 { 534 ext4_group_t parent_group = EXT4_I(parent)->i_block_group; 535 ext4_group_t i, last, ngroups = ext4_get_groups_count(sb); 536 struct ext4_group_desc *desc; 537 int flex_size = ext4_flex_bg_size(EXT4_SB(sb)); 538 539 /* 540 * Try to place the inode is the same flex group as its 541 * parent. If we can't find space, use the Orlov algorithm to 542 * find another flex group, and store that information in the 543 * parent directory's inode information so that use that flex 544 * group for future allocations. 545 */ 546 if (flex_size > 1) { 547 int retry = 0; 548 549 try_again: 550 parent_group &= ~(flex_size-1); 551 last = parent_group + flex_size; 552 if (last > ngroups) 553 last = ngroups; 554 for (i = parent_group; i < last; i++) { 555 desc = ext4_get_group_desc(sb, i, NULL); 556 if (desc && ext4_free_inodes_count(sb, desc)) { 557 *group = i; 558 return 0; 559 } 560 } 561 if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) { 562 retry = 1; 563 parent_group = EXT4_I(parent)->i_last_alloc_group; 564 goto try_again; 565 } 566 /* 567 * If this didn't work, use the Orlov search algorithm 568 * to find a new flex group; we pass in the mode to 569 * avoid the topdir algorithms. 570 */ 571 *group = parent_group + flex_size; 572 if (*group > ngroups) 573 *group = 0; 574 return find_group_orlov(sb, parent, group, mode, NULL); 575 } 576 577 /* 578 * Try to place the inode in its parent directory 579 */ 580 *group = parent_group; 581 desc = ext4_get_group_desc(sb, *group, NULL); 582 if (desc && ext4_free_inodes_count(sb, desc) && 583 ext4_free_group_clusters(sb, desc)) 584 return 0; 585 586 /* 587 * We're going to place this inode in a different blockgroup from its 588 * parent. We want to cause files in a common directory to all land in 589 * the same blockgroup. But we want files which are in a different 590 * directory which shares a blockgroup with our parent to land in a 591 * different blockgroup. 592 * 593 * So add our directory's i_ino into the starting point for the hash. 594 */ 595 *group = (*group + parent->i_ino) % ngroups; 596 597 /* 598 * Use a quadratic hash to find a group with a free inode and some free 599 * blocks. 600 */ 601 for (i = 1; i < ngroups; i <<= 1) { 602 *group += i; 603 if (*group >= ngroups) 604 *group -= ngroups; 605 desc = ext4_get_group_desc(sb, *group, NULL); 606 if (desc && ext4_free_inodes_count(sb, desc) && 607 ext4_free_group_clusters(sb, desc)) 608 return 0; 609 } 610 611 /* 612 * That failed: try linear search for a free inode, even if that group 613 * has no free blocks. 614 */ 615 *group = parent_group; 616 for (i = 0; i < ngroups; i++) { 617 if (++*group >= ngroups) 618 *group = 0; 619 desc = ext4_get_group_desc(sb, *group, NULL); 620 if (desc && ext4_free_inodes_count(sb, desc)) 621 return 0; 622 } 623 624 return -1; 625 } 626 627 /* 628 * There are two policies for allocating an inode. If the new inode is 629 * a directory, then a forward search is made for a block group with both 630 * free space and a low directory-to-inode ratio; if that fails, then of 631 * the groups with above-average free space, that group with the fewest 632 * directories already is chosen. 633 * 634 * For other inodes, search forward from the parent directory's block 635 * group to find a free inode. 636 */ 637 struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode, 638 const struct qstr *qstr, __u32 goal, uid_t *owner) 639 { 640 struct super_block *sb; 641 struct buffer_head *inode_bitmap_bh = NULL; 642 struct buffer_head *group_desc_bh; 643 ext4_group_t ngroups, group = 0; 644 unsigned long ino = 0; 645 struct inode *inode; 646 struct ext4_group_desc *gdp = NULL; 647 struct ext4_inode_info *ei; 648 struct ext4_sb_info *sbi; 649 int ret2, err = 0; 650 struct inode *ret; 651 ext4_group_t i; 652 ext4_group_t flex_group; 653 654 /* Cannot create files in a deleted directory */ 655 if (!dir || !dir->i_nlink) 656 return ERR_PTR(-EPERM); 657 658 sb = dir->i_sb; 659 ngroups = ext4_get_groups_count(sb); 660 trace_ext4_request_inode(dir, mode); 661 inode = new_inode(sb); 662 if (!inode) 663 return ERR_PTR(-ENOMEM); 664 ei = EXT4_I(inode); 665 sbi = EXT4_SB(sb); 666 667 if (!goal) 668 goal = sbi->s_inode_goal; 669 670 if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) { 671 group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); 672 ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb); 673 ret2 = 0; 674 goto got_group; 675 } 676 677 if (S_ISDIR(mode)) 678 ret2 = find_group_orlov(sb, dir, &group, mode, qstr); 679 else 680 ret2 = find_group_other(sb, dir, &group, mode); 681 682 got_group: 683 EXT4_I(dir)->i_last_alloc_group = group; 684 err = -ENOSPC; 685 if (ret2 == -1) 686 goto out; 687 688 /* 689 * Normally we will only go through one pass of this loop, 690 * unless we get unlucky and it turns out the group we selected 691 * had its last inode grabbed by someone else. 692 */ 693 for (i = 0; i < ngroups; i++, ino = 0) { 694 err = -EIO; 695 696 gdp = ext4_get_group_desc(sb, group, &group_desc_bh); 697 if (!gdp) 698 goto fail; 699 700 /* 701 * Check free inodes count before loading bitmap. 702 */ 703 if (ext4_free_inodes_count(sb, gdp) == 0) { 704 if (++group == ngroups) 705 group = 0; 706 continue; 707 } 708 709 brelse(inode_bitmap_bh); 710 inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); 711 if (!inode_bitmap_bh) 712 goto fail; 713 714 repeat_in_this_group: 715 ino = ext4_find_next_zero_bit((unsigned long *) 716 inode_bitmap_bh->b_data, 717 EXT4_INODES_PER_GROUP(sb), ino); 718 if (ino >= EXT4_INODES_PER_GROUP(sb)) { 719 if (++group == ngroups) 720 group = 0; 721 continue; 722 } 723 if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { 724 ext4_error(sb, "reserved inode found cleared - " 725 "inode=%lu", ino + 1); 726 continue; 727 } 728 BUFFER_TRACE(inode_bitmap_bh, "get_write_access"); 729 err = ext4_journal_get_write_access(handle, inode_bitmap_bh); 730 if (err) 731 goto fail; 732 ext4_lock_group(sb, group); 733 ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data); 734 ext4_unlock_group(sb, group); 735 ino++; /* the inode bitmap is zero-based */ 736 if (!ret2) 737 goto got; /* we grabbed the inode! */ 738 if (ino < EXT4_INODES_PER_GROUP(sb)) 739 goto repeat_in_this_group; 740 } 741 err = -ENOSPC; 742 goto out; 743 744 got: 745 BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); 746 err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); 747 if (err) 748 goto fail; 749 750 /* We may have to initialize the block bitmap if it isn't already */ 751 if (ext4_has_group_desc_csum(sb) && 752 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 753 struct buffer_head *block_bitmap_bh; 754 755 block_bitmap_bh = ext4_read_block_bitmap(sb, group); 756 BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); 757 err = ext4_journal_get_write_access(handle, block_bitmap_bh); 758 if (err) { 759 brelse(block_bitmap_bh); 760 goto fail; 761 } 762 763 BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap"); 764 err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh); 765 766 /* recheck and clear flag under lock if we still need to */ 767 ext4_lock_group(sb, group); 768 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 769 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 770 ext4_free_group_clusters_set(sb, gdp, 771 ext4_free_clusters_after_init(sb, group, gdp)); 772 ext4_block_bitmap_csum_set(sb, group, gdp, 773 block_bitmap_bh); 774 ext4_group_desc_csum_set(sb, group, gdp); 775 } 776 ext4_unlock_group(sb, group); 777 brelse(block_bitmap_bh); 778 779 if (err) 780 goto fail; 781 } 782 783 BUFFER_TRACE(group_desc_bh, "get_write_access"); 784 err = ext4_journal_get_write_access(handle, group_desc_bh); 785 if (err) 786 goto fail; 787 788 /* Update the relevant bg descriptor fields */ 789 if (ext4_has_group_desc_csum(sb)) { 790 int free; 791 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 792 793 down_read(&grp->alloc_sem); /* protect vs itable lazyinit */ 794 ext4_lock_group(sb, group); /* while we modify the bg desc */ 795 free = EXT4_INODES_PER_GROUP(sb) - 796 ext4_itable_unused_count(sb, gdp); 797 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { 798 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT); 799 free = 0; 800 } 801 /* 802 * Check the relative inode number against the last used 803 * relative inode number in this group. if it is greater 804 * we need to update the bg_itable_unused count 805 */ 806 if (ino > free) 807 ext4_itable_unused_set(sb, gdp, 808 (EXT4_INODES_PER_GROUP(sb) - ino)); 809 up_read(&grp->alloc_sem); 810 } else { 811 ext4_lock_group(sb, group); 812 } 813 814 ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); 815 if (S_ISDIR(mode)) { 816 ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1); 817 if (sbi->s_log_groups_per_flex) { 818 ext4_group_t f = ext4_flex_group(sbi, group); 819 820 atomic_inc(&sbi->s_flex_groups[f].used_dirs); 821 } 822 } 823 if (ext4_has_group_desc_csum(sb)) { 824 ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh, 825 EXT4_INODES_PER_GROUP(sb) / 8); 826 ext4_group_desc_csum_set(sb, group, gdp); 827 } 828 ext4_unlock_group(sb, group); 829 830 BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata"); 831 err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh); 832 if (err) 833 goto fail; 834 835 percpu_counter_dec(&sbi->s_freeinodes_counter); 836 if (S_ISDIR(mode)) 837 percpu_counter_inc(&sbi->s_dirs_counter); 838 839 if (sbi->s_log_groups_per_flex) { 840 flex_group = ext4_flex_group(sbi, group); 841 atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes); 842 } 843 if (owner) { 844 inode->i_mode = mode; 845 i_uid_write(inode, owner[0]); 846 i_gid_write(inode, owner[1]); 847 } else if (test_opt(sb, GRPID)) { 848 inode->i_mode = mode; 849 inode->i_uid = current_fsuid(); 850 inode->i_gid = dir->i_gid; 851 } else 852 inode_init_owner(inode, dir, mode); 853 854 inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); 855 /* This is the optimal IO size (for stat), not the fs block size */ 856 inode->i_blocks = 0; 857 inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = 858 ext4_current_time(inode); 859 860 memset(ei->i_data, 0, sizeof(ei->i_data)); 861 ei->i_dir_start_lookup = 0; 862 ei->i_disksize = 0; 863 864 /* Don't inherit extent flag from directory, amongst others. */ 865 ei->i_flags = 866 ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED); 867 ei->i_file_acl = 0; 868 ei->i_dtime = 0; 869 ei->i_block_group = group; 870 ei->i_last_alloc_group = ~0; 871 872 ext4_set_inode_flags(inode); 873 if (IS_DIRSYNC(inode)) 874 ext4_handle_sync(handle); 875 if (insert_inode_locked(inode) < 0) { 876 /* 877 * Likely a bitmap corruption causing inode to be allocated 878 * twice. 879 */ 880 err = -EIO; 881 goto fail; 882 } 883 spin_lock(&sbi->s_next_gen_lock); 884 inode->i_generation = sbi->s_next_generation++; 885 spin_unlock(&sbi->s_next_gen_lock); 886 887 /* Precompute checksum seed for inode metadata */ 888 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 889 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 890 __u32 csum; 891 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 892 __le32 inum = cpu_to_le32(inode->i_ino); 893 __le32 gen = cpu_to_le32(inode->i_generation); 894 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 895 sizeof(inum)); 896 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 897 sizeof(gen)); 898 } 899 900 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 901 ext4_set_inode_state(inode, EXT4_STATE_NEW); 902 903 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; 904 905 ei->i_inline_off = 0; 906 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA)) 907 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); 908 909 ret = inode; 910 dquot_initialize(inode); 911 err = dquot_alloc_inode(inode); 912 if (err) 913 goto fail_drop; 914 915 err = ext4_init_acl(handle, inode, dir); 916 if (err) 917 goto fail_free_drop; 918 919 err = ext4_init_security(handle, inode, dir, qstr); 920 if (err) 921 goto fail_free_drop; 922 923 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 924 /* set extent flag only for directory, file and normal symlink*/ 925 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { 926 ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); 927 ext4_ext_tree_init(handle, inode); 928 } 929 } 930 931 if (ext4_handle_valid(handle)) { 932 ei->i_sync_tid = handle->h_transaction->t_tid; 933 ei->i_datasync_tid = handle->h_transaction->t_tid; 934 } 935 936 err = ext4_mark_inode_dirty(handle, inode); 937 if (err) { 938 ext4_std_error(sb, err); 939 goto fail_free_drop; 940 } 941 942 ext4_debug("allocating inode %lu\n", inode->i_ino); 943 trace_ext4_allocate_inode(inode, dir, mode); 944 goto really_out; 945 fail: 946 ext4_std_error(sb, err); 947 out: 948 iput(inode); 949 ret = ERR_PTR(err); 950 really_out: 951 brelse(inode_bitmap_bh); 952 return ret; 953 954 fail_free_drop: 955 dquot_free_inode(inode); 956 957 fail_drop: 958 dquot_drop(inode); 959 inode->i_flags |= S_NOQUOTA; 960 clear_nlink(inode); 961 unlock_new_inode(inode); 962 iput(inode); 963 brelse(inode_bitmap_bh); 964 return ERR_PTR(err); 965 } 966 967 /* Verify that we are loading a valid orphan from disk */ 968 struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) 969 { 970 unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); 971 ext4_group_t block_group; 972 int bit; 973 struct buffer_head *bitmap_bh; 974 struct inode *inode = NULL; 975 long err = -EIO; 976 977 /* Error cases - e2fsck has already cleaned up for us */ 978 if (ino > max_ino) { 979 ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino); 980 goto error; 981 } 982 983 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 984 bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); 985 bitmap_bh = ext4_read_inode_bitmap(sb, block_group); 986 if (!bitmap_bh) { 987 ext4_warning(sb, "inode bitmap error for orphan %lu", ino); 988 goto error; 989 } 990 991 /* Having the inode bit set should be a 100% indicator that this 992 * is a valid orphan (no e2fsck run on fs). Orphans also include 993 * inodes that were being truncated, so we can't check i_nlink==0. 994 */ 995 if (!ext4_test_bit(bit, bitmap_bh->b_data)) 996 goto bad_orphan; 997 998 inode = ext4_iget(sb, ino); 999 if (IS_ERR(inode)) 1000 goto iget_failed; 1001 1002 /* 1003 * If the orphans has i_nlinks > 0 then it should be able to be 1004 * truncated, otherwise it won't be removed from the orphan list 1005 * during processing and an infinite loop will result. 1006 */ 1007 if (inode->i_nlink && !ext4_can_truncate(inode)) 1008 goto bad_orphan; 1009 1010 if (NEXT_ORPHAN(inode) > max_ino) 1011 goto bad_orphan; 1012 brelse(bitmap_bh); 1013 return inode; 1014 1015 iget_failed: 1016 err = PTR_ERR(inode); 1017 inode = NULL; 1018 bad_orphan: 1019 ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino); 1020 printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", 1021 bit, (unsigned long long)bitmap_bh->b_blocknr, 1022 ext4_test_bit(bit, bitmap_bh->b_data)); 1023 printk(KERN_NOTICE "inode=%p\n", inode); 1024 if (inode) { 1025 printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", 1026 is_bad_inode(inode)); 1027 printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", 1028 NEXT_ORPHAN(inode)); 1029 printk(KERN_NOTICE "max_ino=%lu\n", max_ino); 1030 printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink); 1031 /* Avoid freeing blocks if we got a bad deleted inode */ 1032 if (inode->i_nlink == 0) 1033 inode->i_blocks = 0; 1034 iput(inode); 1035 } 1036 brelse(bitmap_bh); 1037 error: 1038 return ERR_PTR(err); 1039 } 1040 1041 unsigned long ext4_count_free_inodes(struct super_block *sb) 1042 { 1043 unsigned long desc_count; 1044 struct ext4_group_desc *gdp; 1045 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 1046 #ifdef EXT4FS_DEBUG 1047 struct ext4_super_block *es; 1048 unsigned long bitmap_count, x; 1049 struct buffer_head *bitmap_bh = NULL; 1050 1051 es = EXT4_SB(sb)->s_es; 1052 desc_count = 0; 1053 bitmap_count = 0; 1054 gdp = NULL; 1055 for (i = 0; i < ngroups; i++) { 1056 gdp = ext4_get_group_desc(sb, i, NULL); 1057 if (!gdp) 1058 continue; 1059 desc_count += ext4_free_inodes_count(sb, gdp); 1060 brelse(bitmap_bh); 1061 bitmap_bh = ext4_read_inode_bitmap(sb, i); 1062 if (!bitmap_bh) 1063 continue; 1064 1065 x = ext4_count_free(bitmap_bh->b_data, 1066 EXT4_INODES_PER_GROUP(sb) / 8); 1067 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", 1068 (unsigned long) i, ext4_free_inodes_count(sb, gdp), x); 1069 bitmap_count += x; 1070 } 1071 brelse(bitmap_bh); 1072 printk(KERN_DEBUG "ext4_count_free_inodes: " 1073 "stored = %u, computed = %lu, %lu\n", 1074 le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count); 1075 return desc_count; 1076 #else 1077 desc_count = 0; 1078 for (i = 0; i < ngroups; i++) { 1079 gdp = ext4_get_group_desc(sb, i, NULL); 1080 if (!gdp) 1081 continue; 1082 desc_count += ext4_free_inodes_count(sb, gdp); 1083 cond_resched(); 1084 } 1085 return desc_count; 1086 #endif 1087 } 1088 1089 /* Called at mount-time, super-block is locked */ 1090 unsigned long ext4_count_dirs(struct super_block * sb) 1091 { 1092 unsigned long count = 0; 1093 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 1094 1095 for (i = 0; i < ngroups; i++) { 1096 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); 1097 if (!gdp) 1098 continue; 1099 count += ext4_used_dirs_count(sb, gdp); 1100 } 1101 return count; 1102 } 1103 1104 /* 1105 * Zeroes not yet zeroed inode table - just write zeroes through the whole 1106 * inode table. Must be called without any spinlock held. The only place 1107 * where it is called from on active part of filesystem is ext4lazyinit 1108 * thread, so we do not need any special locks, however we have to prevent 1109 * inode allocation from the current group, so we take alloc_sem lock, to 1110 * block ext4_new_inode() until we are finished. 1111 */ 1112 int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, 1113 int barrier) 1114 { 1115 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1116 struct ext4_sb_info *sbi = EXT4_SB(sb); 1117 struct ext4_group_desc *gdp = NULL; 1118 struct buffer_head *group_desc_bh; 1119 handle_t *handle; 1120 ext4_fsblk_t blk; 1121 int num, ret = 0, used_blks = 0; 1122 1123 /* This should not happen, but just to be sure check this */ 1124 if (sb->s_flags & MS_RDONLY) { 1125 ret = 1; 1126 goto out; 1127 } 1128 1129 gdp = ext4_get_group_desc(sb, group, &group_desc_bh); 1130 if (!gdp) 1131 goto out; 1132 1133 /* 1134 * We do not need to lock this, because we are the only one 1135 * handling this flag. 1136 */ 1137 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) 1138 goto out; 1139 1140 handle = ext4_journal_start_sb(sb, 1); 1141 if (IS_ERR(handle)) { 1142 ret = PTR_ERR(handle); 1143 goto out; 1144 } 1145 1146 down_write(&grp->alloc_sem); 1147 /* 1148 * If inode bitmap was already initialized there may be some 1149 * used inodes so we need to skip blocks with used inodes in 1150 * inode table. 1151 */ 1152 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) 1153 used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) - 1154 ext4_itable_unused_count(sb, gdp)), 1155 sbi->s_inodes_per_block); 1156 1157 if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { 1158 ext4_error(sb, "Something is wrong with group %u: " 1159 "used itable blocks: %d; " 1160 "itable unused count: %u", 1161 group, used_blks, 1162 ext4_itable_unused_count(sb, gdp)); 1163 ret = 1; 1164 goto err_out; 1165 } 1166 1167 blk = ext4_inode_table(sb, gdp) + used_blks; 1168 num = sbi->s_itb_per_group - used_blks; 1169 1170 BUFFER_TRACE(group_desc_bh, "get_write_access"); 1171 ret = ext4_journal_get_write_access(handle, 1172 group_desc_bh); 1173 if (ret) 1174 goto err_out; 1175 1176 /* 1177 * Skip zeroout if the inode table is full. But we set the ZEROED 1178 * flag anyway, because obviously, when it is full it does not need 1179 * further zeroing. 1180 */ 1181 if (unlikely(num == 0)) 1182 goto skip_zeroout; 1183 1184 ext4_debug("going to zero out inode table in group %d\n", 1185 group); 1186 ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS); 1187 if (ret < 0) 1188 goto err_out; 1189 if (barrier) 1190 blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL); 1191 1192 skip_zeroout: 1193 ext4_lock_group(sb, group); 1194 gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); 1195 ext4_group_desc_csum_set(sb, group, gdp); 1196 ext4_unlock_group(sb, group); 1197 1198 BUFFER_TRACE(group_desc_bh, 1199 "call ext4_handle_dirty_metadata"); 1200 ret = ext4_handle_dirty_metadata(handle, NULL, 1201 group_desc_bh); 1202 1203 err_out: 1204 up_write(&grp->alloc_sem); 1205 ext4_journal_stop(handle); 1206 out: 1207 return ret; 1208 } 1209