// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
static int ext4_validate_options(struct fs_context *fc);
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb);
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static const struct fs_parameter_spec ext4_param_specs[];

/*
 * Lock ordering
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
 *   -> page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
 *   page lock
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */
static const struct fs_context_operations ext4_context_ops = {
	.parse_param	= ext4_parse_param,
	.get_tree	= ext4_get_tree,
	.reconfigure	= ext4_reconfigure,
	.free		= ext4_fc_free,
};


#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext2",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext3",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)


static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * The buffer's verified bit is no longer valid after reading from
	 * disk again due to a write out error; clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ | op_flags, bh);
}

void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
			 bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io);
}

int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
	lock_buffer(bh);
	if (!wait) {
		ext4_read_bh_nowait(bh, op_flags, NULL);
		return 0;
	}
	return ext4_read_bh(bh, op_flags, NULL);
}

/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns. Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block,
					       blk_opf_t op_flags, gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				  blk_opf_t op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}

void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);

	if (likely(bh)) {
		if (trylock_buffer(bh))
			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL);
		brelse(bh);
	}
}
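/*
 * Illustrative example (editor's addition, not part of the original file):
 * unlike sb_bread(), callers of ext4_sb_bread() must test for an ERR_PTR
 * rather than NULL, e.g.:
 *
 *	bh = ext4_sb_bread(sb, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);	// -ENOMEM or -EIO
 *	...
 *	brelse(bh);
 */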
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

__le32 ext4_superblock_csum(struct super_block *sb,
			    struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}
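/*
 * Illustrative note (editor's addition): with 64-bit group descriptors
 * (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT), each on-disk value is
 * split into _lo/_hi halves which the accessors above recombine as
 * lo | ((u64)hi << shift). For example, a block bitmap at 0x123456789
 * would be stored as:
 *
 *	bg_block_bitmap_lo = cpu_to_le32(0x23456789);
 *	bg_block_bitmap_hi = cpu_to_le32(0x1);
 */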
void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}

#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
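/*
 * Illustrative note (editor's addition): superblock timestamps are 40-bit
 * values clamped to [0, 2^40 - 1]: 32 bits live in the __le32 field and
 * the top 8 bits in the _hi byte. For example, now = 0x123456789A is
 * stored as lo = 0x3456789A, hi = 0x12, and read back as
 * ((time64_t)0x12 << 32) + 0x3456789A.
 */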
/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oopses until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry *jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages and calls
 * clear_page_dirty_for_io(), which we want in order to write protect the
 * pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct folio *folio,
					      struct writeback_control *wbc,
					      void *data)
{
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = folio_buffers(folio);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			folio_redirty_for_writepage(wbc, folio);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}
static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = ext4_normal_submit_inode_data_buffers(jinode);
	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};

static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}
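/*
 * Illustrative note (editor's addition): EXT4_ERR_TRANSLATE(EIO) expands
 * to
 *	{ .code = EXT4_ERR_EIO, .errno = EIO }
 * so ext4_errno_to_code() maps a (positive) kernel errno such as EIO to
 * the stable on-disk EXT4_ERR_* code that gets stashed in the
 * superblock's error fields; unknown values fall back to
 * EXT4_ERR_UNKNOWN.
 */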
static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * detected inconsistencies or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (!continue_fs && !sb_rdonly(sb)) {
		ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to write out
		 * the superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here so we
		 * defer superblock flushing to a workqueue.
		 */
		if (continue_fs && journal)
			schedule_work(&EXT4_SB(sb)->s_error_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
		      sb->s_id);
	}

	if (sb_rdonly(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * Make sure updated value of ->s_mount_flags will be visible before
	 * ->s_flags update
	 */
	smp_wmb();
	sb->s_flags |= SB_RDONLY;
}
static void flush_stashed_error_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_error_work);
	journal_t *journal = sbi->s_journal;
	handle_t *handle;

	/*
	 * If the journal is still running, we have to write out the
	 * superblock through the journal to avoid collisions of other
	 * journalled sb updates.
	 *
	 * We use jbd2 functions directly here to avoid recursing back into
	 * ext4 error handling code during handling of previous errors.
	 */
	if (!sb_rdonly(sbi->s_sb) && journal) {
		struct buffer_head *sbh = sbi->s_sbh;
		handle = jbd2_journal_start(journal, 1);
		if (IS_ERR(handle))
			goto write_directly;
		if (jbd2_journal_get_write_access(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		ext4_update_super(sbi->s_sb);
		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
				 "superblock detected");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}

		if (jbd2_journal_dirty_metadata(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		jbd2_journal_stop(handle);
		ext4_notify_error_sysfs(sbi);
		return;
	}
write_directly:
	/*
	 * Write through journal failed. Write sb directly to get error info
	 * out and hope for the best.
	 */
	ext4_commit_super(sbi->s_sb);
	ext4_notify_error_sysfs(sbi);
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(sb, NULL, error ? error : EFSCORRUPTED);

	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}
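/*
 * Illustrative note (editor's addition): callers normally reach
 * __ext4_error() through the ext4_error()/ext4_error_err() wrapper macros
 * in ext4.h, which supply __func__ and __LINE__ automatically, e.g.:
 *
 *	ext4_error(sb, "bad block bitmap checksum in group %u", grp);
 */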
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, error ? error : EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}
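/*
 * Illustrative example (editor's addition): ext4_decode_error() is meant
 * for building log messages, e.g.:
 *
 *	char nbuf[16];
 *	const char *errstr = ext4_decode_error(sb, -EIO, nbuf);
 *	// errstr is now "IO failure"; unknown codes are formatted
 *	// into nbuf as "error <n>".
 */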
{
	struct va_format vaf;
	va_list args;

	if (sb) {
		atomic_inc(&EXT4_SB(sb)->s_msg_count);
		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
				  "EXT4-fs"))
			return;
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	else
		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
	va_end(args);
}

static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		if (!bdev_read_only(sb->s_bdev)) {
			save_error_info(sb, EFSCORRUPTED, ino, block, function,
					line);
			schedule_work(&EXT4_SB(sb)->s_error_work);
		}
		return;
	}
	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				      ext4_group_t group,
				      unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}
/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR,
		 "failed to open journal device unknown-block(%u,%u) %ld",
		 MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;

	bdev = sbi->s_journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->s_journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which hold s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head **group_desc;
	struct flex_groups **flex_groups;
	int aborted = 0;
	int i, err;

	/*
	 * Unregister sysfs before destroying the jbd2 journal: the
	 * attr_journal_task attribute could still be accessed via the
	 * sysfs path while sbi->s_journal->j_task is already NULL.
	 *
	 * Also unregister sysfs before flushing sbi->s_error_work: a user
	 * may read /proc/fs/ext4/xx/mb_groups during umount; if reading
	 * metadata fails verification, error work gets queued, and
	 * flush_stashed_error_work calling start_this_handle may then
	 * trigger a BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
			 &sb->s_uuid);

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	flush_work(&sbi->s_error_work);
	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	timer_shutdown_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb);

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want
		 * them floating about in memory - the physical journal
		 * device may be hotswapped, and it breaks the `ro-after'
		 * testing code.
		 */
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
		ext4_blkdev_remove(sbi);
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	ei->i_flags = 0;
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	atomic_set(&ei->i_prealloc_active, 0);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
			       EXT4_I(inode), sizeof(struct ext4_inode_info),
			       true);
		dump_stack();
	}

	if (EXT4_I(inode)->i_reserved_data_blocks)
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode, 0);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif
static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_acl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};

static const struct constant_table ext4_param_errors[] = {
	{"continue",	EXT4_MOUNT_ERRORS_CONT},
	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
	{}
};

static const struct constant_table ext4_param_data[] = {
	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
	{}
};

static const struct constant_table ext4_param_data_err[] = {
	{"abort",	Opt_data_err_abort},
	{"ignore",	Opt_data_err_ignore},
	{}
};

static const struct constant_table ext4_param_jqfmt[] = {
	{"vfsold",	QFMT_VFS_OLD},
	{"vfsv0",	QFMT_VFS_V0},
	{"vfsv1",	QFMT_VFS_V1},
	{}
};
static const struct constant_table ext4_param_dax[] = {
	{"always",	Opt_dax_always},
	{"inode",	Opt_dax_inode},
	{"never",	Opt_dax_never},
	{}
};

/* String parameter that allows empty argument */
#define fsparam_string_empty(NAME, OPT) \
	__fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)

/*
 * Mount option specification
 * We don't use fsparam_flag_no because of the way we set the
 * options and the way we show them in _ext4_show_options(). To
 * keep the changes to a minimum, let's keep the negative options
 * separate for now.
 */
static const struct fs_parameter_spec ext4_param_specs[] = {
	fsparam_flag	("bsddf",		Opt_bsd_df),
	fsparam_flag	("minixdf",		Opt_minix_df),
	fsparam_flag	("grpid",		Opt_grpid),
	fsparam_flag	("bsdgroups",		Opt_grpid),
	fsparam_flag	("nogrpid",		Opt_nogrpid),
	fsparam_flag	("sysvgroups",		Opt_nogrpid),
	fsparam_u32	("resgid",		Opt_resgid),
	fsparam_u32	("resuid",		Opt_resuid),
	fsparam_u32	("sb",			Opt_sb),
	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
	fsparam_flag	("nouid32",		Opt_nouid32),
	fsparam_flag	("debug",		Opt_debug),
	fsparam_flag	("oldalloc",		Opt_removed),
	fsparam_flag	("orlov",		Opt_removed),
	fsparam_flag	("user_xattr",		Opt_user_xattr),
	fsparam_flag	("acl",			Opt_acl),
	fsparam_flag	("norecovery",		Opt_noload),
	fsparam_flag	("noload",		Opt_noload),
	fsparam_flag	("bh",			Opt_removed),
	fsparam_flag	("nobh",		Opt_removed),
	fsparam_u32	("commit",		Opt_commit),
	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
	fsparam_u32	("journal_dev",		Opt_journal_dev),
	fsparam_bdev	("journal_path",	Opt_journal_path),
	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
	fsparam_flag	("journal_async_commit",Opt_journal_async_commit),
	fsparam_flag	("abort",		Opt_abort),
	fsparam_enum	("data",		Opt_data, ext4_param_data),
	fsparam_enum	("data_err",		Opt_data_err,
						ext4_param_data_err),
	fsparam_string_empty
			("usrjquota",		Opt_usrjquota),
	fsparam_string_empty
			("grpjquota",		Opt_grpjquota),
	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
	fsparam_flag	("grpquota",		Opt_grpquota),
	fsparam_flag	("quota",		Opt_quota),
	fsparam_flag	("noquota",		Opt_noquota),
	fsparam_flag	("usrquota",		Opt_usrquota),
	fsparam_flag	("prjquota",		Opt_prjquota),
	fsparam_flag	("barrier",		Opt_barrier),
	fsparam_u32	("barrier",		Opt_barrier),
	fsparam_flag	("nobarrier",		Opt_nobarrier),
	fsparam_flag	("i_version",		Opt_removed),
	fsparam_flag	("dax",			Opt_dax),
	fsparam_enum	("dax",			Opt_dax_type, ext4_param_dax),
	fsparam_u32	("stripe",		Opt_stripe),
	fsparam_flag	("delalloc",		Opt_delalloc),
	fsparam_flag	("nodelalloc",		Opt_nodelalloc),
	fsparam_flag	("warn_on_error",	Opt_warn_on_error),
	fsparam_flag	("nowarn_on_error",	Opt_nowarn_on_error),
	fsparam_u32	("debug_want_extra_isize",
						Opt_debug_want_extra_isize),
	fsparam_flag	("mblk_io_submit",	Opt_removed),
	fsparam_flag	("nomblk_io_submit",	Opt_removed),
	fsparam_flag	("block_validity",	Opt_block_validity),
	fsparam_flag	("noblock_validity",	Opt_noblock_validity),
	fsparam_u32	("inode_readahead_blks",
						Opt_inode_readahead_blks),
	fsparam_u32	("journal_ioprio",	Opt_journal_ioprio),
	fsparam_u32	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("noauto_da_alloc",	Opt_noauto_da_alloc),
	fsparam_flag	("dioread_nolock",	Opt_dioread_nolock),
	fsparam_flag	("nodioread_nolock",	Opt_dioread_lock),
	fsparam_flag	("dioread_lock",	Opt_dioread_lock),
	fsparam_flag	("discard",		Opt_discard),
	fsparam_flag	("nodiscard",		Opt_nodiscard),
	fsparam_u32	("init_itable",		Opt_init_itable),
	fsparam_flag	("init_itable",		Opt_init_itable),
	fsparam_flag	("noinit_itable",	Opt_noinit_itable),
#ifdef CONFIG_EXT4_DEBUG
	fsparam_flag	("fc_debug_force",	Opt_fc_debug_force),
	fsparam_u32	("fc_debug_max_replay",	Opt_fc_debug_max_replay),
#endif
	fsparam_u32	("max_dir_size_kb",	Opt_max_dir_size_kb),
	fsparam_flag	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_string	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_flag	("inlinecrypt",		Opt_inlinecrypt),
	fsparam_flag	("nombcache",		Opt_nombcache),
	fsparam_flag	("no_mbcache",		Opt_nombcache),	/* for backward compatibility */
	fsparam_flag	("prefetch_block_bitmaps",
						Opt_removed),
	fsparam_flag	("no_prefetch_block_bitmaps",
						Opt_no_prefetch_block_bitmaps),
	fsparam_s32	("mb_optimize_scan",	Opt_mb_optimize_scan),
	fsparam_string	("check",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("nocheck",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("reservation",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("noreservation",	Opt_removed),	/* mount option from ext2/3 */
	fsparam_u32	("journal",		Opt_removed),	/* mount option from ext2/3 */
	{}
};
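/*
 * Illustrative note (editor's addition): with the table above, a mount
 * string such as "errors=remount-ro,data=ordered" is handled by the new
 * mount API one parameter at a time; "errors" matches the fsparam_enum
 * entry, ext4_param_errors maps "remount-ro" to EXT4_MOUNT_ERRORS_RO,
 * and the result is then applied by ext4_parse_param().
 */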
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0010
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_NO_EXT2	0x0020
#define MOPT_NO_EXT3	0x0040
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_SKIP	0x0080
#define MOPT_2		0x0100

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_NO_EXT2},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_dax_type, 0, MOPT_EXT4_ONLY},
	{Opt_journal_dev, 0, MOPT_NO_EXT2},
	{Opt_journal_path, 0, MOPT_NO_EXT2},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2},
	{Opt_data, 0, MOPT_NO_EXT2},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
	 MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_jqfmt, 0, MOPT_QFMT},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
#endif
	{Opt_err, 0, 0}
};
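/*
 * Illustrative note (editor's addition): each entry above ties a parsed
 * token to a bit in the mount options. For example,
 *	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}
 * makes "discard" set EXT4_MOUNT_DISCARD, while the matching
 * {Opt_nodiscard, ..., MOPT_CLEAR} entry clears it; entries flagged
 * MOPT_EXT4_ONLY (= MOPT_NO_EXT2 | MOPT_NO_EXT3) are not accepted when
 * the ext4 driver is mounted under its ext2 or ext3 personalities.
 */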
MOPT_EXT4_ONLY | MOPT_CLEAR}, 1788 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1789 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1790 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | 1791 EXT4_MOUNT_JOURNAL_CHECKSUM), 1792 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1793 {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, 1794 {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2}, 1795 {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, 1796 {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, 1797 {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, 1798 {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, 1799 {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, 1800 {Opt_dax_type, 0, MOPT_EXT4_ONLY}, 1801 {Opt_journal_dev, 0, MOPT_NO_EXT2}, 1802 {Opt_journal_path, 0, MOPT_NO_EXT2}, 1803 {Opt_journal_ioprio, 0, MOPT_NO_EXT2}, 1804 {Opt_data, 0, MOPT_NO_EXT2}, 1805 {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, 1806 #ifdef CONFIG_EXT4_FS_POSIX_ACL 1807 {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, 1808 #else 1809 {Opt_acl, 0, MOPT_NOSUPPORT}, 1810 #endif 1811 {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, 1812 {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, 1813 {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, 1814 {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, 1815 MOPT_SET | MOPT_Q}, 1816 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, 1817 MOPT_SET | MOPT_Q}, 1818 {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA, 1819 MOPT_SET | MOPT_Q}, 1820 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 1821 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), 1822 MOPT_CLEAR | MOPT_Q}, 1823 {Opt_usrjquota, 0, MOPT_Q}, 1824 {Opt_grpjquota, 0, MOPT_Q}, 1825 {Opt_jqfmt, 0, MOPT_QFMT}, 1826 {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, 1827 {Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS, 1828 MOPT_SET}, 1829 #ifdef CONFIG_EXT4_DEBUG 1830 {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, 1831 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY}, 1832 #endif 1833 {Opt_err, 0, 0} 1834 }; 1835 1836 #if IS_ENABLED(CONFIG_UNICODE) 1837 static const struct ext4_sb_encodings { 1838 __u16 magic; 1839 char *name; 1840 unsigned int version; 1841 } ext4_sb_encoding_map[] = { 1842 {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)}, 1843 }; 1844 1845 static const struct ext4_sb_encodings * 1846 ext4_sb_read_encoding(const struct ext4_super_block *es) 1847 { 1848 __u16 magic = le16_to_cpu(es->s_encoding); 1849 int i; 1850 1851 for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++) 1852 if (magic == ext4_sb_encoding_map[i].magic) 1853 return &ext4_sb_encoding_map[i]; 1854 1855 return NULL; 1856 } 1857 #endif 1858 1859 #define EXT4_SPEC_JQUOTA (1 << 0) 1860 #define EXT4_SPEC_JQFMT (1 << 1) 1861 #define EXT4_SPEC_DATAJ (1 << 2) 1862 #define EXT4_SPEC_SB_BLOCK (1 << 3) 1863 #define EXT4_SPEC_JOURNAL_DEV (1 << 4) 1864 #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5) 1865 #define EXT4_SPEC_s_want_extra_isize (1 << 7) 1866 #define EXT4_SPEC_s_max_batch_time (1 << 8) 1867 #define EXT4_SPEC_s_min_batch_time (1 << 9) 1868 #define EXT4_SPEC_s_inode_readahead_blks (1 << 10) 1869 #define EXT4_SPEC_s_li_wait_mult (1 << 11) 1870 #define EXT4_SPEC_s_max_dir_size_kb (1 << 12) 1871 #define EXT4_SPEC_s_stripe (1 << 13) 1872 #define EXT4_SPEC_s_resuid (1 << 14) 1873 #define EXT4_SPEC_s_resgid (1 << 15) 1874 #define EXT4_SPEC_s_commit_interval (1 << 16) 1875 #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17) 1876 #define EXT4_SPEC_s_sb_block (1 << 18) 
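/*
 * A minimal, self-contained sketch of the ext4_mount_opts[] mechanism
 * above: a parsed token is looked up in the table and MOPT_SET vs.
 * MOPT_CLEAR decides whether its bit is ORed into or masked out of the
 * mount options word. The demo_* names and values below are invented
 * for illustration; they are not the real Opt_* / EXT4_MOUNT_* constants.
 */
#if 0	/* illustration only; compiles standalone in userspace */
#include <stdio.h>

#define DEMO_MOPT_SET		0x0001
#define DEMO_MOPT_CLEAR		0x0002
#define DEMO_MOUNT_DISCARD	0x0100	/* stand-in for a mount flag bit */

struct demo_mount_opts { int token; unsigned int mount_opt; int flags; };

static const struct demo_mount_opts demo_opts[] = {
	{1 /* "discard" */,   DEMO_MOUNT_DISCARD, DEMO_MOPT_SET},
	{2 /* "nodiscard" */, DEMO_MOUNT_DISCARD, DEMO_MOPT_CLEAR},
	{0, 0, 0}
};

static unsigned int demo_apply(unsigned int opts, int token)
{
	const struct demo_mount_opts *m;

	for (m = demo_opts; m->token; m++)
		if (m->token == token)
			break;
	if (!m->token)
		return opts;			/* unknown token: no change */
	if (m->flags & DEMO_MOPT_CLEAR)
		return opts & ~m->mount_opt;
	return opts | m->mount_opt;		/* DEMO_MOPT_SET */
}

int main(void)
{
	unsigned int opts = 0;

	opts = demo_apply(opts, 1);	/* "discard"   -> 0x0100 */
	opts = demo_apply(opts, 2);	/* "nodiscard" -> 0x0000 */
	printf("%#x\n", opts);
	return 0;
}
#endif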
1877 #define EXT4_SPEC_mb_optimize_scan (1 << 19)
1878
1879 struct ext4_fs_context {
1880 char *s_qf_names[EXT4_MAXQUOTAS];
1881 struct fscrypt_dummy_policy dummy_enc_policy;
1882 int s_jquota_fmt; /* Format of quota to use */
1883 #ifdef CONFIG_EXT4_DEBUG
1884 int s_fc_debug_max_replay;
1885 #endif
1886 unsigned short qname_spec;
1887 unsigned long vals_s_flags; /* Bits to set in s_flags */
1888 unsigned long mask_s_flags; /* Bits changed in s_flags */
1889 unsigned long journal_devnum;
1890 unsigned long s_commit_interval;
1891 unsigned long s_stripe;
1892 unsigned int s_inode_readahead_blks;
1893 unsigned int s_want_extra_isize;
1894 unsigned int s_li_wait_mult;
1895 unsigned int s_max_dir_size_kb;
1896 unsigned int journal_ioprio;
1897 unsigned int vals_s_mount_opt;
1898 unsigned int mask_s_mount_opt;
1899 unsigned int vals_s_mount_opt2;
1900 unsigned int mask_s_mount_opt2;
1901 unsigned long vals_s_mount_flags;
1902 unsigned long mask_s_mount_flags;
1903 unsigned int opt_flags; /* MOPT flags */
1904 unsigned int spec;
1905 u32 s_max_batch_time;
1906 u32 s_min_batch_time;
1907 kuid_t s_resuid;
1908 kgid_t s_resgid;
1909 ext4_fsblk_t s_sb_block;
1910 };
1911
1912 static void ext4_fc_free(struct fs_context *fc)
1913 {
1914 struct ext4_fs_context *ctx = fc->fs_private;
1915 int i;
1916
1917 if (!ctx)
1918 return;
1919
1920 for (i = 0; i < EXT4_MAXQUOTAS; i++)
1921 kfree(ctx->s_qf_names[i]);
1922
1923 fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
1924 kfree(ctx);
1925 }
1926
1927 static int ext4_init_fs_context(struct fs_context *fc)
1928 {
1929 struct ext4_fs_context *ctx;
1930
1931 ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
1932 if (!ctx)
1933 return -ENOMEM;
1934
1935 fc->fs_private = ctx;
1936 fc->ops = &ext4_context_ops;
1937
1938 return 0;
1939 }
1940
1941 #ifdef CONFIG_QUOTA
1942 /*
1943 * Note the name of the specified quota file.
1944 */
1945 static int note_qf_name(struct fs_context *fc, int qtype,
1946 struct fs_parameter *param)
1947 {
1948 struct ext4_fs_context *ctx = fc->fs_private;
1949 char *qname;
1950
1951 if (param->size < 1) {
1952 ext4_msg(NULL, KERN_ERR, "Missing quota name");
1953 return -EINVAL;
1954 }
1955 if (strchr(param->string, '/')) {
1956 ext4_msg(NULL, KERN_ERR,
1957 "quotafile must be on filesystem root");
1958 return -EINVAL;
1959 }
1960 if (ctx->s_qf_names[qtype]) {
1961 if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
1962 ext4_msg(NULL, KERN_ERR,
1963 "%s quota file already specified",
1964 QTYPE2NAME(qtype));
1965 return -EINVAL;
1966 }
1967 return 0;
1968 }
1969
1970 qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
1971 if (!qname) {
1972 ext4_msg(NULL, KERN_ERR,
1973 "Not enough memory for storing quotafile name");
1974 return -ENOMEM;
1975 }
1976 ctx->s_qf_names[qtype] = qname;
1977 ctx->qname_spec |= 1 << qtype;
1978 ctx->spec |= EXT4_SPEC_JQUOTA;
1979 return 0;
1980 }
1981
1982 /*
1983 * Clear the name of the specified quota file.
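 * This is also how an explicitly empty value ("usrjquota=" or
 * "grpjquota=") is honoured: ext4_parse_param() below routes it here,
 * so a name recorded earlier (e.g. prior to a remount) is dropped again.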
1984 */ 1985 static int unnote_qf_name(struct fs_context *fc, int qtype) 1986 { 1987 struct ext4_fs_context *ctx = fc->fs_private; 1988 1989 if (ctx->s_qf_names[qtype]) 1990 kfree(ctx->s_qf_names[qtype]); 1991 1992 ctx->s_qf_names[qtype] = NULL; 1993 ctx->qname_spec |= 1 << qtype; 1994 ctx->spec |= EXT4_SPEC_JQUOTA; 1995 return 0; 1996 } 1997 #endif 1998 1999 static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param, 2000 struct ext4_fs_context *ctx) 2001 { 2002 int err; 2003 2004 if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { 2005 ext4_msg(NULL, KERN_WARNING, 2006 "test_dummy_encryption option not supported"); 2007 return -EINVAL; 2008 } 2009 err = fscrypt_parse_test_dummy_encryption(param, 2010 &ctx->dummy_enc_policy); 2011 if (err == -EINVAL) { 2012 ext4_msg(NULL, KERN_WARNING, 2013 "Value of option \"%s\" is unrecognized", param->key); 2014 } else if (err == -EEXIST) { 2015 ext4_msg(NULL, KERN_WARNING, 2016 "Conflicting test_dummy_encryption options"); 2017 return -EINVAL; 2018 } 2019 return err; 2020 } 2021 2022 #define EXT4_SET_CTX(name) \ 2023 static inline void ctx_set_##name(struct ext4_fs_context *ctx, \ 2024 unsigned long flag) \ 2025 { \ 2026 ctx->mask_s_##name |= flag; \ 2027 ctx->vals_s_##name |= flag; \ 2028 } 2029 2030 #define EXT4_CLEAR_CTX(name) \ 2031 static inline void ctx_clear_##name(struct ext4_fs_context *ctx, \ 2032 unsigned long flag) \ 2033 { \ 2034 ctx->mask_s_##name |= flag; \ 2035 ctx->vals_s_##name &= ~flag; \ 2036 } 2037 2038 #define EXT4_TEST_CTX(name) \ 2039 static inline unsigned long \ 2040 ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 2041 { \ 2042 return (ctx->vals_s_##name & flag); \ 2043 } 2044 2045 EXT4_SET_CTX(flags); /* set only */ 2046 EXT4_SET_CTX(mount_opt); 2047 EXT4_CLEAR_CTX(mount_opt); 2048 EXT4_TEST_CTX(mount_opt); 2049 EXT4_SET_CTX(mount_opt2); 2050 EXT4_CLEAR_CTX(mount_opt2); 2051 EXT4_TEST_CTX(mount_opt2); 2052 2053 static inline void ctx_set_mount_flag(struct ext4_fs_context *ctx, int bit) 2054 { 2055 set_bit(bit, &ctx->mask_s_mount_flags); 2056 set_bit(bit, &ctx->vals_s_mount_flags); 2057 } 2058 2059 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) 2060 { 2061 struct ext4_fs_context *ctx = fc->fs_private; 2062 struct fs_parse_result result; 2063 const struct mount_opts *m; 2064 int is_remount; 2065 kuid_t uid; 2066 kgid_t gid; 2067 int token; 2068 2069 token = fs_parse(fc, ext4_param_specs, param, &result); 2070 if (token < 0) 2071 return token; 2072 is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 2073 2074 for (m = ext4_mount_opts; m->token != Opt_err; m++) 2075 if (token == m->token) 2076 break; 2077 2078 ctx->opt_flags |= m->flags; 2079 2080 if (m->flags & MOPT_EXPLICIT) { 2081 if (m->mount_opt & EXT4_MOUNT_DELALLOC) { 2082 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC); 2083 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { 2084 ctx_set_mount_opt2(ctx, 2085 EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM); 2086 } else 2087 return -EINVAL; 2088 } 2089 2090 if (m->flags & MOPT_NOSUPPORT) { 2091 ext4_msg(NULL, KERN_ERR, "%s option not supported", 2092 param->key); 2093 return 0; 2094 } 2095 2096 switch (token) { 2097 #ifdef CONFIG_QUOTA 2098 case Opt_usrjquota: 2099 if (!*param->string) 2100 return unnote_qf_name(fc, USRQUOTA); 2101 else 2102 return note_qf_name(fc, USRQUOTA, param); 2103 case Opt_grpjquota: 2104 if (!*param->string) 2105 return unnote_qf_name(fc, GRPQUOTA); 2106 else 2107 return note_qf_name(fc, GRPQUOTA, param); 2108 #endif 2109 case 
Opt_sb: 2110 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 2111 ext4_msg(NULL, KERN_WARNING, 2112 "Ignoring %s option on remount", param->key); 2113 } else { 2114 ctx->s_sb_block = result.uint_32; 2115 ctx->spec |= EXT4_SPEC_s_sb_block; 2116 } 2117 return 0; 2118 case Opt_removed: 2119 ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option", 2120 param->key); 2121 return 0; 2122 case Opt_abort: 2123 ctx_set_mount_flag(ctx, EXT4_MF_FS_ABORTED); 2124 return 0; 2125 case Opt_inlinecrypt: 2126 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT 2127 ctx_set_flags(ctx, SB_INLINECRYPT); 2128 #else 2129 ext4_msg(NULL, KERN_ERR, "inline encryption not supported"); 2130 #endif 2131 return 0; 2132 case Opt_errors: 2133 ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK); 2134 ctx_set_mount_opt(ctx, result.uint_32); 2135 return 0; 2136 #ifdef CONFIG_QUOTA 2137 case Opt_jqfmt: 2138 ctx->s_jquota_fmt = result.uint_32; 2139 ctx->spec |= EXT4_SPEC_JQFMT; 2140 return 0; 2141 #endif 2142 case Opt_data: 2143 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2144 ctx_set_mount_opt(ctx, result.uint_32); 2145 ctx->spec |= EXT4_SPEC_DATAJ; 2146 return 0; 2147 case Opt_commit: 2148 if (result.uint_32 == 0) 2149 result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE; 2150 else if (result.uint_32 > INT_MAX / HZ) { 2151 ext4_msg(NULL, KERN_ERR, 2152 "Invalid commit interval %d, " 2153 "must be smaller than %d", 2154 result.uint_32, INT_MAX / HZ); 2155 return -EINVAL; 2156 } 2157 ctx->s_commit_interval = HZ * result.uint_32; 2158 ctx->spec |= EXT4_SPEC_s_commit_interval; 2159 return 0; 2160 case Opt_debug_want_extra_isize: 2161 if ((result.uint_32 & 1) || (result.uint_32 < 4)) { 2162 ext4_msg(NULL, KERN_ERR, 2163 "Invalid want_extra_isize %d", result.uint_32); 2164 return -EINVAL; 2165 } 2166 ctx->s_want_extra_isize = result.uint_32; 2167 ctx->spec |= EXT4_SPEC_s_want_extra_isize; 2168 return 0; 2169 case Opt_max_batch_time: 2170 ctx->s_max_batch_time = result.uint_32; 2171 ctx->spec |= EXT4_SPEC_s_max_batch_time; 2172 return 0; 2173 case Opt_min_batch_time: 2174 ctx->s_min_batch_time = result.uint_32; 2175 ctx->spec |= EXT4_SPEC_s_min_batch_time; 2176 return 0; 2177 case Opt_inode_readahead_blks: 2178 if (result.uint_32 && 2179 (result.uint_32 > (1 << 30) || 2180 !is_power_of_2(result.uint_32))) { 2181 ext4_msg(NULL, KERN_ERR, 2182 "EXT4-fs: inode_readahead_blks must be " 2183 "0 or a power of 2 smaller than 2^31"); 2184 return -EINVAL; 2185 } 2186 ctx->s_inode_readahead_blks = result.uint_32; 2187 ctx->spec |= EXT4_SPEC_s_inode_readahead_blks; 2188 return 0; 2189 case Opt_init_itable: 2190 ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE); 2191 ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 2192 if (param->type == fs_value_is_string) 2193 ctx->s_li_wait_mult = result.uint_32; 2194 ctx->spec |= EXT4_SPEC_s_li_wait_mult; 2195 return 0; 2196 case Opt_max_dir_size_kb: 2197 ctx->s_max_dir_size_kb = result.uint_32; 2198 ctx->spec |= EXT4_SPEC_s_max_dir_size_kb; 2199 return 0; 2200 #ifdef CONFIG_EXT4_DEBUG 2201 case Opt_fc_debug_max_replay: 2202 ctx->s_fc_debug_max_replay = result.uint_32; 2203 ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay; 2204 return 0; 2205 #endif 2206 case Opt_stripe: 2207 ctx->s_stripe = result.uint_32; 2208 ctx->spec |= EXT4_SPEC_s_stripe; 2209 return 0; 2210 case Opt_resuid: 2211 uid = make_kuid(current_user_ns(), result.uint_32); 2212 if (!uid_valid(uid)) { 2213 ext4_msg(NULL, KERN_ERR, "Invalid uid value %d", 2214 result.uint_32); 2215 return -EINVAL; 2216 } 2217 ctx->s_resuid = uid; 2218 ctx->spec |= 
EXT4_SPEC_s_resuid; 2219 return 0; 2220 case Opt_resgid: 2221 gid = make_kgid(current_user_ns(), result.uint_32); 2222 if (!gid_valid(gid)) { 2223 ext4_msg(NULL, KERN_ERR, "Invalid gid value %d", 2224 result.uint_32); 2225 return -EINVAL; 2226 } 2227 ctx->s_resgid = gid; 2228 ctx->spec |= EXT4_SPEC_s_resgid; 2229 return 0; 2230 case Opt_journal_dev: 2231 if (is_remount) { 2232 ext4_msg(NULL, KERN_ERR, 2233 "Cannot specify journal on remount"); 2234 return -EINVAL; 2235 } 2236 ctx->journal_devnum = result.uint_32; 2237 ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2238 return 0; 2239 case Opt_journal_path: 2240 { 2241 struct inode *journal_inode; 2242 struct path path; 2243 int error; 2244 2245 if (is_remount) { 2246 ext4_msg(NULL, KERN_ERR, 2247 "Cannot specify journal on remount"); 2248 return -EINVAL; 2249 } 2250 2251 error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path); 2252 if (error) { 2253 ext4_msg(NULL, KERN_ERR, "error: could not find " 2254 "journal device path"); 2255 return -EINVAL; 2256 } 2257 2258 journal_inode = d_inode(path.dentry); 2259 ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev); 2260 ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2261 path_put(&path); 2262 return 0; 2263 } 2264 case Opt_journal_ioprio: 2265 if (result.uint_32 > 7) { 2266 ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority" 2267 " (must be 0-7)"); 2268 return -EINVAL; 2269 } 2270 ctx->journal_ioprio = 2271 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32); 2272 ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO; 2273 return 0; 2274 case Opt_test_dummy_encryption: 2275 return ext4_parse_test_dummy_encryption(param, ctx); 2276 case Opt_dax: 2277 case Opt_dax_type: 2278 #ifdef CONFIG_FS_DAX 2279 { 2280 int type = (token == Opt_dax) ? 2281 Opt_dax : result.uint_32; 2282 2283 switch (type) { 2284 case Opt_dax: 2285 case Opt_dax_always: 2286 ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 2287 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 2288 break; 2289 case Opt_dax_never: 2290 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 2291 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 2292 break; 2293 case Opt_dax_inode: 2294 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 2295 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 2296 /* Strictly for printing options */ 2297 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE); 2298 break; 2299 } 2300 return 0; 2301 } 2302 #else 2303 ext4_msg(NULL, KERN_INFO, "dax option not supported"); 2304 return -EINVAL; 2305 #endif 2306 case Opt_data_err: 2307 if (result.uint_32 == Opt_data_err_abort) 2308 ctx_set_mount_opt(ctx, m->mount_opt); 2309 else if (result.uint_32 == Opt_data_err_ignore) 2310 ctx_clear_mount_opt(ctx, m->mount_opt); 2311 return 0; 2312 case Opt_mb_optimize_scan: 2313 if (result.int_32 == 1) { 2314 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 2315 ctx->spec |= EXT4_SPEC_mb_optimize_scan; 2316 } else if (result.int_32 == 0) { 2317 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 2318 ctx->spec |= EXT4_SPEC_mb_optimize_scan; 2319 } else { 2320 ext4_msg(NULL, KERN_WARNING, 2321 "mb_optimize_scan should be set to 0 or 1."); 2322 return -EINVAL; 2323 } 2324 return 0; 2325 } 2326 2327 /* 2328 * At this point we should only be getting options requiring MOPT_SET, 2329 * or MOPT_CLEAR. 
Anything else is a bug.
2330 */
2331 if (m->token == Opt_err) {
2332 ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
2333 param->key);
2334 WARN_ON(1);
2335 return -EINVAL;
2336 }
2337
2338 else {
2339 unsigned int set = 0;
2340
2341 if ((param->type == fs_value_is_flag) ||
2342 result.uint_32 > 0)
2343 set = 1;
2344
2345 if (m->flags & MOPT_CLEAR)
2346 set = !set;
2347 else if (unlikely(!(m->flags & MOPT_SET))) {
2348 ext4_msg(NULL, KERN_WARNING,
2349 "buggy handling of option %s",
2350 param->key);
2351 WARN_ON(1);
2352 return -EINVAL;
2353 }
2354 if (m->flags & MOPT_2) {
2355 if (set != 0)
2356 ctx_set_mount_opt2(ctx, m->mount_opt);
2357 else
2358 ctx_clear_mount_opt2(ctx, m->mount_opt);
2359 } else {
2360 if (set != 0)
2361 ctx_set_mount_opt(ctx, m->mount_opt);
2362 else
2363 ctx_clear_mount_opt(ctx, m->mount_opt);
2364 }
2365 }
2366
2367 return 0;
2368 }
2369
2370 static int parse_options(struct fs_context *fc, char *options)
2371 {
2372 struct fs_parameter param;
2373 int ret;
2374 char *key;
2375
2376 if (!options)
2377 return 0;
2378
2379 while ((key = strsep(&options, ",")) != NULL) {
2380 if (*key) {
2381 size_t v_len = 0;
2382 char *value = strchr(key, '=');
2383
2384 param.type = fs_value_is_flag;
2385 param.string = NULL;
2386
2387 if (value) {
2388 if (value == key)
2389 continue;
2390
2391 *value++ = 0;
2392 v_len = strlen(value);
2393 param.string = kmemdup_nul(value, v_len,
2394 GFP_KERNEL);
2395 if (!param.string)
2396 return -ENOMEM;
2397 param.type = fs_value_is_string;
2398 }
2399
2400 param.key = key;
2401 param.size = v_len;
2402
2403 ret = ext4_parse_param(fc, &param);
2404 if (param.string)
2405 kfree(param.string);
2406 if (ret < 0)
2407 return ret;
2408 }
2409 }
2410
2411 ret = ext4_validate_options(fc);
2412 if (ret < 0)
2413 return ret;
2414
2415 return 0;
2416 }
2417
2418 static int parse_apply_sb_mount_options(struct super_block *sb,
2419 struct ext4_fs_context *m_ctx)
2420 {
2421 struct ext4_sb_info *sbi = EXT4_SB(sb);
2422 char *s_mount_opts = NULL;
2423 struct ext4_fs_context *s_ctx = NULL;
2424 struct fs_context *fc = NULL;
2425 int ret = -ENOMEM;
2426
2427 if (!sbi->s_es->s_mount_opts[0])
2428 return 0;
2429
2430 s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
2431 sizeof(sbi->s_es->s_mount_opts),
2432 GFP_KERNEL);
2433 if (!s_mount_opts)
2434 return ret;
2435
2436 fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
2437 if (!fc)
2438 goto out_free;
2439
2440 s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
2441 if (!s_ctx)
2442 goto out_free;
2443
2444 fc->fs_private = s_ctx;
2445 fc->s_fs_info = sbi;
2446
2447 ret = parse_options(fc, s_mount_opts);
2448 if (ret < 0)
2449 goto parse_failed;
2450
2451 ret = ext4_check_opt_consistency(fc, sb);
2452 if (ret < 0) {
2453 parse_failed:
2454 ext4_msg(sb, KERN_WARNING,
2455 "failed to parse options in superblock: %s",
2456 s_mount_opts);
2457 ret = 0;
2458 goto out_free;
2459 }
2460
2461 if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
2462 m_ctx->journal_devnum = s_ctx->journal_devnum;
2463 if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
2464 m_ctx->journal_ioprio = s_ctx->journal_ioprio;
2465
2466 ext4_apply_options(fc, sb);
2467 ret = 0;
2468
2469 out_free:
2470 if (fc) {
2471 ext4_fc_free(fc);
2472 kfree(fc);
2473 }
2474 kfree(s_mount_opts);
2475 return ret;
2476 }
2477
2478 static void ext4_apply_quota_options(struct fs_context *fc,
2479 struct super_block *sb)
2480 {
2481 #ifdef CONFIG_QUOTA
2482 bool quota_feature = ext4_has_feature_quota(sb);
2483 struct ext4_fs_context *ctx = fc->fs_private;
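/*
 * The ctx_set_*() / ctx_clear_*() helpers generated above record every
 * option twice: mask_s_* accumulates which bits were mentioned at all,
 * vals_s_* their requested values. ext4_apply_options() below then
 * applies them as (old & ~mask) | vals, so bits that were never
 * mentioned keep their current value. A self-contained sketch of that
 * idea, with invented flag bits (not the real EXT4_MOUNT_* flags):
 */
#if 0	/* illustration only; compiles standalone in userspace */
#include <assert.h>

#define DEMO_OPT_A 0x1
#define DEMO_OPT_B 0x2

struct demo_ctx { unsigned int mask, vals; };

static void demo_set(struct demo_ctx *c, unsigned int f)
{
	c->mask |= f;	/* the bit was specified ... */
	c->vals |= f;	/* ... with value 1 */
}

static void demo_clear(struct demo_ctx *c, unsigned int f)
{
	c->mask |= f;	/* the bit was specified ... */
	c->vals &= ~f;	/* ... with value 0 */
}

int main(void)
{
	unsigned int s_mount_opt = DEMO_OPT_A;	/* current state: A set */
	struct demo_ctx c = {0, 0};

	demo_clear(&c, DEMO_OPT_A);
	demo_set(&c, DEMO_OPT_B);
	/* the apply step, shaped like ext4_apply_options(): */
	s_mount_opt = (s_mount_opt & ~c.mask) | c.vals;
	assert(s_mount_opt == DEMO_OPT_B);	/* A cleared, B set */
	return 0;
}
#endif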
2484 struct ext4_sb_info *sbi = EXT4_SB(sb); 2485 char *qname; 2486 int i; 2487 2488 if (quota_feature) 2489 return; 2490 2491 if (ctx->spec & EXT4_SPEC_JQUOTA) { 2492 for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2493 if (!(ctx->qname_spec & (1 << i))) 2494 continue; 2495 2496 qname = ctx->s_qf_names[i]; /* May be NULL */ 2497 if (qname) 2498 set_opt(sb, QUOTA); 2499 ctx->s_qf_names[i] = NULL; 2500 qname = rcu_replace_pointer(sbi->s_qf_names[i], qname, 2501 lockdep_is_held(&sb->s_umount)); 2502 if (qname) 2503 kfree_rcu(qname); 2504 } 2505 } 2506 2507 if (ctx->spec & EXT4_SPEC_JQFMT) 2508 sbi->s_jquota_fmt = ctx->s_jquota_fmt; 2509 #endif 2510 } 2511 2512 /* 2513 * Check quota settings consistency. 2514 */ 2515 static int ext4_check_quota_consistency(struct fs_context *fc, 2516 struct super_block *sb) 2517 { 2518 #ifdef CONFIG_QUOTA 2519 struct ext4_fs_context *ctx = fc->fs_private; 2520 struct ext4_sb_info *sbi = EXT4_SB(sb); 2521 bool quota_feature = ext4_has_feature_quota(sb); 2522 bool quota_loaded = sb_any_quota_loaded(sb); 2523 bool usr_qf_name, grp_qf_name, usrquota, grpquota; 2524 int quota_flags, i; 2525 2526 /* 2527 * We do the test below only for project quotas. 'usrquota' and 2528 * 'grpquota' mount options are allowed even without quota feature 2529 * to support legacy quotas in quota files. 2530 */ 2531 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) && 2532 !ext4_has_feature_project(sb)) { 2533 ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. " 2534 "Cannot enable project quota enforcement."); 2535 return -EINVAL; 2536 } 2537 2538 quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 2539 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA; 2540 if (quota_loaded && 2541 ctx->mask_s_mount_opt & quota_flags && 2542 !ctx_test_mount_opt(ctx, quota_flags)) 2543 goto err_quota_change; 2544 2545 if (ctx->spec & EXT4_SPEC_JQUOTA) { 2546 2547 for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2548 if (!(ctx->qname_spec & (1 << i))) 2549 continue; 2550 2551 if (quota_loaded && 2552 !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i]) 2553 goto err_jquota_change; 2554 2555 if (sbi->s_qf_names[i] && ctx->s_qf_names[i] && 2556 strcmp(get_qf_name(sb, sbi, i), 2557 ctx->s_qf_names[i]) != 0) 2558 goto err_jquota_specified; 2559 } 2560 2561 if (quota_feature) { 2562 ext4_msg(NULL, KERN_INFO, 2563 "Journaled quota options ignored when " 2564 "QUOTA feature is enabled"); 2565 return 0; 2566 } 2567 } 2568 2569 if (ctx->spec & EXT4_SPEC_JQFMT) { 2570 if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded) 2571 goto err_jquota_change; 2572 if (quota_feature) { 2573 ext4_msg(NULL, KERN_INFO, "Quota format mount options " 2574 "ignored when QUOTA feature is enabled"); 2575 return 0; 2576 } 2577 } 2578 2579 /* Make sure we don't mix old and new quota format */ 2580 usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) || 2581 ctx->s_qf_names[USRQUOTA]); 2582 grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) || 2583 ctx->s_qf_names[GRPQUOTA]); 2584 2585 usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 2586 test_opt(sb, USRQUOTA)); 2587 2588 grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) || 2589 test_opt(sb, GRPQUOTA)); 2590 2591 if (usr_qf_name) { 2592 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2593 usrquota = false; 2594 } 2595 if (grp_qf_name) { 2596 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2597 grpquota = false; 2598 } 2599 2600 if (usr_qf_name || grp_qf_name) { 2601 if (usrquota || grpquota) { 2602 ext4_msg(NULL, KERN_ERR, "old and new quota " 2603 "format mixing"); 2604 return -EINVAL; 2605 } 
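/*
 * parse_options() above splits the legacy comma-separated option string
 * by hand before handing each key (and optional value) to
 * ext4_parse_param(). A userspace sketch of just that splitting step
 * (the option string is made up; the kernel copies the value with
 * kmemdup_nul() instead of keeping pointers into the buffer, and
 * strsep() here is the glibc/BSD function of the same name):
 */
#if 0	/* illustration only; compiles standalone in userspace */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "noload,commit=5,data=ordered";
	char *options = buf, *key;

	while ((key = strsep(&options, ",")) != NULL) {
		char *value;

		if (!*key)			/* skip empty elements, e.g. ",," */
			continue;
		value = strchr(key, '=');
		if (value)
			*value++ = '\0';	/* split "key=value" in place */
		printf("key=%s value=%s\n", key, value ? value : "(flag)");
	}
	return 0;
}
#endif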
2606 2607 if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) { 2608 ext4_msg(NULL, KERN_ERR, "journaled quota format " 2609 "not specified"); 2610 return -EINVAL; 2611 } 2612 } 2613 2614 return 0; 2615 2616 err_quota_change: 2617 ext4_msg(NULL, KERN_ERR, 2618 "Cannot change quota options when quota turned on"); 2619 return -EINVAL; 2620 err_jquota_change: 2621 ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota " 2622 "options when quota turned on"); 2623 return -EINVAL; 2624 err_jquota_specified: 2625 ext4_msg(NULL, KERN_ERR, "%s quota file already specified", 2626 QTYPE2NAME(i)); 2627 return -EINVAL; 2628 #else 2629 return 0; 2630 #endif 2631 } 2632 2633 static int ext4_check_test_dummy_encryption(const struct fs_context *fc, 2634 struct super_block *sb) 2635 { 2636 const struct ext4_fs_context *ctx = fc->fs_private; 2637 const struct ext4_sb_info *sbi = EXT4_SB(sb); 2638 2639 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy)) 2640 return 0; 2641 2642 if (!ext4_has_feature_encrypt(sb)) { 2643 ext4_msg(NULL, KERN_WARNING, 2644 "test_dummy_encryption requires encrypt feature"); 2645 return -EINVAL; 2646 } 2647 /* 2648 * This mount option is just for testing, and it's not worthwhile to 2649 * implement the extra complexity (e.g. RCU protection) that would be 2650 * needed to allow it to be set or changed during remount. We do allow 2651 * it to be specified during remount, but only if there is no change. 2652 */ 2653 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 2654 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 2655 &ctx->dummy_enc_policy)) 2656 return 0; 2657 ext4_msg(NULL, KERN_WARNING, 2658 "Can't set or change test_dummy_encryption on remount"); 2659 return -EINVAL; 2660 } 2661 /* Also make sure s_mount_opts didn't contain a conflicting value. 
*/ 2662 if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) { 2663 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 2664 &ctx->dummy_enc_policy)) 2665 return 0; 2666 ext4_msg(NULL, KERN_WARNING, 2667 "Conflicting test_dummy_encryption options"); 2668 return -EINVAL; 2669 } 2670 return 0; 2671 } 2672 2673 static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx, 2674 struct super_block *sb) 2675 { 2676 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) || 2677 /* if already set, it was already verified to be the same */ 2678 fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy)) 2679 return; 2680 EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy; 2681 memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy)); 2682 ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); 2683 } 2684 2685 static int ext4_check_opt_consistency(struct fs_context *fc, 2686 struct super_block *sb) 2687 { 2688 struct ext4_fs_context *ctx = fc->fs_private; 2689 struct ext4_sb_info *sbi = fc->s_fs_info; 2690 int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 2691 int err; 2692 2693 if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { 2694 ext4_msg(NULL, KERN_ERR, 2695 "Mount option(s) incompatible with ext2"); 2696 return -EINVAL; 2697 } 2698 if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { 2699 ext4_msg(NULL, KERN_ERR, 2700 "Mount option(s) incompatible with ext3"); 2701 return -EINVAL; 2702 } 2703 2704 if (ctx->s_want_extra_isize > 2705 (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) { 2706 ext4_msg(NULL, KERN_ERR, 2707 "Invalid want_extra_isize %d", 2708 ctx->s_want_extra_isize); 2709 return -EINVAL; 2710 } 2711 2712 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DIOREAD_NOLOCK)) { 2713 int blocksize = 2714 BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 2715 if (blocksize < PAGE_SIZE) 2716 ext4_msg(NULL, KERN_WARNING, "Warning: mounting with an " 2717 "experimental mount option 'dioread_nolock' " 2718 "for blocksize < PAGE_SIZE"); 2719 } 2720 2721 err = ext4_check_test_dummy_encryption(fc, sb); 2722 if (err) 2723 return err; 2724 2725 if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) { 2726 if (!sbi->s_journal) { 2727 ext4_msg(NULL, KERN_WARNING, 2728 "Remounting file system with no journal " 2729 "so ignoring journalled data option"); 2730 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2731 } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) != 2732 test_opt(sb, DATA_FLAGS)) { 2733 ext4_msg(NULL, KERN_ERR, "Cannot change data mode " 2734 "on remount"); 2735 return -EINVAL; 2736 } 2737 } 2738 2739 if (is_remount) { 2740 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 2741 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 2742 ext4_msg(NULL, KERN_ERR, "can't mount with " 2743 "both data=journal and dax"); 2744 return -EINVAL; 2745 } 2746 2747 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 2748 (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 2749 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) { 2750 fail_dax_change_remount: 2751 ext4_msg(NULL, KERN_ERR, "can't change " 2752 "dax mount option while remounting"); 2753 return -EINVAL; 2754 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) && 2755 (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 2756 (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) { 2757 goto fail_dax_change_remount; 2758 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) && 2759 ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 2760 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) 
|| 2761 !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) { 2762 goto fail_dax_change_remount; 2763 } 2764 } 2765 2766 return ext4_check_quota_consistency(fc, sb); 2767 } 2768 2769 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) 2770 { 2771 struct ext4_fs_context *ctx = fc->fs_private; 2772 struct ext4_sb_info *sbi = fc->s_fs_info; 2773 2774 sbi->s_mount_opt &= ~ctx->mask_s_mount_opt; 2775 sbi->s_mount_opt |= ctx->vals_s_mount_opt; 2776 sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2; 2777 sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2; 2778 sbi->s_mount_flags &= ~ctx->mask_s_mount_flags; 2779 sbi->s_mount_flags |= ctx->vals_s_mount_flags; 2780 sb->s_flags &= ~ctx->mask_s_flags; 2781 sb->s_flags |= ctx->vals_s_flags; 2782 2783 #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; }) 2784 APPLY(s_commit_interval); 2785 APPLY(s_stripe); 2786 APPLY(s_max_batch_time); 2787 APPLY(s_min_batch_time); 2788 APPLY(s_want_extra_isize); 2789 APPLY(s_inode_readahead_blks); 2790 APPLY(s_max_dir_size_kb); 2791 APPLY(s_li_wait_mult); 2792 APPLY(s_resgid); 2793 APPLY(s_resuid); 2794 2795 #ifdef CONFIG_EXT4_DEBUG 2796 APPLY(s_fc_debug_max_replay); 2797 #endif 2798 2799 ext4_apply_quota_options(fc, sb); 2800 ext4_apply_test_dummy_encryption(ctx, sb); 2801 } 2802 2803 2804 static int ext4_validate_options(struct fs_context *fc) 2805 { 2806 #ifdef CONFIG_QUOTA 2807 struct ext4_fs_context *ctx = fc->fs_private; 2808 char *usr_qf_name, *grp_qf_name; 2809 2810 usr_qf_name = ctx->s_qf_names[USRQUOTA]; 2811 grp_qf_name = ctx->s_qf_names[GRPQUOTA]; 2812 2813 if (usr_qf_name || grp_qf_name) { 2814 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name) 2815 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2816 2817 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name) 2818 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2819 2820 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 2821 ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) { 2822 ext4_msg(NULL, KERN_ERR, "old and new quota " 2823 "format mixing"); 2824 return -EINVAL; 2825 } 2826 } 2827 #endif 2828 return 1; 2829 } 2830 2831 static inline void ext4_show_quota_options(struct seq_file *seq, 2832 struct super_block *sb) 2833 { 2834 #if defined(CONFIG_QUOTA) 2835 struct ext4_sb_info *sbi = EXT4_SB(sb); 2836 char *usr_qf_name, *grp_qf_name; 2837 2838 if (sbi->s_jquota_fmt) { 2839 char *fmtname = ""; 2840 2841 switch (sbi->s_jquota_fmt) { 2842 case QFMT_VFS_OLD: 2843 fmtname = "vfsold"; 2844 break; 2845 case QFMT_VFS_V0: 2846 fmtname = "vfsv0"; 2847 break; 2848 case QFMT_VFS_V1: 2849 fmtname = "vfsv1"; 2850 break; 2851 } 2852 seq_printf(seq, ",jqfmt=%s", fmtname); 2853 } 2854 2855 rcu_read_lock(); 2856 usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]); 2857 grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]); 2858 if (usr_qf_name) 2859 seq_show_option(seq, "usrjquota", usr_qf_name); 2860 if (grp_qf_name) 2861 seq_show_option(seq, "grpjquota", grp_qf_name); 2862 rcu_read_unlock(); 2863 #endif 2864 } 2865 2866 static const char *token2str(int token) 2867 { 2868 const struct fs_parameter_spec *spec; 2869 2870 for (spec = ext4_param_specs; spec->name != NULL; spec++) 2871 if (spec->opt == token && !spec->type) 2872 break; 2873 return spec->name; 2874 } 2875 2876 /* 2877 * Show an option if 2878 * - it's set to a non-default value OR 2879 * - if the per-sb default is different from the global default 2880 */ 2881 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, 2882 int nodefs) 
2883 { 2884 struct ext4_sb_info *sbi = EXT4_SB(sb); 2885 struct ext4_super_block *es = sbi->s_es; 2886 int def_errors; 2887 const struct mount_opts *m; 2888 char sep = nodefs ? '\n' : ','; 2889 2890 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) 2891 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) 2892 2893 if (sbi->s_sb_block != 1) 2894 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); 2895 2896 for (m = ext4_mount_opts; m->token != Opt_err; m++) { 2897 int want_set = m->flags & MOPT_SET; 2898 int opt_2 = m->flags & MOPT_2; 2899 unsigned int mount_opt, def_mount_opt; 2900 2901 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || 2902 m->flags & MOPT_SKIP) 2903 continue; 2904 2905 if (opt_2) { 2906 mount_opt = sbi->s_mount_opt2; 2907 def_mount_opt = sbi->s_def_mount_opt2; 2908 } else { 2909 mount_opt = sbi->s_mount_opt; 2910 def_mount_opt = sbi->s_def_mount_opt; 2911 } 2912 /* skip if same as the default */ 2913 if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt))) 2914 continue; 2915 /* select Opt_noFoo vs Opt_Foo */ 2916 if ((want_set && 2917 (mount_opt & m->mount_opt) != m->mount_opt) || 2918 (!want_set && (mount_opt & m->mount_opt))) 2919 continue; 2920 SEQ_OPTS_PRINT("%s", token2str(m->token)); 2921 } 2922 2923 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || 2924 le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) 2925 SEQ_OPTS_PRINT("resuid=%u", 2926 from_kuid_munged(&init_user_ns, sbi->s_resuid)); 2927 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || 2928 le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) 2929 SEQ_OPTS_PRINT("resgid=%u", 2930 from_kgid_munged(&init_user_ns, sbi->s_resgid)); 2931 def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors); 2932 if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) 2933 SEQ_OPTS_PUTS("errors=remount-ro"); 2934 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) 2935 SEQ_OPTS_PUTS("errors=continue"); 2936 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) 2937 SEQ_OPTS_PUTS("errors=panic"); 2938 if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) 2939 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); 2940 if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) 2941 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 2942 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 2943 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 2944 if (nodefs || sbi->s_stripe) 2945 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 2946 if (nodefs || EXT4_MOUNT_DATA_FLAGS & 2947 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 2948 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 2949 SEQ_OPTS_PUTS("data=journal"); 2950 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 2951 SEQ_OPTS_PUTS("data=ordered"); 2952 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 2953 SEQ_OPTS_PUTS("data=writeback"); 2954 } 2955 if (nodefs || 2956 sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) 2957 SEQ_OPTS_PRINT("inode_readahead_blks=%u", 2958 sbi->s_inode_readahead_blks); 2959 2960 if (test_opt(sb, INIT_INODE_TABLE) && (nodefs || 2961 (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) 2962 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); 2963 if (nodefs || sbi->s_max_dir_size_kb) 2964 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); 2965 if (test_opt(sb, DATA_ERR_ABORT)) 2966 SEQ_OPTS_PUTS("data_err=abort"); 2967 2968 
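/*
 * The "skip if same as the default" test above relies on XOR:
 * (mount_opt ^ def_mount_opt) has a bit set for exactly those options
 * whose current value differs from the per-sb default, so ANDing with
 * m->mount_opt shows an option only when it was actually changed.
 * A tiny standalone illustration with made-up bits:
 */
#if 0	/* illustration only; compiles standalone in userspace */
#include <assert.h>

int main(void)
{
	unsigned int def = 0x05;	/* defaults: bits 0 and 2 set */
	unsigned int cur = 0x01;	/* current:  bit 2 was cleared */

	assert((cur ^ def) == 0x04);	/* only the changed bit survives */
	assert(0x04 & (cur ^ def));	/* bit 2 differs -> show it */
	assert(!(0x01 & (cur ^ def)));	/* bit 0 matches default -> skip */
	return 0;
}
#endif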
fscrypt_show_test_dummy_encryption(seq, sep, sb); 2969 2970 if (sb->s_flags & SB_INLINECRYPT) 2971 SEQ_OPTS_PUTS("inlinecrypt"); 2972 2973 if (test_opt(sb, DAX_ALWAYS)) { 2974 if (IS_EXT2_SB(sb)) 2975 SEQ_OPTS_PUTS("dax"); 2976 else 2977 SEQ_OPTS_PUTS("dax=always"); 2978 } else if (test_opt2(sb, DAX_NEVER)) { 2979 SEQ_OPTS_PUTS("dax=never"); 2980 } else if (test_opt2(sb, DAX_INODE)) { 2981 SEQ_OPTS_PUTS("dax=inode"); 2982 } 2983 2984 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 2985 !test_opt2(sb, MB_OPTIMIZE_SCAN)) { 2986 SEQ_OPTS_PUTS("mb_optimize_scan=0"); 2987 } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 2988 test_opt2(sb, MB_OPTIMIZE_SCAN)) { 2989 SEQ_OPTS_PUTS("mb_optimize_scan=1"); 2990 } 2991 2992 ext4_show_quota_options(seq, sb); 2993 return 0; 2994 } 2995 2996 static int ext4_show_options(struct seq_file *seq, struct dentry *root) 2997 { 2998 return _ext4_show_options(seq, root->d_sb, 0); 2999 } 3000 3001 int ext4_seq_options_show(struct seq_file *seq, void *offset) 3002 { 3003 struct super_block *sb = seq->private; 3004 int rc; 3005 3006 seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw"); 3007 rc = _ext4_show_options(seq, sb, 1); 3008 seq_puts(seq, "\n"); 3009 return rc; 3010 } 3011 3012 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, 3013 int read_only) 3014 { 3015 struct ext4_sb_info *sbi = EXT4_SB(sb); 3016 int err = 0; 3017 3018 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { 3019 ext4_msg(sb, KERN_ERR, "revision level too high, " 3020 "forcing read-only mode"); 3021 err = -EROFS; 3022 goto done; 3023 } 3024 if (read_only) 3025 goto done; 3026 if (!(sbi->s_mount_state & EXT4_VALID_FS)) 3027 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " 3028 "running e2fsck is recommended"); 3029 else if (sbi->s_mount_state & EXT4_ERROR_FS) 3030 ext4_msg(sb, KERN_WARNING, 3031 "warning: mounting fs with errors, " 3032 "running e2fsck is recommended"); 3033 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && 3034 le16_to_cpu(es->s_mnt_count) >= 3035 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) 3036 ext4_msg(sb, KERN_WARNING, 3037 "warning: maximal mount count reached, " 3038 "running e2fsck is recommended"); 3039 else if (le32_to_cpu(es->s_checkinterval) && 3040 (ext4_get_tstamp(es, s_lastcheck) + 3041 le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds())) 3042 ext4_msg(sb, KERN_WARNING, 3043 "warning: checktime reached, " 3044 "running e2fsck is recommended"); 3045 if (!sbi->s_journal) 3046 es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 3047 if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) 3048 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); 3049 le16_add_cpu(&es->s_mnt_count, 1); 3050 ext4_update_tstamp(es, s_mtime); 3051 if (sbi->s_journal) { 3052 ext4_set_feature_journal_needs_recovery(sb); 3053 if (ext4_has_feature_orphan_file(sb)) 3054 ext4_set_feature_orphan_present(sb); 3055 } 3056 3057 err = ext4_commit_super(sb); 3058 done: 3059 if (test_opt(sb, DEBUG)) 3060 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " 3061 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", 3062 sb->s_blocksize, 3063 sbi->s_groups_count, 3064 EXT4_BLOCKS_PER_GROUP(sb), 3065 EXT4_INODES_PER_GROUP(sb), 3066 sbi->s_mount_opt, sbi->s_mount_opt2); 3067 return err; 3068 } 3069 3070 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) 3071 { 3072 struct ext4_sb_info *sbi = EXT4_SB(sb); 3073 struct flex_groups **old_groups, **new_groups; 3074 int size, i, j; 3075 3076 if 
(!sbi->s_log_groups_per_flex) 3077 return 0; 3078 3079 size = ext4_flex_group(sbi, ngroup - 1) + 1; 3080 if (size <= sbi->s_flex_groups_allocated) 3081 return 0; 3082 3083 new_groups = kvzalloc(roundup_pow_of_two(size * 3084 sizeof(*sbi->s_flex_groups)), GFP_KERNEL); 3085 if (!new_groups) { 3086 ext4_msg(sb, KERN_ERR, 3087 "not enough memory for %d flex group pointers", size); 3088 return -ENOMEM; 3089 } 3090 for (i = sbi->s_flex_groups_allocated; i < size; i++) { 3091 new_groups[i] = kvzalloc(roundup_pow_of_two( 3092 sizeof(struct flex_groups)), 3093 GFP_KERNEL); 3094 if (!new_groups[i]) { 3095 for (j = sbi->s_flex_groups_allocated; j < i; j++) 3096 kvfree(new_groups[j]); 3097 kvfree(new_groups); 3098 ext4_msg(sb, KERN_ERR, 3099 "not enough memory for %d flex groups", size); 3100 return -ENOMEM; 3101 } 3102 } 3103 rcu_read_lock(); 3104 old_groups = rcu_dereference(sbi->s_flex_groups); 3105 if (old_groups) 3106 memcpy(new_groups, old_groups, 3107 (sbi->s_flex_groups_allocated * 3108 sizeof(struct flex_groups *))); 3109 rcu_read_unlock(); 3110 rcu_assign_pointer(sbi->s_flex_groups, new_groups); 3111 sbi->s_flex_groups_allocated = size; 3112 if (old_groups) 3113 ext4_kvfree_array_rcu(old_groups); 3114 return 0; 3115 } 3116 3117 static int ext4_fill_flex_info(struct super_block *sb) 3118 { 3119 struct ext4_sb_info *sbi = EXT4_SB(sb); 3120 struct ext4_group_desc *gdp = NULL; 3121 struct flex_groups *fg; 3122 ext4_group_t flex_group; 3123 int i, err; 3124 3125 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; 3126 if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) { 3127 sbi->s_log_groups_per_flex = 0; 3128 return 1; 3129 } 3130 3131 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); 3132 if (err) 3133 goto failed; 3134 3135 for (i = 0; i < sbi->s_groups_count; i++) { 3136 gdp = ext4_get_group_desc(sb, i, NULL); 3137 3138 flex_group = ext4_flex_group(sbi, i); 3139 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 3140 atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes); 3141 atomic64_add(ext4_free_group_clusters(sb, gdp), 3142 &fg->free_clusters); 3143 atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs); 3144 } 3145 3146 return 1; 3147 failed: 3148 return 0; 3149 } 3150 3151 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group, 3152 struct ext4_group_desc *gdp) 3153 { 3154 int offset = offsetof(struct ext4_group_desc, bg_checksum); 3155 __u16 crc = 0; 3156 __le32 le_group = cpu_to_le32(block_group); 3157 struct ext4_sb_info *sbi = EXT4_SB(sb); 3158 3159 if (ext4_has_metadata_csum(sbi->s_sb)) { 3160 /* Use new metadata_csum algorithm */ 3161 __u32 csum32; 3162 __u16 dummy_csum = 0; 3163 3164 csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, 3165 sizeof(le_group)); 3166 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset); 3167 csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum, 3168 sizeof(dummy_csum)); 3169 offset += sizeof(dummy_csum); 3170 if (offset < sbi->s_desc_size) 3171 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset, 3172 sbi->s_desc_size - offset); 3173 3174 crc = csum32 & 0xFFFF; 3175 goto out; 3176 } 3177 3178 /* old crc16 code */ 3179 if (!ext4_has_feature_gdt_csum(sb)) 3180 return 0; 3181 3182 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 3183 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); 3184 crc = crc16(crc, (__u8 *)gdp, offset); 3185 offset += sizeof(gdp->bg_checksum); /* skip checksum */ 3186 /* for checksum of struct ext4_group_desc do the 
rest...*/ 3187 if (ext4_has_feature_64bit(sb) && 3188 offset < le16_to_cpu(sbi->s_es->s_desc_size)) 3189 crc = crc16(crc, (__u8 *)gdp + offset, 3190 le16_to_cpu(sbi->s_es->s_desc_size) - 3191 offset); 3192 3193 out: 3194 return cpu_to_le16(crc); 3195 } 3196 3197 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, 3198 struct ext4_group_desc *gdp) 3199 { 3200 if (ext4_has_group_desc_csum(sb) && 3201 (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp))) 3202 return 0; 3203 3204 return 1; 3205 } 3206 3207 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, 3208 struct ext4_group_desc *gdp) 3209 { 3210 if (!ext4_has_group_desc_csum(sb)) 3211 return; 3212 gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp); 3213 } 3214 3215 /* Called at mount-time, super-block is locked */ 3216 static int ext4_check_descriptors(struct super_block *sb, 3217 ext4_fsblk_t sb_block, 3218 ext4_group_t *first_not_zeroed) 3219 { 3220 struct ext4_sb_info *sbi = EXT4_SB(sb); 3221 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 3222 ext4_fsblk_t last_block; 3223 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0); 3224 ext4_fsblk_t block_bitmap; 3225 ext4_fsblk_t inode_bitmap; 3226 ext4_fsblk_t inode_table; 3227 int flexbg_flag = 0; 3228 ext4_group_t i, grp = sbi->s_groups_count; 3229 3230 if (ext4_has_feature_flex_bg(sb)) 3231 flexbg_flag = 1; 3232 3233 ext4_debug("Checking group descriptors"); 3234 3235 for (i = 0; i < sbi->s_groups_count; i++) { 3236 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); 3237 3238 if (i == sbi->s_groups_count - 1 || flexbg_flag) 3239 last_block = ext4_blocks_count(sbi->s_es) - 1; 3240 else 3241 last_block = first_block + 3242 (EXT4_BLOCKS_PER_GROUP(sb) - 1); 3243 3244 if ((grp == sbi->s_groups_count) && 3245 !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3246 grp = i; 3247 3248 block_bitmap = ext4_block_bitmap(sb, gdp); 3249 if (block_bitmap == sb_block) { 3250 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3251 "Block bitmap for group %u overlaps " 3252 "superblock", i); 3253 if (!sb_rdonly(sb)) 3254 return 0; 3255 } 3256 if (block_bitmap >= sb_block + 1 && 3257 block_bitmap <= last_bg_block) { 3258 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3259 "Block bitmap for group %u overlaps " 3260 "block group descriptors", i); 3261 if (!sb_rdonly(sb)) 3262 return 0; 3263 } 3264 if (block_bitmap < first_block || block_bitmap > last_block) { 3265 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3266 "Block bitmap for group %u not in group " 3267 "(block %llu)!", i, block_bitmap); 3268 return 0; 3269 } 3270 inode_bitmap = ext4_inode_bitmap(sb, gdp); 3271 if (inode_bitmap == sb_block) { 3272 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3273 "Inode bitmap for group %u overlaps " 3274 "superblock", i); 3275 if (!sb_rdonly(sb)) 3276 return 0; 3277 } 3278 if (inode_bitmap >= sb_block + 1 && 3279 inode_bitmap <= last_bg_block) { 3280 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3281 "Inode bitmap for group %u overlaps " 3282 "block group descriptors", i); 3283 if (!sb_rdonly(sb)) 3284 return 0; 3285 } 3286 if (inode_bitmap < first_block || inode_bitmap > last_block) { 3287 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3288 "Inode bitmap for group %u not in group " 3289 "(block %llu)!", i, inode_bitmap); 3290 return 0; 3291 } 3292 inode_table = ext4_inode_table(sb, gdp); 3293 if (inode_table == sb_block) { 3294 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 
3295 "Inode table for group %u overlaps "
3296 "superblock", i);
3297 if (!sb_rdonly(sb))
3298 return 0;
3299 }
3300 if (inode_table >= sb_block + 1 &&
3301 inode_table <= last_bg_block) {
3302 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3303 "Inode table for group %u overlaps "
3304 "block group descriptors", i);
3305 if (!sb_rdonly(sb))
3306 return 0;
3307 }
3308 if (inode_table < first_block ||
3309 inode_table + sbi->s_itb_per_group - 1 > last_block) {
3310 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3311 "Inode table for group %u not in group "
3312 "(block %llu)!", i, inode_table);
3313 return 0;
3314 }
3315 ext4_lock_group(sb, i);
3316 if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
3317 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3318 "Checksum for group %u failed (%u!=%u)",
3319 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
3320 gdp)), le16_to_cpu(gdp->bg_checksum));
3321 if (!sb_rdonly(sb)) {
3322 ext4_unlock_group(sb, i);
3323 return 0;
3324 }
3325 }
3326 ext4_unlock_group(sb, i);
3327 if (!flexbg_flag)
3328 first_block += EXT4_BLOCKS_PER_GROUP(sb);
3329 }
3330 if (NULL != first_not_zeroed)
3331 *first_not_zeroed = grp;
3332 return 1;
3333 }
3334
3335 /*
3336 * Maximal extent format file size.
3337 * Resulting logical blkno at s_maxbytes must fit in our on-disk
3338 * extent format containers, within a sector_t, and within i_blocks
3339 * in the vfs. The ext4 inode has 48 bits of i_block in fsblock units,
3340 * so that won't be a limiting factor.
3341 *
3342 * However, there is another limiting factor. We do store extents in the form
3343 * of starting block and length, hence the resulting length of the extent
3344 * covering the maximum file size must fit into on-disk format containers as
3345 * well. Given that the length is always one unit bigger than the maximum unit
3346 * (because we count 0 as well), we have to lower s_maxbytes by one fs block.
3347 *
3348 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
3349 */
3350 static loff_t ext4_max_size(int blkbits, int has_huge_files)
3351 {
3352 loff_t res;
3353 loff_t upper_limit = MAX_LFS_FILESIZE;
3354
3355 BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
3356
3357 if (!has_huge_files) {
3358 upper_limit = (1LL << 32) - 1;
3359
3360 /* total blocks in file system block size */
3361 upper_limit >>= (blkbits - 9);
3362 upper_limit <<= blkbits;
3363 }
3364
3365 /*
3366 * 32-bit extent-start container, ee_block. We lower the maxbytes
3367 * by one fs block, so ee_len can cover the extent of maximum file
3368 * size
3369 */
3370 res = (1LL << 32) - 1;
3371 res <<= blkbits;
3372
3373 /* Sanity check against vm- & vfs- imposed limits */
3374 if (res > upper_limit)
3375 res = upper_limit;
3376
3377 return res;
3378 }
3379
3380 /*
3381 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
3382 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
3383 * We need to be 1 filesystem block less than the 2^48 sector limit.
3384 */
3385 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
3386 {
3387 loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
3388 int meta_blocks;
3389 unsigned int ppb = 1 << (bits - 2);
3390
3391 /*
3392 * This is calculated to be the largest file size for a dense, block
3393 * mapped file such that the file's total number of 512-byte sectors,
3394 * including data and all indirect blocks, does not exceed (2^48 - 1).
3395 *
3396 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
3397 * number of 512-byte sectors of the file.
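 *
 * A worked example, with assumed numbers: for 4KiB blocks (bits == 12),
 * each block holds 1 << (12 - 2) == 1024 block pointers, so the block
 * tree below can map 12 + 1024 + 1024^2 + 1024^3 data blocks, roughly
 * 4 TiB of data. Without EXT4_HUGE_FILE_FL, the (2^32 - 1) sector cap
 * works out to about 2 TiB, so the sector cap is the binding limit and
 * the indirect (meta) blocks needed to address it are subtracted below.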
3398 */
3399 if (!has_huge_files) {
3400 /*
3401 * !has_huge_files implies that the inode i_block field
3402 * represents the file's total number of 512-byte sectors,
3403 * which must fit in 32 bits, i.e. at most (2^32 - 1) sectors
3404 */
3405 upper_limit = (1LL << 32) - 1;
3406
3407 /* total blocks in file system block size */
3408 upper_limit >>= (bits - 9);
3409
3410 } else {
3411 /*
3412 * We use the 48-bit ext4_inode i_blocks.
3413 * With EXT4_HUGE_FILE_FL set, i_blocks
3414 * represents the total number of blocks in
3415 * units of the file system block size
3416 */
3417 upper_limit = (1LL << 48) - 1;
3418
3419 }
3420
3421 /* Compute how many blocks we can address by block tree */
3422 res += ppb;
3423 res += ppb * ppb;
3424 res += ((loff_t)ppb) * ppb * ppb;
3425 /* Compute how many metadata blocks are needed */
3426 meta_blocks = 1;
3427 meta_blocks += 1 + ppb;
3428 meta_blocks += 1 + ppb + ppb * ppb;
3429 /* Does block tree limit file size? */
3430 if (res + meta_blocks <= upper_limit)
3431 goto check_lfs;
3432
3433 res = upper_limit;
3434 /* How many metadata blocks are needed for addressing upper_limit? */
3435 upper_limit -= EXT4_NDIR_BLOCKS;
3436 /* indirect blocks */
3437 meta_blocks = 1;
3438 upper_limit -= ppb;
3439 /* double indirect blocks */
3440 if (upper_limit < ppb * ppb) {
3441 meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
3442 res -= meta_blocks;
3443 goto check_lfs;
3444 }
3445 meta_blocks += 1 + ppb;
3446 upper_limit -= ppb * ppb;
3447 /* triple indirect blocks for the rest */
3448 meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
3449 DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
3450 res -= meta_blocks;
3451 check_lfs:
3452 res <<= bits;
3453 if (res > MAX_LFS_FILESIZE)
3454 res = MAX_LFS_FILESIZE;
3455
3456 return res;
3457 }
3458
3459 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3460 ext4_fsblk_t logical_sb_block, int nr)
3461 {
3462 struct ext4_sb_info *sbi = EXT4_SB(sb);
3463 ext4_group_t bg, first_meta_bg;
3464 int has_super = 0;
3465
3466 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
3467
3468 if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3469 return logical_sb_block + nr + 1;
3470 bg = sbi->s_desc_per_block * nr;
3471 if (ext4_bg_has_super(sb, bg))
3472 has_super = 1;
3473
3474 /*
3475 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
3476 * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
3477 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
3478 * compensate.
3479 */
3480 if (sb->s_blocksize == 1024 && nr == 0 &&
3481 le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3482 has_super++;
3483
3484 return (has_super + ext4_group_first_block_no(sb, bg));
3485 }
3486
3487 /**
3488 * ext4_get_stripe_size: Get the stripe size.
3489 * @sbi: In memory super block info
3490 *
3491 * If we have specified it via the mount option, then
3492 * use the mount option value. If the value specified at mount time is
3493 * greater than the blocks per group, use the super block value.
3494 * If the super block value is greater than blocks per group, return 0.
3495 * The allocator needs it to be less than blocks per group.
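 * For example (numbers assumed): with no stripe= mount option, a
 * superblock stripe_width of 128, a stride of 16 and 32768 blocks per
 * group, 128 is chosen; a computed value of 1 is reset to 0 below,
 * since a one-block stripe tells the allocator nothing useful.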
3496 * 3497 */ 3498 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) 3499 { 3500 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride); 3501 unsigned long stripe_width = 3502 le32_to_cpu(sbi->s_es->s_raid_stripe_width); 3503 int ret; 3504 3505 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) 3506 ret = sbi->s_stripe; 3507 else if (stripe_width && stripe_width <= sbi->s_blocks_per_group) 3508 ret = stripe_width; 3509 else if (stride && stride <= sbi->s_blocks_per_group) 3510 ret = stride; 3511 else 3512 ret = 0; 3513 3514 /* 3515 * If the stripe width is 1, this makes no sense and 3516 * we set it to 0 to turn off stripe handling code. 3517 */ 3518 if (ret <= 1) 3519 ret = 0; 3520 3521 return ret; 3522 } 3523 3524 /* 3525 * Check whether this filesystem can be mounted based on 3526 * the features present and the RDONLY/RDWR mount requested. 3527 * Returns 1 if this filesystem can be mounted as requested, 3528 * 0 if it cannot be. 3529 */ 3530 int ext4_feature_set_ok(struct super_block *sb, int readonly) 3531 { 3532 if (ext4_has_unknown_ext4_incompat_features(sb)) { 3533 ext4_msg(sb, KERN_ERR, 3534 "Couldn't mount because of " 3535 "unsupported optional features (%x)", 3536 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & 3537 ~EXT4_FEATURE_INCOMPAT_SUPP)); 3538 return 0; 3539 } 3540 3541 #if !IS_ENABLED(CONFIG_UNICODE) 3542 if (ext4_has_feature_casefold(sb)) { 3543 ext4_msg(sb, KERN_ERR, 3544 "Filesystem with casefold feature cannot be " 3545 "mounted without CONFIG_UNICODE"); 3546 return 0; 3547 } 3548 #endif 3549 3550 if (readonly) 3551 return 1; 3552 3553 if (ext4_has_feature_readonly(sb)) { 3554 ext4_msg(sb, KERN_INFO, "filesystem is read-only"); 3555 sb->s_flags |= SB_RDONLY; 3556 return 1; 3557 } 3558 3559 /* Check that feature set is OK for a read-write mount */ 3560 if (ext4_has_unknown_ext4_ro_compat_features(sb)) { 3561 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " 3562 "unsupported optional features (%x)", 3563 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & 3564 ~EXT4_FEATURE_RO_COMPAT_SUPP)); 3565 return 0; 3566 } 3567 if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) { 3568 ext4_msg(sb, KERN_ERR, 3569 "Can't support bigalloc feature without " 3570 "extents feature\n"); 3571 return 0; 3572 } 3573 3574 #if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2) 3575 if (!readonly && (ext4_has_feature_quota(sb) || 3576 ext4_has_feature_project(sb))) { 3577 ext4_msg(sb, KERN_ERR, 3578 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2"); 3579 return 0; 3580 } 3581 #endif /* CONFIG_QUOTA */ 3582 return 1; 3583 } 3584 3585 /* 3586 * This function is called once a day if we have errors logged 3587 * on the file system 3588 */ 3589 static void print_daily_error_info(struct timer_list *t) 3590 { 3591 struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report); 3592 struct super_block *sb = sbi->s_sb; 3593 struct ext4_super_block *es = sbi->s_es; 3594 3595 if (es->s_error_count) 3596 /* fsck newer than v1.41.13 is needed to clean this condition. 
*/ 3597 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", 3598 le32_to_cpu(es->s_error_count)); 3599 if (es->s_first_error_time) { 3600 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d", 3601 sb->s_id, 3602 ext4_get_tstamp(es, s_first_error_time), 3603 (int) sizeof(es->s_first_error_func), 3604 es->s_first_error_func, 3605 le32_to_cpu(es->s_first_error_line)); 3606 if (es->s_first_error_ino) 3607 printk(KERN_CONT ": inode %u", 3608 le32_to_cpu(es->s_first_error_ino)); 3609 if (es->s_first_error_block) 3610 printk(KERN_CONT ": block %llu", (unsigned long long) 3611 le64_to_cpu(es->s_first_error_block)); 3612 printk(KERN_CONT "\n"); 3613 } 3614 if (es->s_last_error_time) { 3615 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d", 3616 sb->s_id, 3617 ext4_get_tstamp(es, s_last_error_time), 3618 (int) sizeof(es->s_last_error_func), 3619 es->s_last_error_func, 3620 le32_to_cpu(es->s_last_error_line)); 3621 if (es->s_last_error_ino) 3622 printk(KERN_CONT ": inode %u", 3623 le32_to_cpu(es->s_last_error_ino)); 3624 if (es->s_last_error_block) 3625 printk(KERN_CONT ": block %llu", (unsigned long long) 3626 le64_to_cpu(es->s_last_error_block)); 3627 printk(KERN_CONT "\n"); 3628 } 3629 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ 3630 } 3631 3632 /* Find next suitable group and run ext4_init_inode_table */ 3633 static int ext4_run_li_request(struct ext4_li_request *elr) 3634 { 3635 struct ext4_group_desc *gdp = NULL; 3636 struct super_block *sb = elr->lr_super; 3637 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 3638 ext4_group_t group = elr->lr_next_group; 3639 unsigned int prefetch_ios = 0; 3640 int ret = 0; 3641 u64 start_time; 3642 3643 if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) { 3644 elr->lr_next_group = ext4_mb_prefetch(sb, group, 3645 EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios); 3646 if (prefetch_ios) 3647 ext4_mb_prefetch_fini(sb, elr->lr_next_group, 3648 prefetch_ios); 3649 trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, 3650 prefetch_ios); 3651 if (group >= elr->lr_next_group) { 3652 ret = 1; 3653 if (elr->lr_first_not_zeroed != ngroups && 3654 !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) { 3655 elr->lr_next_group = elr->lr_first_not_zeroed; 3656 elr->lr_mode = EXT4_LI_MODE_ITABLE; 3657 ret = 0; 3658 } 3659 } 3660 return ret; 3661 } 3662 3663 for (; group < ngroups; group++) { 3664 gdp = ext4_get_group_desc(sb, group, NULL); 3665 if (!gdp) { 3666 ret = 1; 3667 break; 3668 } 3669 3670 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3671 break; 3672 } 3673 3674 if (group >= ngroups) 3675 ret = 1; 3676 3677 if (!ret) { 3678 start_time = ktime_get_real_ns(); 3679 ret = ext4_init_inode_table(sb, group, 3680 elr->lr_timeout ? 0 : 1); 3681 trace_ext4_lazy_itable_init(sb, group); 3682 if (elr->lr_timeout == 0) { 3683 elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) * 3684 EXT4_SB(elr->lr_super)->s_li_wait_mult); 3685 } 3686 elr->lr_next_sched = jiffies + elr->lr_timeout; 3687 elr->lr_next_group = group + 1; 3688 } 3689 return ret; 3690 } 3691 3692 /* 3693 * Remove lr_request from the list_request and free the 3694 * request structure. 
Should be called with li_list_mtx held 3695 */ 3696 static void ext4_remove_li_request(struct ext4_li_request *elr) 3697 { 3698 if (!elr) 3699 return; 3700 3701 list_del(&elr->lr_request); 3702 EXT4_SB(elr->lr_super)->s_li_request = NULL; 3703 kfree(elr); 3704 } 3705 3706 static void ext4_unregister_li_request(struct super_block *sb) 3707 { 3708 mutex_lock(&ext4_li_mtx); 3709 if (!ext4_li_info) { 3710 mutex_unlock(&ext4_li_mtx); 3711 return; 3712 } 3713 3714 mutex_lock(&ext4_li_info->li_list_mtx); 3715 ext4_remove_li_request(EXT4_SB(sb)->s_li_request); 3716 mutex_unlock(&ext4_li_info->li_list_mtx); 3717 mutex_unlock(&ext4_li_mtx); 3718 } 3719 3720 static struct task_struct *ext4_lazyinit_task; 3721 3722 /* 3723 * This is the function where the ext4lazyinit thread lives. It walks 3724 * through the request list searching for the next scheduled filesystem. 3725 * When such a fs is found, it runs the lazy initialization request 3726 * (ext4_run_li_request) and keeps track of the time spent in this 3727 * function. Based on that time we compute the next schedule time of 3728 * the request. When the walk through the list is complete, it computes 3729 * the next wakeup time and puts itself to sleep. 3730 */ 3731 static int ext4_lazyinit_thread(void *arg) 3732 { 3733 struct ext4_lazy_init *eli = arg; 3734 struct list_head *pos, *n; 3735 struct ext4_li_request *elr; 3736 unsigned long next_wakeup, cur; 3737 3738 BUG_ON(NULL == eli); 3739 set_freezable(); 3740 3741 cont_thread: 3742 while (true) { 3743 next_wakeup = MAX_JIFFY_OFFSET; 3744 3745 mutex_lock(&eli->li_list_mtx); 3746 if (list_empty(&eli->li_request_list)) { 3747 mutex_unlock(&eli->li_list_mtx); 3748 goto exit_thread; 3749 } 3750 list_for_each_safe(pos, n, &eli->li_request_list) { 3751 int err = 0; 3752 int progress = 0; 3753 elr = list_entry(pos, struct ext4_li_request, 3754 lr_request); 3755 3756 if (time_before(jiffies, elr->lr_next_sched)) { 3757 if (time_before(elr->lr_next_sched, next_wakeup)) 3758 next_wakeup = elr->lr_next_sched; 3759 continue; 3760 } 3761 if (down_read_trylock(&elr->lr_super->s_umount)) { 3762 if (sb_start_write_trylock(elr->lr_super)) { 3763 progress = 1; 3764 /* 3765 * We hold sb->s_umount, so sb cannot 3766 * be removed from the list; it is 3767 * now safe to drop li_list_mtx 3768 */ 3769 mutex_unlock(&eli->li_list_mtx); 3770 err = ext4_run_li_request(elr); 3771 sb_end_write(elr->lr_super); 3772 mutex_lock(&eli->li_list_mtx); 3773 n = pos->next; 3774 } 3775 up_read(&elr->lr_super->s_umount); 3776 } 3777 /* error, remove the lazy_init job */ 3778 if (err) { 3779 ext4_remove_li_request(elr); 3780 continue; 3781 } 3782 if (!progress) { 3783 elr->lr_next_sched = jiffies + 3784 get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ); 3785 } 3786 if (time_before(elr->lr_next_sched, next_wakeup)) 3787 next_wakeup = elr->lr_next_sched; 3788 } 3789 mutex_unlock(&eli->li_list_mtx); 3790 3791 try_to_freeze(); 3792 3793 cur = jiffies; 3794 if ((time_after_eq(cur, next_wakeup)) || 3795 (MAX_JIFFY_OFFSET == next_wakeup)) { 3796 cond_resched(); 3797 continue; 3798 } 3799 3800 schedule_timeout_interruptible(next_wakeup - cur); 3801 3802 if (kthread_should_stop()) { 3803 ext4_clear_request_list(); 3804 goto exit_thread; 3805 } 3806 } 3807 3808 exit_thread: 3809 /* 3810 * It looks like the request list is empty, but we need 3811 * to check it under the li_list_mtx lock, to prevent any 3812 * additions into it, and of course we should lock ext4_li_mtx 3813 * to atomically free the list and ext4_li_info, because at 3814 * this point another ext4
filesystem could be registering 3815 * a new one. 3816 */ 3817 mutex_lock(&ext4_li_mtx); 3818 mutex_lock(&eli->li_list_mtx); 3819 if (!list_empty(&eli->li_request_list)) { 3820 mutex_unlock(&eli->li_list_mtx); 3821 mutex_unlock(&ext4_li_mtx); 3822 goto cont_thread; 3823 } 3824 mutex_unlock(&eli->li_list_mtx); 3825 kfree(ext4_li_info); 3826 ext4_li_info = NULL; 3827 mutex_unlock(&ext4_li_mtx); 3828 3829 return 0; 3830 } 3831 3832 static void ext4_clear_request_list(void) 3833 { 3834 struct list_head *pos, *n; 3835 struct ext4_li_request *elr; 3836 3837 mutex_lock(&ext4_li_info->li_list_mtx); 3838 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { 3839 elr = list_entry(pos, struct ext4_li_request, 3840 lr_request); 3841 ext4_remove_li_request(elr); 3842 } 3843 mutex_unlock(&ext4_li_info->li_list_mtx); 3844 } 3845 3846 static int ext4_run_lazyinit_thread(void) 3847 { 3848 ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, 3849 ext4_li_info, "ext4lazyinit"); 3850 if (IS_ERR(ext4_lazyinit_task)) { 3851 int err = PTR_ERR(ext4_lazyinit_task); 3852 ext4_clear_request_list(); 3853 kfree(ext4_li_info); 3854 ext4_li_info = NULL; 3855 printk(KERN_CRIT "EXT4-fs: error %d creating inode table " 3856 "initialization thread\n", 3857 err); 3858 return err; 3859 } 3860 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; 3861 return 0; 3862 } 3863 3864 /* 3865 * Check whether it makes sense to run the itable init thread or not. 3866 * If there is at least one uninitialized inode table, return the 3867 * corresponding group number, else the loop goes through all 3868 * groups and returns the total number of groups. 3869 */ 3870 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) 3871 { 3872 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; 3873 struct ext4_group_desc *gdp = NULL; 3874 3875 if (!ext4_has_group_desc_csum(sb)) 3876 return ngroups; 3877 3878 for (group = 0; group < ngroups; group++) { 3879 gdp = ext4_get_group_desc(sb, group, NULL); 3880 if (!gdp) 3881 continue; 3882 3883 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3884 break; 3885 } 3886 3887 return group; 3888 } 3889 3890 static int ext4_li_info_new(void) 3891 { 3892 struct ext4_lazy_init *eli = NULL; 3893 3894 eli = kzalloc(sizeof(*eli), GFP_KERNEL); 3895 if (!eli) 3896 return -ENOMEM; 3897 3898 INIT_LIST_HEAD(&eli->li_request_list); 3899 mutex_init(&eli->li_list_mtx); 3900 3901 eli->li_state |= EXT4_LAZYINIT_QUIT; 3902 3903 ext4_li_info = eli; 3904 3905 return 0; 3906 } 3907 3908 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, 3909 ext4_group_t start) 3910 { 3911 struct ext4_li_request *elr; 3912 3913 elr = kzalloc(sizeof(*elr), GFP_KERNEL); 3914 if (!elr) 3915 return NULL; 3916 3917 elr->lr_super = sb; 3918 elr->lr_first_not_zeroed = start; 3919 if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) { 3920 elr->lr_mode = EXT4_LI_MODE_ITABLE; 3921 elr->lr_next_group = start; 3922 } else { 3923 elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP; 3924 } 3925 3926 /* 3927 * Randomize the first schedule time of the request to 3928 * spread the inode table initialization requests 3929 * better.
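 * For illustration: with the default EXT4_DEF_LI_MAX_START_DELAY of
 * 5 seconds, each request first fires at a uniformly random moment
 * within five seconds of being registered.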
3930 */ 3931 elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ); 3932 return elr; 3933 } 3934 3935 int ext4_register_li_request(struct super_block *sb, 3936 ext4_group_t first_not_zeroed) 3937 { 3938 struct ext4_sb_info *sbi = EXT4_SB(sb); 3939 struct ext4_li_request *elr = NULL; 3940 ext4_group_t ngroups = sbi->s_groups_count; 3941 int ret = 0; 3942 3943 mutex_lock(&ext4_li_mtx); 3944 if (sbi->s_li_request != NULL) { 3945 /* 3946 * Reset timeout so it can be computed again, because 3947 * s_li_wait_mult might have changed. 3948 */ 3949 sbi->s_li_request->lr_timeout = 0; 3950 goto out; 3951 } 3952 3953 if (sb_rdonly(sb) || 3954 (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) && 3955 (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE)))) 3956 goto out; 3957 3958 elr = ext4_li_request_new(sb, first_not_zeroed); 3959 if (!elr) { 3960 ret = -ENOMEM; 3961 goto out; 3962 } 3963 3964 if (NULL == ext4_li_info) { 3965 ret = ext4_li_info_new(); 3966 if (ret) 3967 goto out; 3968 } 3969 3970 mutex_lock(&ext4_li_info->li_list_mtx); 3971 list_add(&elr->lr_request, &ext4_li_info->li_request_list); 3972 mutex_unlock(&ext4_li_info->li_list_mtx); 3973 3974 sbi->s_li_request = elr; 3975 /* 3976 * set elr to NULL here since it has been inserted to 3977 * the request_list and the removal and free of it is 3978 * handled by ext4_clear_request_list from now on. 3979 */ 3980 elr = NULL; 3981 3982 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { 3983 ret = ext4_run_lazyinit_thread(); 3984 if (ret) 3985 goto out; 3986 } 3987 out: 3988 mutex_unlock(&ext4_li_mtx); 3989 if (ret) 3990 kfree(elr); 3991 return ret; 3992 } 3993 3994 /* 3995 * We do not need to lock anything since this is called on 3996 * module unload. 3997 */ 3998 static void ext4_destroy_lazyinit_thread(void) 3999 { 4000 /* 4001 * If thread exited earlier 4002 * there's nothing to be done. 4003 */ 4004 if (!ext4_li_info || !ext4_lazyinit_task) 4005 return; 4006 4007 kthread_stop(ext4_lazyinit_task); 4008 } 4009 4010 static int set_journal_csum_feature_set(struct super_block *sb) 4011 { 4012 int ret = 1; 4013 int compat, incompat; 4014 struct ext4_sb_info *sbi = EXT4_SB(sb); 4015 4016 if (ext4_has_metadata_csum(sb)) { 4017 /* journal checksum v3 */ 4018 compat = 0; 4019 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; 4020 } else { 4021 /* journal checksum v1 */ 4022 compat = JBD2_FEATURE_COMPAT_CHECKSUM; 4023 incompat = 0; 4024 } 4025 4026 jbd2_journal_clear_features(sbi->s_journal, 4027 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 4028 JBD2_FEATURE_INCOMPAT_CSUM_V3 | 4029 JBD2_FEATURE_INCOMPAT_CSUM_V2); 4030 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 4031 ret = jbd2_journal_set_features(sbi->s_journal, 4032 compat, 0, 4033 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | 4034 incompat); 4035 } else if (test_opt(sb, JOURNAL_CHECKSUM)) { 4036 ret = jbd2_journal_set_features(sbi->s_journal, 4037 compat, 0, 4038 incompat); 4039 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 4040 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 4041 } else { 4042 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 4043 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 4044 } 4045 4046 return ret; 4047 } 4048 4049 /* 4050 * Note: calculating the overhead so we can be compatible with 4051 * historical BSD practice is quite difficult in the face of 4052 * clusters/bigalloc. This is because multiple metadata blocks from 4053 * different block group can end up in the same allocation cluster. 
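 * (For example, with 4k blocks and 64k bigalloc clusters one cluster
 * spans 16 blocks, which may include bitmaps or inode table blocks
 * belonging to several different groups.)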
4054 * Calculating the exact overhead in the face of clustered allocation 4055 * requires either O(all block bitmaps) in memory or O(number of block 4056 * groups**2) in time. We will still calculate the superblock for 4057 * older file systems --- and if we come across with a bigalloc file 4058 * system with zero in s_overhead_clusters the estimate will be close to 4059 * correct especially for very large cluster sizes --- but for newer 4060 * file systems, it's better to calculate this figure once at mkfs 4061 * time, and store it in the superblock. If the superblock value is 4062 * present (even for non-bigalloc file systems), we will use it. 4063 */ 4064 static int count_overhead(struct super_block *sb, ext4_group_t grp, 4065 char *buf) 4066 { 4067 struct ext4_sb_info *sbi = EXT4_SB(sb); 4068 struct ext4_group_desc *gdp; 4069 ext4_fsblk_t first_block, last_block, b; 4070 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4071 int s, j, count = 0; 4072 int has_super = ext4_bg_has_super(sb, grp); 4073 4074 if (!ext4_has_feature_bigalloc(sb)) 4075 return (has_super + ext4_bg_num_gdb(sb, grp) + 4076 (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) + 4077 sbi->s_itb_per_group + 2); 4078 4079 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + 4080 (grp * EXT4_BLOCKS_PER_GROUP(sb)); 4081 last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; 4082 for (i = 0; i < ngroups; i++) { 4083 gdp = ext4_get_group_desc(sb, i, NULL); 4084 b = ext4_block_bitmap(sb, gdp); 4085 if (b >= first_block && b <= last_block) { 4086 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); 4087 count++; 4088 } 4089 b = ext4_inode_bitmap(sb, gdp); 4090 if (b >= first_block && b <= last_block) { 4091 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); 4092 count++; 4093 } 4094 b = ext4_inode_table(sb, gdp); 4095 if (b >= first_block && b + sbi->s_itb_per_group <= last_block) 4096 for (j = 0; j < sbi->s_itb_per_group; j++, b++) { 4097 int c = EXT4_B2C(sbi, b - first_block); 4098 ext4_set_bit(c, buf); 4099 count++; 4100 } 4101 if (i != grp) 4102 continue; 4103 s = 0; 4104 if (ext4_bg_has_super(sb, grp)) { 4105 ext4_set_bit(s++, buf); 4106 count++; 4107 } 4108 j = ext4_bg_num_gdb(sb, grp); 4109 if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { 4110 ext4_error(sb, "Invalid number of block group " 4111 "descriptor blocks: %d", j); 4112 j = EXT4_BLOCKS_PER_GROUP(sb) - s; 4113 } 4114 count += j; 4115 for (; j > 0; j--) 4116 ext4_set_bit(EXT4_B2C(sbi, s++), buf); 4117 } 4118 if (!count) 4119 return 0; 4120 return EXT4_CLUSTERS_PER_GROUP(sb) - 4121 ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8); 4122 } 4123 4124 /* 4125 * Compute the overhead and stash it in sbi->s_overhead 4126 */ 4127 int ext4_calculate_overhead(struct super_block *sb) 4128 { 4129 struct ext4_sb_info *sbi = EXT4_SB(sb); 4130 struct ext4_super_block *es = sbi->s_es; 4131 struct inode *j_inode; 4132 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum); 4133 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4134 ext4_fsblk_t overhead = 0; 4135 char *buf = (char *) get_zeroed_page(GFP_NOFS); 4136 4137 if (!buf) 4138 return -ENOMEM; 4139 4140 /* 4141 * Compute the overhead (FS structures). This is constant 4142 * for a given filesystem unless the number of block groups 4143 * changes so we cache the previous value until it does. 
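 * Roughly, the overhead counted here is: all blocks before
 * s_first_data_block, each group's superblock/GDT copies, block and
 * inode bitmaps and inode table, plus (further below) the blocks of
 * an internal journal.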
4144 */ 4145 4146 /* 4147 * All of the blocks before first_data_block are overhead 4148 */ 4149 overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block)); 4150 4151 /* 4152 * Add the overhead found in each block group 4153 */ 4154 for (i = 0; i < ngroups; i++) { 4155 int blks; 4156 4157 blks = count_overhead(sb, i, buf); 4158 overhead += blks; 4159 if (blks) 4160 memset(buf, 0, PAGE_SIZE); 4161 cond_resched(); 4162 } 4163 4164 /* 4165 * Add the internal journal blocks whether the journal has been 4166 * loaded or not 4167 */ 4168 if (sbi->s_journal && !sbi->s_journal_bdev) 4169 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len); 4170 else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { 4171 /* j_inum for internal journal is non-zero */ 4172 j_inode = ext4_get_journal_inode(sb, j_inum); 4173 if (j_inode) { 4174 j_blocks = j_inode->i_size >> sb->s_blocksize_bits; 4175 overhead += EXT4_NUM_B2C(sbi, j_blocks); 4176 iput(j_inode); 4177 } else { 4178 ext4_msg(sb, KERN_ERR, "can't get journal size"); 4179 } 4180 } 4181 sbi->s_overhead = overhead; 4182 smp_wmb(); 4183 free_page((unsigned long) buf); 4184 return 0; 4185 } 4186 4187 static void ext4_set_resv_clusters(struct super_block *sb) 4188 { 4189 ext4_fsblk_t resv_clusters; 4190 struct ext4_sb_info *sbi = EXT4_SB(sb); 4191 4192 /* 4193 * There's no need to reserve anything when we aren't using extents. 4194 * The space estimates are exact, there are no unwritten extents, 4195 * hole punching doesn't need new metadata... This is needed especially 4196 * to keep ext2/3 backward compatibility. 4197 */ 4198 if (!ext4_has_feature_extents(sb)) 4199 return; 4200 /* 4201 * By default we reserve 2% or 4096 clusters, whichever is smaller. 4202 * This should cover the situations where we cannot afford to run 4203 * out of space, for example when punching holes or converting 4204 * unwritten extents in the delalloc path. In most cases such an 4205 * allocation requires 1 or 2 blocks; higher numbers are 4206 * very rare.
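 * Worked example (illustrative): a 1 TiB filesystem with 4k blocks
 * and no bigalloc has 268435456 clusters; 2% of that (the do_div by
 * 50 below) is roughly 5.3 million, so the min_t() caps the
 * reservation at 4096 clusters, i.e. 16 MiB.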
4207 */ 4208 resv_clusters = (ext4_blocks_count(sbi->s_es) >> 4209 sbi->s_cluster_bits); 4210 4211 do_div(resv_clusters, 50); 4212 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); 4213 4214 atomic64_set(&sbi->s_resv_clusters, resv_clusters); 4215 } 4216 4217 static const char *ext4_quota_mode(struct super_block *sb) 4218 { 4219 #ifdef CONFIG_QUOTA 4220 if (!ext4_quota_capable(sb)) 4221 return "none"; 4222 4223 if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb)) 4224 return "journalled"; 4225 else 4226 return "writeback"; 4227 #else 4228 return "disabled"; 4229 #endif 4230 } 4231 4232 static void ext4_setup_csum_trigger(struct super_block *sb, 4233 enum ext4_journal_trigger_type type, 4234 void (*trigger)( 4235 struct jbd2_buffer_trigger_type *type, 4236 struct buffer_head *bh, 4237 void *mapped_data, 4238 size_t size)) 4239 { 4240 struct ext4_sb_info *sbi = EXT4_SB(sb); 4241 4242 sbi->s_journal_triggers[type].sb = sb; 4243 sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger; 4244 } 4245 4246 static void ext4_free_sbi(struct ext4_sb_info *sbi) 4247 { 4248 if (!sbi) 4249 return; 4250 4251 kfree(sbi->s_blockgroup_lock); 4252 fs_put_dax(sbi->s_daxdev, NULL); 4253 kfree(sbi); 4254 } 4255 4256 static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb) 4257 { 4258 struct ext4_sb_info *sbi; 4259 4260 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 4261 if (!sbi) 4262 return NULL; 4263 4264 sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off, 4265 NULL, NULL); 4266 4267 sbi->s_blockgroup_lock = 4268 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); 4269 4270 if (!sbi->s_blockgroup_lock) 4271 goto err_out; 4272 4273 sb->s_fs_info = sbi; 4274 sbi->s_sb = sb; 4275 return sbi; 4276 err_out: 4277 fs_put_dax(sbi->s_daxdev, NULL); 4278 kfree(sbi); 4279 return NULL; 4280 } 4281 4282 static void ext4_set_def_opts(struct super_block *sb, 4283 struct ext4_super_block *es) 4284 { 4285 unsigned long def_mount_opts; 4286 4287 /* Set defaults before we parse the mount options */ 4288 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 4289 set_opt(sb, INIT_INODE_TABLE); 4290 if (def_mount_opts & EXT4_DEFM_DEBUG) 4291 set_opt(sb, DEBUG); 4292 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) 4293 set_opt(sb, GRPID); 4294 if (def_mount_opts & EXT4_DEFM_UID16) 4295 set_opt(sb, NO_UID32); 4296 /* xattr user namespace & acls are now defaulted on */ 4297 set_opt(sb, XATTR_USER); 4298 #ifdef CONFIG_EXT4_FS_POSIX_ACL 4299 set_opt(sb, POSIX_ACL); 4300 #endif 4301 if (ext4_has_feature_fast_commit(sb)) 4302 set_opt2(sb, JOURNAL_FAST_COMMIT); 4303 /* don't forget to enable journal_csum when metadata_csum is enabled. 
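 * (the journal's blocks are metadata too; leaving them unchecksummed
 * while the rest of the metadata is protected would defeat the point)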
*/ 4304 if (ext4_has_metadata_csum(sb)) 4305 set_opt(sb, JOURNAL_CHECKSUM); 4306 4307 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 4308 set_opt(sb, JOURNAL_DATA); 4309 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 4310 set_opt(sb, ORDERED_DATA); 4311 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 4312 set_opt(sb, WRITEBACK_DATA); 4313 4314 if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC) 4315 set_opt(sb, ERRORS_PANIC); 4316 else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE) 4317 set_opt(sb, ERRORS_CONT); 4318 else 4319 set_opt(sb, ERRORS_RO); 4320 /* block_validity enabled by default; disable with noblock_validity */ 4321 set_opt(sb, BLOCK_VALIDITY); 4322 if (def_mount_opts & EXT4_DEFM_DISCARD) 4323 set_opt(sb, DISCARD); 4324 4325 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) 4326 set_opt(sb, BARRIER); 4327 4328 /* 4329 * enable delayed allocation by default 4330 * Use -o nodelalloc to turn it off 4331 */ 4332 if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && 4333 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 4334 set_opt(sb, DELALLOC); 4335 4336 if (sb->s_blocksize == PAGE_SIZE) 4337 set_opt(sb, DIOREAD_NOLOCK); 4338 } 4339 4340 static int ext4_handle_clustersize(struct super_block *sb) 4341 { 4342 struct ext4_sb_info *sbi = EXT4_SB(sb); 4343 struct ext4_super_block *es = sbi->s_es; 4344 int clustersize; 4345 4346 /* Handle clustersize */ 4347 clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); 4348 if (ext4_has_feature_bigalloc(sb)) { 4349 if (clustersize < sb->s_blocksize) { 4350 ext4_msg(sb, KERN_ERR, 4351 "cluster size (%d) smaller than " 4352 "block size (%lu)", clustersize, sb->s_blocksize); 4353 return -EINVAL; 4354 } 4355 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - 4356 le32_to_cpu(es->s_log_block_size); 4357 sbi->s_clusters_per_group = 4358 le32_to_cpu(es->s_clusters_per_group); 4359 if (sbi->s_clusters_per_group > sb->s_blocksize * 8) { 4360 ext4_msg(sb, KERN_ERR, 4361 "#clusters per group too big: %lu", 4362 sbi->s_clusters_per_group); 4363 return -EINVAL; 4364 } 4365 if (sbi->s_blocks_per_group != 4366 (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) { 4367 ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " 4368 "clusters per group (%lu) inconsistent", 4369 sbi->s_blocks_per_group, 4370 sbi->s_clusters_per_group); 4371 return -EINVAL; 4372 } 4373 } else { 4374 if (clustersize != sb->s_blocksize) { 4375 ext4_msg(sb, KERN_ERR, 4376 "fragment/cluster size (%d) != " 4377 "block size (%lu)", clustersize, sb->s_blocksize); 4378 return -EINVAL; 4379 } 4380 if (sbi->s_blocks_per_group > sb->s_blocksize * 8) { 4381 ext4_msg(sb, KERN_ERR, 4382 "#blocks per group too big: %lu", 4383 sbi->s_blocks_per_group); 4384 return -EINVAL; 4385 } 4386 sbi->s_clusters_per_group = sbi->s_blocks_per_group; 4387 sbi->s_cluster_bits = 0; 4388 } 4389 sbi->s_cluster_ratio = clustersize / sb->s_blocksize; 4390 4391 /* Do we have standard group size of clustersize * 8 blocks ? 
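 * (e.g. with 4k blocks and no bigalloc, 4096 << 3 == 32768 blocks
 * per group, the mke2fs default)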
*/ 4392 if (sbi->s_blocks_per_group == clustersize << 3) 4393 set_opt2(sb, STD_GROUP_SIZE); 4394 4395 return 0; 4396 } 4397 4398 static void ext4_fast_commit_init(struct super_block *sb) 4399 { 4400 struct ext4_sb_info *sbi = EXT4_SB(sb); 4401 4402 /* Initialize fast commit stuff */ 4403 atomic_set(&sbi->s_fc_subtid, 0); 4404 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]); 4405 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]); 4406 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]); 4407 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]); 4408 sbi->s_fc_bytes = 0; 4409 ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); 4410 sbi->s_fc_ineligible_tid = 0; 4411 spin_lock_init(&sbi->s_fc_lock); 4412 memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); 4413 sbi->s_fc_replay_state.fc_regions = NULL; 4414 sbi->s_fc_replay_state.fc_regions_size = 0; 4415 sbi->s_fc_replay_state.fc_regions_used = 0; 4416 sbi->s_fc_replay_state.fc_regions_valid = 0; 4417 sbi->s_fc_replay_state.fc_modified_inodes = NULL; 4418 sbi->s_fc_replay_state.fc_modified_inodes_size = 0; 4419 sbi->s_fc_replay_state.fc_modified_inodes_used = 0; 4420 } 4421 4422 static int ext4_inode_info_init(struct super_block *sb, 4423 struct ext4_super_block *es) 4424 { 4425 struct ext4_sb_info *sbi = EXT4_SB(sb); 4426 4427 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { 4428 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; 4429 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; 4430 } else { 4431 sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 4432 sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 4433 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { 4434 ext4_msg(sb, KERN_ERR, "invalid first ino: %u", 4435 sbi->s_first_ino); 4436 return -EINVAL; 4437 } 4438 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 4439 (!is_power_of_2(sbi->s_inode_size)) || 4440 (sbi->s_inode_size > sb->s_blocksize)) { 4441 ext4_msg(sb, KERN_ERR, 4442 "unsupported inode size: %d", 4443 sbi->s_inode_size); 4444 ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize); 4445 return -EINVAL; 4446 } 4447 /* 4448 * i_atime_extra is the last extra field available for 4449 * [acm]times in struct ext4_inode. Checking for that 4450 * field should suffice to ensure we have extra space 4451 * for all three. 
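 * For illustration: 256-byte and larger inodes have this extra room,
 * giving nanosecond timestamp granularity and a maximum timestamp far
 * beyond 2038; with 128-byte inodes the granularity stays one second
 * and the maximum is the classic signed-32-bit limit in 2038.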
4452 */ 4453 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + 4454 sizeof(((struct ext4_inode *)0)->i_atime_extra)) { 4455 sb->s_time_gran = 1; 4456 sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; 4457 } else { 4458 sb->s_time_gran = NSEC_PER_SEC; 4459 sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; 4460 } 4461 sb->s_time_min = EXT4_TIMESTAMP_MIN; 4462 } 4463 4464 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 4465 sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 4466 EXT4_GOOD_OLD_INODE_SIZE; 4467 if (ext4_has_feature_extra_isize(sb)) { 4468 unsigned v, max = (sbi->s_inode_size - 4469 EXT4_GOOD_OLD_INODE_SIZE); 4470 4471 v = le16_to_cpu(es->s_want_extra_isize); 4472 if (v > max) { 4473 ext4_msg(sb, KERN_ERR, 4474 "bad s_want_extra_isize: %d", v); 4475 return -EINVAL; 4476 } 4477 if (sbi->s_want_extra_isize < v) 4478 sbi->s_want_extra_isize = v; 4479 4480 v = le16_to_cpu(es->s_min_extra_isize); 4481 if (v > max) { 4482 ext4_msg(sb, KERN_ERR, 4483 "bad s_min_extra_isize: %d", v); 4484 return -EINVAL; 4485 } 4486 if (sbi->s_want_extra_isize < v) 4487 sbi->s_want_extra_isize = v; 4488 } 4489 } 4490 4491 return 0; 4492 } 4493 4494 #if IS_ENABLED(CONFIG_UNICODE) 4495 static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 4496 { 4497 const struct ext4_sb_encodings *encoding_info; 4498 struct unicode_map *encoding; 4499 __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags); 4500 4501 if (!ext4_has_feature_casefold(sb) || sb->s_encoding) 4502 return 0; 4503 4504 encoding_info = ext4_sb_read_encoding(es); 4505 if (!encoding_info) { 4506 ext4_msg(sb, KERN_ERR, 4507 "Encoding requested by superblock is unknown"); 4508 return -EINVAL; 4509 } 4510 4511 encoding = utf8_load(encoding_info->version); 4512 if (IS_ERR(encoding)) { 4513 ext4_msg(sb, KERN_ERR, 4514 "can't mount with superblock charset: %s-%u.%u.%u " 4515 "not supported by the kernel. flags: 0x%x.", 4516 encoding_info->name, 4517 unicode_major(encoding_info->version), 4518 unicode_minor(encoding_info->version), 4519 unicode_rev(encoding_info->version), 4520 encoding_flags); 4521 return -EINVAL; 4522 } 4523 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: " 4524 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name, 4525 unicode_major(encoding_info->version), 4526 unicode_minor(encoding_info->version), 4527 unicode_rev(encoding_info->version), 4528 encoding_flags); 4529 4530 sb->s_encoding = encoding; 4531 sb->s_encoding_flags = encoding_flags; 4532 4533 return 0; 4534 } 4535 #else 4536 static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 4537 { 4538 return 0; 4539 } 4540 #endif 4541 4542 static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es) 4543 { 4544 struct ext4_sb_info *sbi = EXT4_SB(sb); 4545 4546 /* Warn if metadata_csum and gdt_csum are both set. 
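 * (metadata_csum supersedes the older uninit_bg/gdt_csum scheme, so a
 * consistent filesystem should carry at most one of the two flags)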
*/ 4547 if (ext4_has_feature_metadata_csum(sb) && 4548 ext4_has_feature_gdt_csum(sb)) 4549 ext4_warning(sb, "metadata_csum and uninit_bg are " 4550 "redundant flags; please run fsck."); 4551 4552 /* Check for a known checksum algorithm */ 4553 if (!ext4_verify_csum_type(sb, es)) { 4554 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4555 "unknown checksum algorithm."); 4556 return -EINVAL; 4557 } 4558 ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, 4559 ext4_orphan_file_block_trigger); 4560 4561 /* Load the checksum driver */ 4562 sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); 4563 if (IS_ERR(sbi->s_chksum_driver)) { 4564 int ret = PTR_ERR(sbi->s_chksum_driver); 4565 ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); 4566 sbi->s_chksum_driver = NULL; 4567 return ret; 4568 } 4569 4570 /* Check superblock checksum */ 4571 if (!ext4_superblock_csum_verify(sb, es)) { 4572 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4573 "invalid superblock checksum. Run e2fsck?"); 4574 return -EFSBADCRC; 4575 } 4576 4577 /* Precompute checksum seed for all metadata */ 4578 if (ext4_has_feature_csum_seed(sb)) 4579 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); 4580 else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb)) 4581 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, 4582 sizeof(es->s_uuid)); 4583 return 0; 4584 } 4585 4586 static int ext4_check_feature_compatibility(struct super_block *sb, 4587 struct ext4_super_block *es, 4588 int silent) 4589 { 4590 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 4591 (ext4_has_compat_features(sb) || 4592 ext4_has_ro_compat_features(sb) || 4593 ext4_has_incompat_features(sb))) 4594 ext4_msg(sb, KERN_WARNING, 4595 "feature flags set on rev 0 fs, " 4596 "running e2fsck is recommended"); 4597 4598 if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { 4599 set_opt2(sb, HURD_COMPAT); 4600 if (ext4_has_feature_64bit(sb)) { 4601 ext4_msg(sb, KERN_ERR, 4602 "The Hurd can't support 64-bit file systems"); 4603 return -EINVAL; 4604 } 4605 4606 /* 4607 * ea_inode feature uses l_i_version field which is not 4608 * available in HURD_COMPAT mode. 4609 */ 4610 if (ext4_has_feature_ea_inode(sb)) { 4611 ext4_msg(sb, KERN_ERR, 4612 "ea_inode feature is not supported for Hurd"); 4613 return -EINVAL; 4614 } 4615 } 4616 4617 if (IS_EXT2_SB(sb)) { 4618 if (ext2_feature_set_ok(sb)) 4619 ext4_msg(sb, KERN_INFO, "mounting ext2 file system " 4620 "using the ext4 subsystem"); 4621 else { 4622 /* 4623 * If we're probing, be silent if this looks like 4624 * it's actually an ext[34] filesystem. 4625 */ 4626 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4627 return -EINVAL; 4628 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " 4629 "to feature incompatibilities"); 4630 return -EINVAL; 4631 } 4632 } 4633 4634 if (IS_EXT3_SB(sb)) { 4635 if (ext3_feature_set_ok(sb)) 4636 ext4_msg(sb, KERN_INFO, "mounting ext3 file system " 4637 "using the ext4 subsystem"); 4638 else { 4639 /* 4640 * If we're probing, be silent if this looks like 4641 * it's actually an ext4 filesystem. 4642 */ 4643 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4644 return -EINVAL; 4645 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " 4646 "to feature incompatibilities"); 4647 return -EINVAL; 4648 } 4649 } 4650 4651 /* 4652 * Check feature flags regardless of the revision level, since we 4653 * previously didn't change the revision level when setting the flags, 4654 * so there is a chance incompat flags are set on a rev 0 filesystem.
4655 */ 4656 if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) 4657 return -EINVAL; 4658 4659 return 0; 4660 } 4661 4662 static int ext4_geometry_check(struct super_block *sb, 4663 struct ext4_super_block *es) 4664 { 4665 struct ext4_sb_info *sbi = EXT4_SB(sb); 4666 __u64 blocks_count; 4667 4668 /* check blocks count against device size */ 4669 blocks_count = sb_bdev_nr_blocks(sb); 4670 if (blocks_count && ext4_blocks_count(es) > blocks_count) { 4671 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " 4672 "exceeds size of device (%llu blocks)", 4673 ext4_blocks_count(es), blocks_count); 4674 return -EINVAL; 4675 } 4676 4677 /* 4678 * It makes no sense for the first data block to be beyond the end 4679 * of the filesystem. 4680 */ 4681 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { 4682 ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4683 "block %u is beyond end of filesystem (%llu)", 4684 le32_to_cpu(es->s_first_data_block), 4685 ext4_blocks_count(es)); 4686 return -EINVAL; 4687 } 4688 if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && 4689 (sbi->s_cluster_ratio == 1)) { 4690 ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4691 "block is 0 with a 1k block and cluster size"); 4692 return -EINVAL; 4693 } 4694 4695 blocks_count = (ext4_blocks_count(es) - 4696 le32_to_cpu(es->s_first_data_block) + 4697 EXT4_BLOCKS_PER_GROUP(sb) - 1); 4698 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 4699 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { 4700 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " 4701 "(block count %llu, first data block %u, " 4702 "blocks per group %lu)", blocks_count, 4703 ext4_blocks_count(es), 4704 le32_to_cpu(es->s_first_data_block), 4705 EXT4_BLOCKS_PER_GROUP(sb)); 4706 return -EINVAL; 4707 } 4708 sbi->s_groups_count = blocks_count; 4709 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 4710 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 4711 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != 4712 le32_to_cpu(es->s_inodes_count)) { 4713 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", 4714 le32_to_cpu(es->s_inodes_count), 4715 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); 4716 return -EINVAL; 4717 } 4718 4719 return 0; 4720 } 4721 4722 static void ext4_group_desc_free(struct ext4_sb_info *sbi) 4723 { 4724 struct buffer_head **group_desc; 4725 int i; 4726 4727 rcu_read_lock(); 4728 group_desc = rcu_dereference(sbi->s_group_desc); 4729 for (i = 0; i < sbi->s_gdb_count; i++) 4730 brelse(group_desc[i]); 4731 kvfree(group_desc); 4732 rcu_read_unlock(); 4733 } 4734 4735 static int ext4_group_desc_init(struct super_block *sb, 4736 struct ext4_super_block *es, 4737 ext4_fsblk_t logical_sb_block, 4738 ext4_group_t *first_not_zeroed) 4739 { 4740 struct ext4_sb_info *sbi = EXT4_SB(sb); 4741 unsigned int db_count; 4742 ext4_fsblk_t block; 4743 int i; 4744 4745 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 4746 EXT4_DESC_PER_BLOCK(sb); 4747 if (ext4_has_feature_meta_bg(sb)) { 4748 if (le32_to_cpu(es->s_first_meta_bg) > db_count) { 4749 ext4_msg(sb, KERN_WARNING, 4750 "first meta block group too large: %u " 4751 "(group descriptor block count %u)", 4752 le32_to_cpu(es->s_first_meta_bg), db_count); 4753 return -EINVAL; 4754 } 4755 } 4756 rcu_assign_pointer(sbi->s_group_desc, 4757 kvmalloc_array(db_count, 4758 sizeof(struct buffer_head *), 4759 GFP_KERNEL)); 4760 if (sbi->s_group_desc == NULL) { 4761 ext4_msg(sb, KERN_ERR, "not 
enough memory"); 4762 return -ENOMEM; 4763 } 4764 4765 bgl_lock_init(sbi->s_blockgroup_lock); 4766 4767 /* Pre-read the descriptors into the buffer cache */ 4768 for (i = 0; i < db_count; i++) { 4769 block = descriptor_loc(sb, logical_sb_block, i); 4770 ext4_sb_breadahead_unmovable(sb, block); 4771 } 4772 4773 for (i = 0; i < db_count; i++) { 4774 struct buffer_head *bh; 4775 4776 block = descriptor_loc(sb, logical_sb_block, i); 4777 bh = ext4_sb_bread_unmovable(sb, block); 4778 if (IS_ERR(bh)) { 4779 ext4_msg(sb, KERN_ERR, 4780 "can't read group descriptor %d", i); 4781 sbi->s_gdb_count = i; 4782 return PTR_ERR(bh); 4783 } 4784 rcu_read_lock(); 4785 rcu_dereference(sbi->s_group_desc)[i] = bh; 4786 rcu_read_unlock(); 4787 } 4788 sbi->s_gdb_count = db_count; 4789 if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) { 4790 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4791 return -EFSCORRUPTED; 4792 } 4793 4794 return 0; 4795 } 4796 4797 static int ext4_load_and_init_journal(struct super_block *sb, 4798 struct ext4_super_block *es, 4799 struct ext4_fs_context *ctx) 4800 { 4801 struct ext4_sb_info *sbi = EXT4_SB(sb); 4802 int err; 4803 4804 err = ext4_load_journal(sb, es, ctx->journal_devnum); 4805 if (err) 4806 return err; 4807 4808 if (ext4_has_feature_64bit(sb) && 4809 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 4810 JBD2_FEATURE_INCOMPAT_64BIT)) { 4811 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 4812 goto out; 4813 } 4814 4815 if (!set_journal_csum_feature_set(sb)) { 4816 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " 4817 "feature set"); 4818 goto out; 4819 } 4820 4821 if (test_opt2(sb, JOURNAL_FAST_COMMIT) && 4822 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 4823 JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) { 4824 ext4_msg(sb, KERN_ERR, 4825 "Failed to set fast commit journal feature"); 4826 goto out; 4827 } 4828 4829 /* We have now updated the journal if required, so we can 4830 * validate the data journaling mode. */ 4831 switch (test_opt(sb, DATA_FLAGS)) { 4832 case 0: 4833 /* No mode set, assume a default based on the journal 4834 * capabilities: ORDERED_DATA if the journal can 4835 * cope, else JOURNAL_DATA 4836 */ 4837 if (jbd2_journal_check_available_features 4838 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 4839 set_opt(sb, ORDERED_DATA); 4840 sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA; 4841 } else { 4842 set_opt(sb, JOURNAL_DATA); 4843 sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; 4844 } 4845 break; 4846 4847 case EXT4_MOUNT_ORDERED_DATA: 4848 case EXT4_MOUNT_WRITEBACK_DATA: 4849 if (!jbd2_journal_check_available_features 4850 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 4851 ext4_msg(sb, KERN_ERR, "Journal does not support " 4852 "requested data journaling mode"); 4853 goto out; 4854 } 4855 break; 4856 default: 4857 break; 4858 } 4859 4860 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && 4861 test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 4862 ext4_msg(sb, KERN_ERR, "can't mount with " 4863 "journal_async_commit in data=ordered mode"); 4864 goto out; 4865 } 4866 4867 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 4868 4869 sbi->s_journal->j_submit_inode_data_buffers = 4870 ext4_journal_submit_inode_data_buffers; 4871 sbi->s_journal->j_finish_inode_data_buffers = 4872 ext4_journal_finish_inode_data_buffers; 4873 4874 return 0; 4875 4876 out: 4877 /* flush s_error_work before journal destroy. 
*/ 4878 flush_work(&sbi->s_error_work); 4879 jbd2_journal_destroy(sbi->s_journal); 4880 sbi->s_journal = NULL; 4881 return -EINVAL; 4882 } 4883 4884 static int ext4_journal_data_mode_check(struct super_block *sb) 4885 { 4886 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4887 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with " 4888 "data=journal disables delayed allocation, " 4889 "dioread_nolock, O_DIRECT and fast_commit support!\n"); 4890 /* can't mount with both data=journal and dioread_nolock. */ 4891 clear_opt(sb, DIOREAD_NOLOCK); 4892 clear_opt2(sb, JOURNAL_FAST_COMMIT); 4893 if (test_opt2(sb, EXPLICIT_DELALLOC)) { 4894 ext4_msg(sb, KERN_ERR, "can't mount with " 4895 "both data=journal and delalloc"); 4896 return -EINVAL; 4897 } 4898 if (test_opt(sb, DAX_ALWAYS)) { 4899 ext4_msg(sb, KERN_ERR, "can't mount with " 4900 "both data=journal and dax"); 4901 return -EINVAL; 4902 } 4903 if (ext4_has_feature_encrypt(sb)) { 4904 ext4_msg(sb, KERN_WARNING, 4905 "encrypted files will use data=ordered " 4906 "instead of data journaling mode"); 4907 } 4908 if (test_opt(sb, DELALLOC)) 4909 clear_opt(sb, DELALLOC); 4910 } else { 4911 sb->s_iflags |= SB_I_CGROUPWB; 4912 } 4913 4914 return 0; 4915 } 4916 4917 static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb, 4918 int silent) 4919 { 4920 struct ext4_sb_info *sbi = EXT4_SB(sb); 4921 struct ext4_super_block *es; 4922 ext4_fsblk_t logical_sb_block; 4923 unsigned long offset = 0; 4924 struct buffer_head *bh; 4925 int ret = -EINVAL; 4926 int blocksize; 4927 4928 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); 4929 if (!blocksize) { 4930 ext4_msg(sb, KERN_ERR, "unable to set blocksize"); 4931 return -EINVAL; 4932 } 4933 4934 /* 4935 * The ext4 superblock will not be buffer aligned for other than 1kB 4936 * block sizes. We need to calculate the offset from buffer start. 4937 */ 4938 if (blocksize != EXT4_MIN_BLOCK_SIZE) { 4939 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE; 4940 offset = do_div(logical_sb_block, blocksize); 4941 } else { 4942 logical_sb_block = sbi->s_sb_block; 4943 } 4944 4945 bh = ext4_sb_bread_unmovable(sb, logical_sb_block); 4946 if (IS_ERR(bh)) { 4947 ext4_msg(sb, KERN_ERR, "unable to read superblock"); 4948 return PTR_ERR(bh); 4949 } 4950 /* 4951 * Note: s_es must be initialized as soon as possible because 4952 * some ext4 macro-instructions depend on its value 4953 */ 4954 es = (struct ext4_super_block *) (bh->b_data + offset); 4955 sbi->s_es = es; 4956 sb->s_magic = le16_to_cpu(es->s_magic); 4957 if (sb->s_magic != EXT4_SUPER_MAGIC) { 4958 if (!silent) 4959 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 4960 goto out; 4961 } 4962 4963 if (le32_to_cpu(es->s_log_block_size) > 4964 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 4965 ext4_msg(sb, KERN_ERR, 4966 "Invalid log block size: %u", 4967 le32_to_cpu(es->s_log_block_size)); 4968 goto out; 4969 } 4970 if (le32_to_cpu(es->s_log_cluster_size) > 4971 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 4972 ext4_msg(sb, KERN_ERR, 4973 "Invalid log cluster size: %u", 4974 le32_to_cpu(es->s_log_cluster_size)); 4975 goto out; 4976 } 4977 4978 blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); 4979 4980 /* 4981 * If the default block size is not the same as the real block size, 4982 * we need to reload it. 
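 * (e.g. sb_min_blocksize() above typically picks 1k on common disks,
 * while s_log_block_size says the filesystem really uses 4k blocks)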
4983 */ 4984 if (sb->s_blocksize == blocksize) { 4985 *lsb = logical_sb_block; 4986 sbi->s_sbh = bh; 4987 return 0; 4988 } 4989 4990 /* 4991 * bh must be released before kill_bdev(), otherwise 4992 * neither it nor its page will be freed. kill_bdev() 4993 * is called by sb_set_blocksize(). 4994 */ 4995 brelse(bh); 4996 /* Validate the filesystem blocksize */ 4997 if (!sb_set_blocksize(sb, blocksize)) { 4998 ext4_msg(sb, KERN_ERR, "bad block size %d", 4999 blocksize); 5000 bh = NULL; 5001 goto out; 5002 } 5003 5004 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE; 5005 offset = do_div(logical_sb_block, blocksize); 5006 bh = ext4_sb_bread_unmovable(sb, logical_sb_block); 5007 if (IS_ERR(bh)) { 5008 ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); 5009 ret = PTR_ERR(bh); 5010 bh = NULL; 5011 goto out; 5012 } 5013 es = (struct ext4_super_block *)(bh->b_data + offset); 5014 sbi->s_es = es; 5015 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { 5016 ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); 5017 goto out; 5018 } 5019 *lsb = logical_sb_block; 5020 sbi->s_sbh = bh; 5021 return 0; 5022 out: 5023 brelse(bh); 5024 return ret; 5025 } 5026 5027 static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) 5028 { 5029 struct ext4_super_block *es = NULL; 5030 struct ext4_sb_info *sbi = EXT4_SB(sb); 5031 struct flex_groups **flex_groups; 5032 ext4_fsblk_t block; 5033 ext4_fsblk_t logical_sb_block; 5034 struct inode *root; 5035 int ret = -ENOMEM; 5036 unsigned int i; 5037 int needs_recovery, has_huge_files; 5038 int err = 0; 5039 ext4_group_t first_not_zeroed; 5040 struct ext4_fs_context *ctx = fc->fs_private; 5041 int silent = fc->sb_flags & SB_SILENT; 5042 5043 /* Set defaults for the variables that will be set during parsing */ 5044 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) 5045 ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 5046 5047 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; 5048 sbi->s_sectors_written_start = 5049 part_stat_read(sb->s_bdev, sectors[STAT_WRITE]); 5050 5051 /* -EINVAL is default */ 5052 ret = -EINVAL; 5053 err = ext4_load_super(sb, &logical_sb_block, silent); 5054 if (err) 5055 goto out_fail; 5056 5057 es = sbi->s_es; 5058 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 5059 5060 err = ext4_init_metadata_csum(sb, es); 5061 if (err) 5062 goto failed_mount; 5063 5064 ext4_set_def_opts(sb, es); 5065 5066 sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); 5067 sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); 5068 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 5069 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 5070 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 5071 5072 /* 5073 * set the default s_li_wait_mult for lazyinit, in case there is 5074 * no mount option specified. 5075 */ 5076 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 5077 5078 if (ext4_inode_info_init(sb, es)) 5079 goto failed_mount; 5080 5081 err = parse_apply_sb_mount_options(sb, ctx); 5082 if (err < 0) 5083 goto failed_mount; 5084 5085 sbi->s_def_mount_opt = sbi->s_mount_opt; 5086 sbi->s_def_mount_opt2 = sbi->s_mount_opt2; 5087 5088 err = ext4_check_opt_consistency(fc, sb); 5089 if (err < 0) 5090 goto failed_mount; 5091 5092 ext4_apply_options(fc, sb); 5093 5094 if (ext4_encoding_init(sb, es)) 5095 goto failed_mount; 5096 5097 if (ext4_journal_data_mode_check(sb)) 5098 goto failed_mount; 5099 5100 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 5101 (test_opt(sb, POSIX_ACL) ?
SB_POSIXACL : 0); 5102 5103 /* i_version is always enabled now */ 5104 sb->s_flags |= SB_I_VERSION; 5105 5106 if (ext4_check_feature_compatibility(sb, es, silent)) 5107 goto failed_mount; 5108 5109 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) { 5110 ext4_msg(sb, KERN_ERR, 5111 "Number of reserved GDT blocks insanely large: %d", 5112 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); 5113 goto failed_mount; 5114 } 5115 5116 if (sbi->s_daxdev) { 5117 if (sb->s_blocksize == PAGE_SIZE) 5118 set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); 5119 else 5120 ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n"); 5121 } 5122 5123 if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { 5124 if (ext4_has_feature_inline_data(sb)) { 5125 ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" 5126 " that may contain inline data"); 5127 goto failed_mount; 5128 } 5129 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { 5130 ext4_msg(sb, KERN_ERR, 5131 "DAX unsupported by block device."); 5132 goto failed_mount; 5133 } 5134 } 5135 5136 if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { 5137 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", 5138 es->s_encryption_level); 5139 goto failed_mount; 5140 } 5141 5142 has_huge_files = ext4_has_feature_huge_file(sb); 5143 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, 5144 has_huge_files); 5145 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); 5146 5147 sbi->s_desc_size = le16_to_cpu(es->s_desc_size); 5148 if (ext4_has_feature_64bit(sb)) { 5149 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || 5150 sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 5151 !is_power_of_2(sbi->s_desc_size)) { 5152 ext4_msg(sb, KERN_ERR, 5153 "unsupported descriptor size %lu", 5154 sbi->s_desc_size); 5155 goto failed_mount; 5156 } 5157 } else 5158 sbi->s_desc_size = EXT4_MIN_DESC_SIZE; 5159 5160 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); 5161 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); 5162 5163 sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb); 5164 if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) { 5165 if (!silent) 5166 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 5167 goto failed_mount; 5168 } 5169 if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || 5170 sbi->s_inodes_per_group > sb->s_blocksize * 8) { 5171 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", 5172 sbi->s_inodes_per_group); 5173 goto failed_mount; 5174 } 5175 sbi->s_itb_per_group = sbi->s_inodes_per_group / 5176 sbi->s_inodes_per_block; 5177 sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb); 5178 sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY; 5179 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); 5180 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); 5181 5182 for (i = 0; i < 4; i++) 5183 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 5184 sbi->s_def_hash_version = es->s_def_hash_version; 5185 if (ext4_has_feature_dir_index(sb)) { 5186 i = le32_to_cpu(es->s_flags); 5187 if (i & EXT2_FLAGS_UNSIGNED_HASH) 5188 sbi->s_hash_unsigned = 3; 5189 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 5190 #ifdef __CHAR_UNSIGNED__ 5191 if (!sb_rdonly(sb)) 5192 es->s_flags |= 5193 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 5194 sbi->s_hash_unsigned = 3; 5195 #else 5196 if (!sb_rdonly(sb)) 5197 es->s_flags |= 5198 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 5199 #endif 5200 } 5201 } 5202 5203 if 
(ext4_handle_clustersize(sb)) 5204 goto failed_mount; 5205 5206 /* 5207 * Test whether we have more sectors than will fit in sector_t, 5208 * and whether the max offset is addressable by the page cache. 5209 */ 5210 err = generic_check_addressable(sb->s_blocksize_bits, 5211 ext4_blocks_count(es)); 5212 if (err) { 5213 ext4_msg(sb, KERN_ERR, "filesystem" 5214 " too large to mount safely on this system"); 5215 goto failed_mount; 5216 } 5217 5218 if (ext4_geometry_check(sb, es)) 5219 goto failed_mount; 5220 5221 timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 5222 spin_lock_init(&sbi->s_error_lock); 5223 INIT_WORK(&sbi->s_error_work, flush_stashed_error_work); 5224 5225 err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed); 5226 if (err) 5227 goto failed_mount3; 5228 5229 /* Register extent status tree shrinker */ 5230 if (ext4_es_register_shrinker(sbi)) 5231 goto failed_mount3; 5232 5233 sbi->s_stripe = ext4_get_stripe_size(sbi); 5234 sbi->s_extent_max_zeroout_kb = 32; 5235 5236 /* 5237 * set up enough so that it can read an inode 5238 */ 5239 sb->s_op = &ext4_sops; 5240 sb->s_export_op = &ext4_export_ops; 5241 sb->s_xattr = ext4_xattr_handlers; 5242 #ifdef CONFIG_FS_ENCRYPTION 5243 sb->s_cop = &ext4_cryptops; 5244 #endif 5245 #ifdef CONFIG_FS_VERITY 5246 sb->s_vop = &ext4_verityops; 5247 #endif 5248 #ifdef CONFIG_QUOTA 5249 sb->dq_op = &ext4_quota_operations; 5250 if (ext4_has_feature_quota(sb)) 5251 sb->s_qcop = &dquot_quotactl_sysfile_ops; 5252 else 5253 sb->s_qcop = &ext4_qctl_operations; 5254 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 5255 #endif 5256 memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); 5257 5258 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ 5259 mutex_init(&sbi->s_orphan_lock); 5260 5261 ext4_fast_commit_init(sb); 5262 5263 sb->s_root = NULL; 5264 5265 needs_recovery = (es->s_last_orphan != 0 || 5266 ext4_has_feature_orphan_present(sb) || 5267 ext4_has_feature_journal_needs_recovery(sb)); 5268 5269 if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) 5270 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) 5271 goto failed_mount3a; 5272 5273 /* 5274 * The first inode we look at is the journal inode. Don't try 5275 * root first: it may be modified in the journal! 
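 * (until the log is replayed, the on-disk copy of the root inode may
 * be stale)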
5276 */ 5277 if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { 5278 err = ext4_load_and_init_journal(sb, es, ctx); 5279 if (err) 5280 goto failed_mount3a; 5281 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && 5282 ext4_has_feature_journal_needs_recovery(sb)) { 5283 ext4_msg(sb, KERN_ERR, "required journal recovery " 5284 "suppressed and not mounted read-only"); 5285 goto failed_mount3a; 5286 } else { 5287 /* Nojournal mode, all journal mount options are illegal */ 5288 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 5289 ext4_msg(sb, KERN_ERR, "can't mount with " 5290 "journal_async_commit, fs mounted w/o journal"); 5291 goto failed_mount3a; 5292 } 5293 5294 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { 5295 ext4_msg(sb, KERN_ERR, "can't mount with " 5296 "journal_checksum, fs mounted w/o journal"); 5297 goto failed_mount3a; 5298 } 5299 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { 5300 ext4_msg(sb, KERN_ERR, "can't mount with " 5301 "commit=%lu, fs mounted w/o journal", 5302 sbi->s_commit_interval / HZ); 5303 goto failed_mount3a; 5304 } 5305 if (EXT4_MOUNT_DATA_FLAGS & 5306 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 5307 ext4_msg(sb, KERN_ERR, "can't mount with " 5308 "data=, fs mounted w/o journal"); 5309 goto failed_mount3a; 5310 } 5311 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; 5312 clear_opt(sb, JOURNAL_CHECKSUM); 5313 clear_opt(sb, DATA_FLAGS); 5314 clear_opt2(sb, JOURNAL_FAST_COMMIT); 5315 sbi->s_journal = NULL; 5316 needs_recovery = 0; 5317 } 5318 5319 if (!test_opt(sb, NO_MBCACHE)) { 5320 sbi->s_ea_block_cache = ext4_xattr_create_cache(); 5321 if (!sbi->s_ea_block_cache) { 5322 ext4_msg(sb, KERN_ERR, 5323 "Failed to create ea_block_cache"); 5324 goto failed_mount_wq; 5325 } 5326 5327 if (ext4_has_feature_ea_inode(sb)) { 5328 sbi->s_ea_inode_cache = ext4_xattr_create_cache(); 5329 if (!sbi->s_ea_inode_cache) { 5330 ext4_msg(sb, KERN_ERR, 5331 "Failed to create ea_inode_cache"); 5332 goto failed_mount_wq; 5333 } 5334 } 5335 } 5336 5337 /* 5338 * Get the # of file system overhead blocks from the 5339 * superblock if present. 5340 */ 5341 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); 5342 /* ignore the precalculated value if it is ridiculous */ 5343 if (sbi->s_overhead > ext4_blocks_count(es)) 5344 sbi->s_overhead = 0; 5345 /* 5346 * If the bigalloc feature is not enabled recalculating the 5347 * overhead doesn't take long, so we might as well just redo 5348 * it to make sure we are using the correct value. 5349 */ 5350 if (!ext4_has_feature_bigalloc(sb)) 5351 sbi->s_overhead = 0; 5352 if (sbi->s_overhead == 0) { 5353 err = ext4_calculate_overhead(sb); 5354 if (err) 5355 goto failed_mount_wq; 5356 } 5357 5358 /* 5359 * The maximum number of concurrent works can be high and 5360 * concurrency isn't really necessary. Limit it to 1. 5361 */ 5362 EXT4_SB(sb)->rsv_conversion_wq = 5363 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 5364 if (!EXT4_SB(sb)->rsv_conversion_wq) { 5365 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); 5366 ret = -ENOMEM; 5367 goto failed_mount4; 5368 } 5369 5370 /* 5371 * The jbd2_journal_load will have done any necessary log recovery, 5372 * so we can safely mount the rest of the filesystem now. 
5373 */ 5374 5375 root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL); 5376 if (IS_ERR(root)) { 5377 ext4_msg(sb, KERN_ERR, "get root inode failed"); 5378 ret = PTR_ERR(root); 5379 root = NULL; 5380 goto failed_mount4; 5381 } 5382 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 5383 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 5384 iput(root); 5385 goto failed_mount4; 5386 } 5387 5388 sb->s_root = d_make_root(root); 5389 if (!sb->s_root) { 5390 ext4_msg(sb, KERN_ERR, "get root dentry failed"); 5391 ret = -ENOMEM; 5392 goto failed_mount4; 5393 } 5394 5395 ret = ext4_setup_super(sb, es, sb_rdonly(sb)); 5396 if (ret == -EROFS) { 5397 sb->s_flags |= SB_RDONLY; 5398 ret = 0; 5399 } else if (ret) 5400 goto failed_mount4a; 5401 5402 ext4_set_resv_clusters(sb); 5403 5404 if (test_opt(sb, BLOCK_VALIDITY)) { 5405 err = ext4_setup_system_zone(sb); 5406 if (err) { 5407 ext4_msg(sb, KERN_ERR, "failed to initialize system " 5408 "zone (%d)", err); 5409 goto failed_mount4a; 5410 } 5411 } 5412 ext4_fc_replay_cleanup(sb); 5413 5414 ext4_ext_init(sb); 5415 5416 /* 5417 * Enable optimize_scan if number of groups is > threshold. This can be 5418 * turned off by passing "mb_optimize_scan=0". This can also be 5419 * turned on forcefully by passing "mb_optimize_scan=1". 5420 */ 5421 if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) { 5422 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD) 5423 set_opt2(sb, MB_OPTIMIZE_SCAN); 5424 else 5425 clear_opt2(sb, MB_OPTIMIZE_SCAN); 5426 } 5427 5428 err = ext4_mb_init(sb); 5429 if (err) { 5430 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 5431 err); 5432 goto failed_mount5; 5433 } 5434 5435 /* 5436 * We can only set up the journal commit callback once 5437 * mballoc is initialized 5438 */ 5439 if (sbi->s_journal) 5440 sbi->s_journal->j_commit_callback = 5441 ext4_journal_commit_callback; 5442 5443 block = ext4_count_free_clusters(sb); 5444 ext4_free_blocks_count_set(sbi->s_es, 5445 EXT4_C2B(sbi, block)); 5446 err = percpu_counter_init(&sbi->s_freeclusters_counter, block, 5447 GFP_KERNEL); 5448 if (!err) { 5449 unsigned long freei = ext4_count_free_inodes(sb); 5450 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); 5451 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, 5452 GFP_KERNEL); 5453 } 5454 if (!err) 5455 err = percpu_counter_init(&sbi->s_dirs_counter, 5456 ext4_count_dirs(sb), GFP_KERNEL); 5457 if (!err) 5458 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, 5459 GFP_KERNEL); 5460 if (!err) 5461 err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0, 5462 GFP_KERNEL); 5463 if (!err) 5464 err = percpu_init_rwsem(&sbi->s_writepages_rwsem); 5465 5466 if (err) { 5467 ext4_msg(sb, KERN_ERR, "insufficient memory"); 5468 goto failed_mount6; 5469 } 5470 5471 if (ext4_has_feature_flex_bg(sb)) 5472 if (!ext4_fill_flex_info(sb)) { 5473 ext4_msg(sb, KERN_ERR, 5474 "unable to initialize " 5475 "flex_bg meta info!"); 5476 ret = -ENOMEM; 5477 goto failed_mount6; 5478 } 5479 5480 err = ext4_register_li_request(sb, first_not_zeroed); 5481 if (err) 5482 goto failed_mount6; 5483 5484 err = ext4_register_sysfs(sb); 5485 if (err) 5486 goto failed_mount7; 5487 5488 err = ext4_init_orphan_info(sb); 5489 if (err) 5490 goto failed_mount8; 5491 #ifdef CONFIG_QUOTA 5492 /* Enable quota usage during mount. 
*/ 5493 if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) { 5494 err = ext4_enable_quotas(sb); 5495 if (err) 5496 goto failed_mount9; 5497 } 5498 #endif /* CONFIG_QUOTA */ 5499 5500 /* 5501 * Save the original bdev mapping's wb_err value which could be 5502 * used to detect the metadata async write error. 5503 */ 5504 spin_lock_init(&sbi->s_bdev_wb_lock); 5505 errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, 5506 &sbi->s_bdev_wb_err); 5507 sb->s_bdev->bd_super = sb; 5508 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 5509 ext4_orphan_cleanup(sb, es); 5510 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 5511 /* 5512 * Update the checksum after updating free space/inode counters and 5513 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect 5514 * checksum in the buffer cache until it is written out and 5515 * e2fsprogs programs trying to open a file system immediately 5516 * after it is mounted can fail. 5517 */ 5518 ext4_superblock_csum_set(sb); 5519 if (needs_recovery) { 5520 ext4_msg(sb, KERN_INFO, "recovery complete"); 5521 err = ext4_mark_recovery_complete(sb, es); 5522 if (err) 5523 goto failed_mount9; 5524 } 5525 5526 if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) 5527 ext4_msg(sb, KERN_WARNING, 5528 "mounting with \"discard\" option, but the device does not support discard"); 5529 5530 if (es->s_error_count) 5531 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 5532 5533 /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ 5534 ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); 5535 ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); 5536 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); 5537 atomic_set(&sbi->s_warning_count, 0); 5538 atomic_set(&sbi->s_msg_count, 0); 5539 5540 return 0; 5541 5542 failed_mount9: 5543 ext4_release_orphan_info(sb); 5544 failed_mount8: 5545 ext4_unregister_sysfs(sb); 5546 kobject_put(&sbi->s_kobj); 5547 failed_mount7: 5548 ext4_unregister_li_request(sb); 5549 failed_mount6: 5550 ext4_mb_release(sb); 5551 rcu_read_lock(); 5552 flex_groups = rcu_dereference(sbi->s_flex_groups); 5553 if (flex_groups) { 5554 for (i = 0; i < sbi->s_flex_groups_allocated; i++) 5555 kvfree(flex_groups[i]); 5556 kvfree(flex_groups); 5557 } 5558 rcu_read_unlock(); 5559 percpu_counter_destroy(&sbi->s_freeclusters_counter); 5560 percpu_counter_destroy(&sbi->s_freeinodes_counter); 5561 percpu_counter_destroy(&sbi->s_dirs_counter); 5562 percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 5563 percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); 5564 percpu_free_rwsem(&sbi->s_writepages_rwsem); 5565 failed_mount5: 5566 ext4_ext_release(sb); 5567 ext4_release_system_zone(sb); 5568 failed_mount4a: 5569 dput(sb->s_root); 5570 sb->s_root = NULL; 5571 failed_mount4: 5572 ext4_msg(sb, KERN_ERR, "mount failed"); 5573 if (EXT4_SB(sb)->rsv_conversion_wq) 5574 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); 5575 failed_mount_wq: 5576 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache); 5577 sbi->s_ea_inode_cache = NULL; 5578 5579 ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 5580 sbi->s_ea_block_cache = NULL; 5581 5582 if (sbi->s_journal) { 5583 /* flush s_error_work before journal destroy. 
*/ 5584 flush_work(&sbi->s_error_work); 5585 jbd2_journal_destroy(sbi->s_journal); 5586 sbi->s_journal = NULL; 5587 } 5588 failed_mount3a: 5589 ext4_es_unregister_shrinker(sbi); 5590 failed_mount3: 5591 /* flush s_error_work before sbi destroy */ 5592 flush_work(&sbi->s_error_work); 5593 del_timer_sync(&sbi->s_err_report); 5594 ext4_stop_mmpd(sbi); 5595 ext4_group_desc_free(sbi); 5596 failed_mount: 5597 if (sbi->s_chksum_driver) 5598 crypto_free_shash(sbi->s_chksum_driver); 5599 5600 #if IS_ENABLED(CONFIG_UNICODE) 5601 utf8_unload(sb->s_encoding); 5602 #endif 5603 5604 #ifdef CONFIG_QUOTA 5605 for (i = 0; i < EXT4_MAXQUOTAS; i++) 5606 kfree(get_qf_name(sb, sbi, i)); 5607 #endif 5608 fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); 5609 /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */ 5610 brelse(sbi->s_sbh); 5611 ext4_blkdev_remove(sbi); 5612 out_fail: 5613 sb->s_fs_info = NULL; 5614 return err ? err : ret; 5615 } 5616 5617 static int ext4_fill_super(struct super_block *sb, struct fs_context *fc) 5618 { 5619 struct ext4_fs_context *ctx = fc->fs_private; 5620 struct ext4_sb_info *sbi; 5621 const char *descr; 5622 int ret; 5623 5624 sbi = ext4_alloc_sbi(sb); 5625 if (!sbi) 5626 return -ENOMEM; 5627 5628 fc->s_fs_info = sbi; 5629 5630 /* Cleanup superblock name */ 5631 strreplace(sb->s_id, '/', '!'); 5632 5633 sbi->s_sb_block = 1; /* Default super block location */ 5634 if (ctx->spec & EXT4_SPEC_s_sb_block) 5635 sbi->s_sb_block = ctx->s_sb_block; 5636 5637 ret = __ext4_fill_super(fc, sb); 5638 if (ret < 0) 5639 goto free_sbi; 5640 5641 if (sbi->s_journal) { 5642 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 5643 descr = " journalled data mode"; 5644 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 5645 descr = " ordered data mode"; 5646 else 5647 descr = " writeback data mode"; 5648 } else 5649 descr = "out journal"; 5650 5651 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) 5652 ext4_msg(sb, KERN_INFO, "mounted filesystem %pU with%s. " 5653 "Quota mode: %s.", &sb->s_uuid, descr, 5654 ext4_quota_mode(sb)); 5655 5656 /* Update the s_overhead_clusters if necessary */ 5657 ext4_update_overhead(sb, false); 5658 return 0; 5659 5660 free_sbi: 5661 ext4_free_sbi(sbi); 5662 fc->s_fs_info = NULL; 5663 return ret; 5664 } 5665 5666 static int ext4_get_tree(struct fs_context *fc) 5667 { 5668 return get_tree_bdev(fc, ext4_fill_super); 5669 } 5670 5671 /* 5672 * Setup any per-fs journal parameters now. We'll do this both on 5673 * initial mount, once the journal has been initialised but before we've 5674 * done any recovery; and again on any subsequent remount. 
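 *
 * Only the option-driven tunables are copied here: commit=N lands in
 * j_commit_interval (stored as N * HZ jiffies), min_batch_time and
 * max_batch_time throttle handle batching, and the barrier/data_err
 * options toggle the matching JBD2 flags under j_state_lock.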
5675 */ 5676 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) 5677 { 5678 struct ext4_sb_info *sbi = EXT4_SB(sb); 5679 5680 journal->j_commit_interval = sbi->s_commit_interval; 5681 journal->j_min_batch_time = sbi->s_min_batch_time; 5682 journal->j_max_batch_time = sbi->s_max_batch_time; 5683 ext4_fc_init(sb, journal); 5684 5685 write_lock(&journal->j_state_lock); 5686 if (test_opt(sb, BARRIER)) 5687 journal->j_flags |= JBD2_BARRIER; 5688 else 5689 journal->j_flags &= ~JBD2_BARRIER; 5690 if (test_opt(sb, DATA_ERR_ABORT)) 5691 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; 5692 else 5693 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; 5694 write_unlock(&journal->j_state_lock); 5695 } 5696 5697 static struct inode *ext4_get_journal_inode(struct super_block *sb, 5698 unsigned int journal_inum) 5699 { 5700 struct inode *journal_inode; 5701 5702 /* 5703 * Test for the existence of a valid inode on disk. Bad things 5704 * happen if we iget() an unused inode, as the subsequent iput() 5705 * will try to delete it. 5706 */ 5707 journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL); 5708 if (IS_ERR(journal_inode)) { 5709 ext4_msg(sb, KERN_ERR, "no journal found"); 5710 return NULL; 5711 } 5712 if (!journal_inode->i_nlink) { 5713 make_bad_inode(journal_inode); 5714 iput(journal_inode); 5715 ext4_msg(sb, KERN_ERR, "journal inode is deleted"); 5716 return NULL; 5717 } 5718 5719 ext4_debug("Journal inode found at %p: %lld bytes\n", 5720 journal_inode, journal_inode->i_size); 5721 if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) { 5722 ext4_msg(sb, KERN_ERR, "invalid journal inode"); 5723 iput(journal_inode); 5724 return NULL; 5725 } 5726 return journal_inode; 5727 } 5728 5729 static int ext4_journal_bmap(journal_t *journal, sector_t *block) 5730 { 5731 struct ext4_map_blocks map; 5732 int ret; 5733 5734 if (journal->j_inode == NULL) 5735 return 0; 5736 5737 map.m_lblk = *block; 5738 map.m_len = 1; 5739 ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0); 5740 if (ret <= 0) { 5741 ext4_msg(journal->j_inode->i_sb, KERN_CRIT, 5742 "journal bmap failed: block %llu ret %d\n", 5743 *block, ret); 5744 jbd2_journal_abort(journal, ret ? 
ret : -EIO); 5745 return ret; 5746 } 5747 *block = map.m_pblk; 5748 return 0; 5749 } 5750 5751 static journal_t *ext4_get_journal(struct super_block *sb, 5752 unsigned int journal_inum) 5753 { 5754 struct inode *journal_inode; 5755 journal_t *journal; 5756 5757 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 5758 return NULL; 5759 5760 journal_inode = ext4_get_journal_inode(sb, journal_inum); 5761 if (!journal_inode) 5762 return NULL; 5763 5764 journal = jbd2_journal_init_inode(journal_inode); 5765 if (!journal) { 5766 ext4_msg(sb, KERN_ERR, "Could not load journal inode"); 5767 iput(journal_inode); 5768 return NULL; 5769 } 5770 journal->j_private = sb; 5771 journal->j_bmap = ext4_journal_bmap; 5772 ext4_init_journal_params(sb, journal); 5773 return journal; 5774 } 5775 5776 static journal_t *ext4_get_dev_journal(struct super_block *sb, 5777 dev_t j_dev) 5778 { 5779 struct buffer_head *bh; 5780 journal_t *journal; 5781 ext4_fsblk_t start; 5782 ext4_fsblk_t len; 5783 int hblock, blocksize; 5784 ext4_fsblk_t sb_block; 5785 unsigned long offset; 5786 struct ext4_super_block *es; 5787 struct block_device *bdev; 5788 5789 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 5790 return NULL; 5791 5792 bdev = ext4_blkdev_get(j_dev, sb); 5793 if (bdev == NULL) 5794 return NULL; 5795 5796 blocksize = sb->s_blocksize; 5797 hblock = bdev_logical_block_size(bdev); 5798 if (blocksize < hblock) { 5799 ext4_msg(sb, KERN_ERR, 5800 "blocksize too small for journal device"); 5801 goto out_bdev; 5802 } 5803 5804 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; 5805 offset = EXT4_MIN_BLOCK_SIZE % blocksize; 5806 set_blocksize(bdev, blocksize); 5807 if (!(bh = __bread(bdev, sb_block, blocksize))) { 5808 ext4_msg(sb, KERN_ERR, "couldn't read superblock of " 5809 "external journal"); 5810 goto out_bdev; 5811 } 5812 5813 es = (struct ext4_super_block *) (bh->b_data + offset); 5814 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || 5815 !(le32_to_cpu(es->s_feature_incompat) & 5816 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { 5817 ext4_msg(sb, KERN_ERR, "external journal has " 5818 "bad superblock"); 5819 brelse(bh); 5820 goto out_bdev; 5821 } 5822 5823 if ((le32_to_cpu(es->s_feature_ro_compat) & 5824 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && 5825 es->s_checksum != ext4_superblock_csum(sb, es)) { 5826 ext4_msg(sb, KERN_ERR, "external journal has " 5827 "corrupt superblock"); 5828 brelse(bh); 5829 goto out_bdev; 5830 } 5831 5832 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { 5833 ext4_msg(sb, KERN_ERR, "journal UUID does not match"); 5834 brelse(bh); 5835 goto out_bdev; 5836 } 5837 5838 len = ext4_blocks_count(es); 5839 start = sb_block + 1; 5840 brelse(bh); /* we're done with the superblock */ 5841 5842 journal = jbd2_journal_init_dev(bdev, sb->s_bdev, 5843 start, len, blocksize); 5844 if (!journal) { 5845 ext4_msg(sb, KERN_ERR, "failed to create device journal"); 5846 goto out_bdev; 5847 } 5848 journal->j_private = sb; 5849 if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) { 5850 ext4_msg(sb, KERN_ERR, "I/O error on journal device"); 5851 goto out_journal; 5852 } 5853 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { 5854 ext4_msg(sb, KERN_ERR, "External journal has more than one " 5855 "user (unsupported) - %d", 5856 be32_to_cpu(journal->j_superblock->s_nr_users)); 5857 goto out_journal; 5858 } 5859 EXT4_SB(sb)->s_journal_bdev = bdev; 5860 ext4_init_journal_params(sb, journal); 5861 return journal; 5862 5863 out_journal: 5864 jbd2_journal_destroy(journal); 5865 out_bdev: 5866 
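/*
 * Common unwind point: every failure after ext4_blkdev_get() ends up
 * here to drop the journal bdev reference; out_journal above first
 * destroys a journal that was already set up by jbd2_journal_init_dev().
 */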
ext4_blkdev_put(bdev); 5867 return NULL; 5868 } 5869 5870 static int ext4_load_journal(struct super_block *sb, 5871 struct ext4_super_block *es, 5872 unsigned long journal_devnum) 5873 { 5874 journal_t *journal; 5875 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); 5876 dev_t journal_dev; 5877 int err = 0; 5878 int really_read_only; 5879 int journal_dev_ro; 5880 5881 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 5882 return -EFSCORRUPTED; 5883 5884 if (journal_devnum && 5885 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 5886 ext4_msg(sb, KERN_INFO, "external journal device major/minor " 5887 "numbers have changed"); 5888 journal_dev = new_decode_dev(journal_devnum); 5889 } else 5890 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); 5891 5892 if (journal_inum && journal_dev) { 5893 ext4_msg(sb, KERN_ERR, 5894 "filesystem has both journal inode and journal device!"); 5895 return -EINVAL; 5896 } 5897 5898 if (journal_inum) { 5899 journal = ext4_get_journal(sb, journal_inum); 5900 if (!journal) 5901 return -EINVAL; 5902 } else { 5903 journal = ext4_get_dev_journal(sb, journal_dev); 5904 if (!journal) 5905 return -EINVAL; 5906 } 5907 5908 journal_dev_ro = bdev_read_only(journal->j_dev); 5909 really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; 5910 5911 if (journal_dev_ro && !sb_rdonly(sb)) { 5912 ext4_msg(sb, KERN_ERR, 5913 "journal device read-only, try mounting with '-o ro'"); 5914 err = -EROFS; 5915 goto err_out; 5916 } 5917 5918 /* 5919 * Are we loading a blank journal or performing recovery after a 5920 * crash? For recovery, we need to check in advance whether we 5921 * can get read-write access to the device. 5922 */ 5923 if (ext4_has_feature_journal_needs_recovery(sb)) { 5924 if (sb_rdonly(sb)) { 5925 ext4_msg(sb, KERN_INFO, "INFO: recovery " 5926 "required on readonly filesystem"); 5927 if (really_read_only) { 5928 ext4_msg(sb, KERN_ERR, "write access " 5929 "unavailable, cannot proceed " 5930 "(try mounting with noload)"); 5931 err = -EROFS; 5932 goto err_out; 5933 } 5934 ext4_msg(sb, KERN_INFO, "write access will " 5935 "be enabled during recovery"); 5936 } 5937 } 5938 5939 if (!(journal->j_flags & JBD2_BARRIER)) 5940 ext4_msg(sb, KERN_INFO, "barriers disabled"); 5941 5942 if (!ext4_has_feature_journal_needs_recovery(sb)) 5943 err = jbd2_journal_wipe(journal, !really_read_only); 5944 if (!err) { 5945 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); 5946 5947 if (save) 5948 memcpy(save, ((char *) es) + 5949 EXT4_S_ERR_START, EXT4_S_ERR_LEN); 5950 err = jbd2_journal_load(journal); 5951 if (save) 5952 memcpy(((char *) es) + EXT4_S_ERR_START, 5953 save, EXT4_S_ERR_LEN); 5954 kfree(save); 5955 es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state & 5956 EXT4_ERROR_FS); 5957 /* Write out restored error information to the superblock */ 5958 if (!bdev_read_only(sb->s_bdev)) { 5959 int err2; 5960 err2 = ext4_commit_super(sb); 5961 err = err ? 
: err2; 5962 } 5963 } 5964 5965 if (err) { 5966 ext4_msg(sb, KERN_ERR, "error loading journal"); 5967 goto err_out; 5968 } 5969 5970 EXT4_SB(sb)->s_journal = journal; 5971 err = ext4_clear_journal_err(sb, es); 5972 if (err) { 5973 EXT4_SB(sb)->s_journal = NULL; 5974 jbd2_journal_destroy(journal); 5975 return err; 5976 } 5977 5978 if (!really_read_only && journal_devnum && 5979 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 5980 es->s_journal_dev = cpu_to_le32(journal_devnum); 5981 ext4_commit_super(sb); 5982 } 5983 if (!really_read_only && journal_inum && 5984 journal_inum != le32_to_cpu(es->s_journal_inum)) { 5985 es->s_journal_inum = cpu_to_le32(journal_inum); 5986 ext4_commit_super(sb); 5987 } 5988 5989 return 0; 5990 5991 err_out: 5992 jbd2_journal_destroy(journal); 5993 return err; 5994 } 5995 5996 /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */ 5997 static void ext4_update_super(struct super_block *sb) 5998 { 5999 struct ext4_sb_info *sbi = EXT4_SB(sb); 6000 struct ext4_super_block *es = sbi->s_es; 6001 struct buffer_head *sbh = sbi->s_sbh; 6002 6003 lock_buffer(sbh); 6004 /* 6005 * If the file system is mounted read-only, don't update the 6006 * superblock write time. This avoids updating the superblock 6007 * write time when we are mounting the root file system 6008 * read/only but we need to replay the journal; at that point, 6009 * for people who are east of GMT and who make their clock 6010 * tick in localtime for Windows bug-for-bug compatibility, 6011 * the clock is set in the future, and this will cause e2fsck 6012 * to complain and force a full file system check. 6013 */ 6014 if (!(sb->s_flags & SB_RDONLY)) 6015 ext4_update_tstamp(es, s_wtime); 6016 es->s_kbytes_written = 6017 cpu_to_le64(sbi->s_kbytes_written + 6018 ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) - 6019 sbi->s_sectors_written_start) >> 1)); 6020 if (percpu_counter_initialized(&sbi->s_freeclusters_counter)) 6021 ext4_free_blocks_count_set(es, 6022 EXT4_C2B(sbi, percpu_counter_sum_positive( 6023 &sbi->s_freeclusters_counter))); 6024 if (percpu_counter_initialized(&sbi->s_freeinodes_counter)) 6025 es->s_free_inodes_count = 6026 cpu_to_le32(percpu_counter_sum_positive( 6027 &sbi->s_freeinodes_counter)); 6028 /* Copy error information to the on-disk superblock */ 6029 spin_lock(&sbi->s_error_lock); 6030 if (sbi->s_add_error_count > 0) { 6031 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 6032 if (!es->s_first_error_time && !es->s_first_error_time_hi) { 6033 __ext4_update_tstamp(&es->s_first_error_time, 6034 &es->s_first_error_time_hi, 6035 sbi->s_first_error_time); 6036 strncpy(es->s_first_error_func, sbi->s_first_error_func, 6037 sizeof(es->s_first_error_func)); 6038 es->s_first_error_line = 6039 cpu_to_le32(sbi->s_first_error_line); 6040 es->s_first_error_ino = 6041 cpu_to_le32(sbi->s_first_error_ino); 6042 es->s_first_error_block = 6043 cpu_to_le64(sbi->s_first_error_block); 6044 es->s_first_error_errcode = 6045 ext4_errno_to_code(sbi->s_first_error_code); 6046 } 6047 __ext4_update_tstamp(&es->s_last_error_time, 6048 &es->s_last_error_time_hi, 6049 sbi->s_last_error_time); 6050 strncpy(es->s_last_error_func, sbi->s_last_error_func, 6051 sizeof(es->s_last_error_func)); 6052 es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line); 6053 es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino); 6054 es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block); 6055 es->s_last_error_errcode = 6056 ext4_errno_to_code(sbi->s_last_error_code); 6057 /* 6058 * Start the daily error reporting 
function if it hasn't been 6059 * started already 6060 */ 6061 if (!es->s_error_count) 6062 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); 6063 le32_add_cpu(&es->s_error_count, sbi->s_add_error_count); 6064 sbi->s_add_error_count = 0; 6065 } 6066 spin_unlock(&sbi->s_error_lock); 6067 6068 ext4_superblock_csum_set(sb); 6069 unlock_buffer(sbh); 6070 } 6071 6072 static int ext4_commit_super(struct super_block *sb) 6073 { 6074 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; 6075 6076 if (!sbh) 6077 return -EINVAL; 6078 if (block_device_ejected(sb)) 6079 return -ENODEV; 6080 6081 ext4_update_super(sb); 6082 6083 lock_buffer(sbh); 6084 /* Buffer got discarded which means block device got invalidated */ 6085 if (!buffer_mapped(sbh)) { 6086 unlock_buffer(sbh); 6087 return -EIO; 6088 } 6089 6090 if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { 6091 /* 6092 * Oh, dear. A previous attempt to write the 6093 * superblock failed. This could happen because the 6094 * USB device was yanked out. Or it could happen to 6095 * be a transient write error and maybe the block will 6096 * be remapped. Nothing we can do but to retry the 6097 * write and hope for the best. 6098 */ 6099 ext4_msg(sb, KERN_ERR, "previous I/O error to " 6100 "superblock detected"); 6101 clear_buffer_write_io_error(sbh); 6102 set_buffer_uptodate(sbh); 6103 } 6104 get_bh(sbh); 6105 /* Clear potential dirty bit if it was journalled update */ 6106 clear_buffer_dirty(sbh); 6107 sbh->b_end_io = end_buffer_write_sync; 6108 submit_bh(REQ_OP_WRITE | REQ_SYNC | 6109 (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh); 6110 wait_on_buffer(sbh); 6111 if (buffer_write_io_error(sbh)) { 6112 ext4_msg(sb, KERN_ERR, "I/O error while writing " 6113 "superblock"); 6114 clear_buffer_write_io_error(sbh); 6115 set_buffer_uptodate(sbh); 6116 return -EIO; 6117 } 6118 return 0; 6119 } 6120 6121 /* 6122 * Have we just finished recovery? If so, and if we are mounting (or 6123 * remounting) the filesystem readonly, then we will end up with a 6124 * consistent fs on disk. Record that fact. 6125 */ 6126 static int ext4_mark_recovery_complete(struct super_block *sb, 6127 struct ext4_super_block *es) 6128 { 6129 int err; 6130 journal_t *journal = EXT4_SB(sb)->s_journal; 6131 6132 if (!ext4_has_feature_journal(sb)) { 6133 if (journal != NULL) { 6134 ext4_error(sb, "Journal got removed while the fs was " 6135 "mounted!"); 6136 return -EFSCORRUPTED; 6137 } 6138 return 0; 6139 } 6140 jbd2_journal_lock_updates(journal); 6141 err = jbd2_journal_flush(journal, 0); 6142 if (err < 0) 6143 goto out; 6144 6145 if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) || 6146 ext4_has_feature_orphan_present(sb))) { 6147 if (!ext4_orphan_file_empty(sb)) { 6148 ext4_error(sb, "Orphan file not empty on read-only fs."); 6149 err = -EFSCORRUPTED; 6150 goto out; 6151 } 6152 ext4_clear_feature_journal_needs_recovery(sb); 6153 ext4_clear_feature_orphan_present(sb); 6154 ext4_commit_super(sb); 6155 } 6156 out: 6157 jbd2_journal_unlock_updates(journal); 6158 return err; 6159 } 6160 6161 /* 6162 * If we are mounting (or read-write remounting) a filesystem whose journal 6163 * has recorded an error from a previous lifetime, move that error to the 6164 * main filesystem now. 
6165 */
6166 static int ext4_clear_journal_err(struct super_block *sb,
6167 struct ext4_super_block *es)
6168 {
6169 journal_t *journal;
6170 int j_errno;
6171 const char *errstr;
6172
6173 if (!ext4_has_feature_journal(sb)) {
6174 ext4_error(sb, "Journal got removed while the fs was mounted!");
6175 return -EFSCORRUPTED;
6176 }
6177
6178 journal = EXT4_SB(sb)->s_journal;
6179
6180 /*
6181 * Now check for any error status which may have been recorded in the
6182 * journal by a prior ext4_error() or ext4_abort()
6183 */
6184
6185 j_errno = jbd2_journal_errno(journal);
6186 if (j_errno) {
6187 char nbuf[16];
6188
6189 errstr = ext4_decode_error(sb, j_errno, nbuf);
6190 ext4_warning(sb, "Filesystem error recorded "
6191 "from previous mount: %s", errstr);
6192
6193 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
6194 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6195 j_errno = ext4_commit_super(sb);
6196 if (j_errno)
6197 return j_errno;
6198 ext4_warning(sb, "Marked fs in need of filesystem check.");
6199
6200 jbd2_journal_clear_err(journal);
6201 jbd2_journal_update_sb_errno(journal);
6202 }
6203 return 0;
6204 }
6205
6206 /*
6207 * Force the running and committing transactions to commit,
6208 * and wait on the commit.
6209 */
6210 int ext4_force_commit(struct super_block *sb)
6211 {
6212 journal_t *journal;
6213
6214 if (sb_rdonly(sb))
6215 return 0;
6216
6217 journal = EXT4_SB(sb)->s_journal;
6218 return ext4_journal_force_commit(journal);
6219 }
6220
6221 static int ext4_sync_fs(struct super_block *sb, int wait)
6222 {
6223 int ret = 0;
6224 tid_t target;
6225 bool needs_barrier = false;
6226 struct ext4_sb_info *sbi = EXT4_SB(sb);
6227
6228 if (unlikely(ext4_forced_shutdown(sbi)))
6229 return 0;
6230
6231 trace_ext4_sync_fs(sb, wait);
6232 flush_workqueue(sbi->rsv_conversion_wq);
6233 /*
6234 * Writeback quota in non-journalled quota case - journalled quota has
6235 * no dirty dquots
6236 */
6237 dquot_writeback_dquots(sb, -1);
6238 /*
6239 * Data writeback is possible w/o journal transaction, so the barrier
6240 * must be sent at the end of the function. But we can skip it if
6241 * transaction_commit will do it for us.
6242 */
6243 if (sbi->s_journal) {
6244 target = jbd2_get_latest_transaction(sbi->s_journal);
6245 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
6246 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
6247 needs_barrier = true;
6248
6249 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
6250 if (wait)
6251 ret = jbd2_log_wait_commit(sbi->s_journal,
6252 target);
6253 }
6254 } else if (wait && test_opt(sb, BARRIER))
6255 needs_barrier = true;
6256 if (needs_barrier) {
6257 int err;
6258 err = blkdev_issue_flush(sb->s_bdev);
6259 if (!ret)
6260 ret = err;
6261 }
6262
6263 return ret;
6264 }
6265
6266 /*
6267 * LVM calls this function before a (read-only) snapshot is created. This
6268 * gives us a chance to flush the journal completely and mark the fs clean.
6269 *
6270 * Note that this function alone cannot bring the filesystem into a
6271 * clean state; it relies on the upper layers to stop all data and
6272 * metadata modifications.
6273 */
6274 static int ext4_freeze(struct super_block *sb)
6275 {
6276 int error = 0;
6277 journal_t *journal;
6278
6279 if (sb_rdonly(sb))
6280 return 0;
6281
6282 journal = EXT4_SB(sb)->s_journal;
6283
6284 if (journal) {
6285 /* Now we set up the journal barrier. */
6286 jbd2_journal_lock_updates(journal);
6287
6288 /*
6289 * Don't clear the needs_recovery flag if we failed to
6290 * flush the journal.
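 * Leaving needs_recovery set keeps the snapshot self-describing: if it
 * is taken despite the failed flush, journal recovery still runs the
 * next time that image is mounted.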
6291 */ 6292 error = jbd2_journal_flush(journal, 0); 6293 if (error < 0) 6294 goto out; 6295 6296 /* Journal blocked and flushed, clear needs_recovery flag. */ 6297 ext4_clear_feature_journal_needs_recovery(sb); 6298 if (ext4_orphan_file_empty(sb)) 6299 ext4_clear_feature_orphan_present(sb); 6300 } 6301 6302 error = ext4_commit_super(sb); 6303 out: 6304 if (journal) 6305 /* we rely on upper layer to stop further updates */ 6306 jbd2_journal_unlock_updates(journal); 6307 return error; 6308 } 6309 6310 /* 6311 * Called by LVM after the snapshot is done. We need to reset the RECOVER 6312 * flag here, even though the filesystem is not technically dirty yet. 6313 */ 6314 static int ext4_unfreeze(struct super_block *sb) 6315 { 6316 if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb))) 6317 return 0; 6318 6319 if (EXT4_SB(sb)->s_journal) { 6320 /* Reset the needs_recovery flag before the fs is unlocked. */ 6321 ext4_set_feature_journal_needs_recovery(sb); 6322 if (ext4_has_feature_orphan_file(sb)) 6323 ext4_set_feature_orphan_present(sb); 6324 } 6325 6326 ext4_commit_super(sb); 6327 return 0; 6328 } 6329 6330 /* 6331 * Structure to save mount options for ext4_remount's benefit 6332 */ 6333 struct ext4_mount_options { 6334 unsigned long s_mount_opt; 6335 unsigned long s_mount_opt2; 6336 kuid_t s_resuid; 6337 kgid_t s_resgid; 6338 unsigned long s_commit_interval; 6339 u32 s_min_batch_time, s_max_batch_time; 6340 #ifdef CONFIG_QUOTA 6341 int s_jquota_fmt; 6342 char *s_qf_names[EXT4_MAXQUOTAS]; 6343 #endif 6344 }; 6345 6346 static int __ext4_remount(struct fs_context *fc, struct super_block *sb) 6347 { 6348 struct ext4_fs_context *ctx = fc->fs_private; 6349 struct ext4_super_block *es; 6350 struct ext4_sb_info *sbi = EXT4_SB(sb); 6351 unsigned long old_sb_flags; 6352 struct ext4_mount_options old_opts; 6353 ext4_group_t g; 6354 int err = 0; 6355 #ifdef CONFIG_QUOTA 6356 int enable_quota = 0; 6357 int i, j; 6358 char *to_free[EXT4_MAXQUOTAS]; 6359 #endif 6360 6361 6362 /* Store the original options */ 6363 old_sb_flags = sb->s_flags; 6364 old_opts.s_mount_opt = sbi->s_mount_opt; 6365 old_opts.s_mount_opt2 = sbi->s_mount_opt2; 6366 old_opts.s_resuid = sbi->s_resuid; 6367 old_opts.s_resgid = sbi->s_resgid; 6368 old_opts.s_commit_interval = sbi->s_commit_interval; 6369 old_opts.s_min_batch_time = sbi->s_min_batch_time; 6370 old_opts.s_max_batch_time = sbi->s_max_batch_time; 6371 #ifdef CONFIG_QUOTA 6372 old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 6373 for (i = 0; i < EXT4_MAXQUOTAS; i++) 6374 if (sbi->s_qf_names[i]) { 6375 char *qf_name = get_qf_name(sb, sbi, i); 6376 6377 old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL); 6378 if (!old_opts.s_qf_names[i]) { 6379 for (j = 0; j < i; j++) 6380 kfree(old_opts.s_qf_names[j]); 6381 return -ENOMEM; 6382 } 6383 } else 6384 old_opts.s_qf_names[i] = NULL; 6385 #endif 6386 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) { 6387 if (sbi->s_journal && sbi->s_journal->j_task->io_context) 6388 ctx->journal_ioprio = 6389 sbi->s_journal->j_task->io_context->ioprio; 6390 else 6391 ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 6392 6393 } 6394 6395 ext4_apply_options(fc, sb); 6396 6397 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ 6398 test_opt(sb, JOURNAL_CHECKSUM)) { 6399 ext4_msg(sb, KERN_ERR, "changing journal_checksum " 6400 "during remount not supported; ignoring"); 6401 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; 6402 } 6403 6404 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 6405 if (test_opt2(sb, EXPLICIT_DELALLOC)) { 6406 
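/*
 * data=journal routes all data through the journal, which is
 * fundamentally at odds with delayed allocation, so an explicit
 * delalloc request cannot be honoured on remount either.
 */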
ext4_msg(sb, KERN_ERR, "can't mount with " 6407 "both data=journal and delalloc"); 6408 err = -EINVAL; 6409 goto restore_opts; 6410 } 6411 if (test_opt(sb, DIOREAD_NOLOCK)) { 6412 ext4_msg(sb, KERN_ERR, "can't mount with " 6413 "both data=journal and dioread_nolock"); 6414 err = -EINVAL; 6415 goto restore_opts; 6416 } 6417 } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { 6418 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 6419 ext4_msg(sb, KERN_ERR, "can't mount with " 6420 "journal_async_commit in data=ordered mode"); 6421 err = -EINVAL; 6422 goto restore_opts; 6423 } 6424 } 6425 6426 if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) { 6427 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount"); 6428 err = -EINVAL; 6429 goto restore_opts; 6430 } 6431 6432 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 6433 ext4_abort(sb, ESHUTDOWN, "Abort forced by user"); 6434 6435 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 6436 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 6437 6438 es = sbi->s_es; 6439 6440 if (sbi->s_journal) { 6441 ext4_init_journal_params(sb, sbi->s_journal); 6442 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 6443 } 6444 6445 /* Flush outstanding errors before changing fs state */ 6446 flush_work(&sbi->s_error_work); 6447 6448 if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { 6449 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) { 6450 err = -EROFS; 6451 goto restore_opts; 6452 } 6453 6454 if (fc->sb_flags & SB_RDONLY) { 6455 err = sync_filesystem(sb); 6456 if (err < 0) 6457 goto restore_opts; 6458 err = dquot_suspend(sb, -1); 6459 if (err < 0) 6460 goto restore_opts; 6461 6462 /* 6463 * First of all, the unconditional stuff we have to do 6464 * to disable replay of the journal when we next remount 6465 */ 6466 sb->s_flags |= SB_RDONLY; 6467 6468 /* 6469 * OK, test if we are remounting a valid rw partition 6470 * readonly, and if so set the rdonly flag and then 6471 * mark the partition as valid again. 6472 */ 6473 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && 6474 (sbi->s_mount_state & EXT4_VALID_FS)) 6475 es->s_state = cpu_to_le16(sbi->s_mount_state); 6476 6477 if (sbi->s_journal) { 6478 /* 6479 * We let remount-ro finish even if marking fs 6480 * as clean failed... 6481 */ 6482 ext4_mark_recovery_complete(sb, es); 6483 } 6484 } else { 6485 /* Make sure we can mount this feature set readwrite */ 6486 if (ext4_has_feature_readonly(sb) || 6487 !ext4_feature_set_ok(sb, 0)) { 6488 err = -EROFS; 6489 goto restore_opts; 6490 } 6491 /* 6492 * Make sure the group descriptor checksums 6493 * are sane. If they aren't, refuse to remount r/w. 6494 */ 6495 for (g = 0; g < sbi->s_groups_count; g++) { 6496 struct ext4_group_desc *gdp = 6497 ext4_get_group_desc(sb, g, NULL); 6498 6499 if (!ext4_group_desc_csum_verify(sb, g, gdp)) { 6500 ext4_msg(sb, KERN_ERR, 6501 "ext4_remount: Checksum for group %u failed (%u!=%u)", 6502 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), 6503 le16_to_cpu(gdp->bg_checksum)); 6504 err = -EFSBADCRC; 6505 goto restore_opts; 6506 } 6507 } 6508 6509 /* 6510 * If we have an unprocessed orphan list hanging 6511 * around from a previously readonly bdev mount, 6512 * require a full umount/remount for now. 6513 */ 6514 if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) { 6515 ext4_msg(sb, KERN_WARNING, "Couldn't " 6516 "remount RDWR because of unprocessed " 6517 "orphan inode list. 
Please " 6518 "umount/remount instead"); 6519 err = -EINVAL; 6520 goto restore_opts; 6521 } 6522 6523 /* 6524 * Mounting a RDONLY partition read-write, so reread 6525 * and store the current valid flag. (It may have 6526 * been changed by e2fsck since we originally mounted 6527 * the partition.) 6528 */ 6529 if (sbi->s_journal) { 6530 err = ext4_clear_journal_err(sb, es); 6531 if (err) 6532 goto restore_opts; 6533 } 6534 sbi->s_mount_state = (le16_to_cpu(es->s_state) & 6535 ~EXT4_FC_REPLAY); 6536 6537 err = ext4_setup_super(sb, es, 0); 6538 if (err) 6539 goto restore_opts; 6540 6541 sb->s_flags &= ~SB_RDONLY; 6542 if (ext4_has_feature_mmp(sb)) 6543 if (ext4_multi_mount_protect(sb, 6544 le64_to_cpu(es->s_mmp_block))) { 6545 err = -EROFS; 6546 goto restore_opts; 6547 } 6548 #ifdef CONFIG_QUOTA 6549 enable_quota = 1; 6550 #endif 6551 } 6552 } 6553 6554 /* 6555 * Reinitialize lazy itable initialization thread based on 6556 * current settings 6557 */ 6558 if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) 6559 ext4_unregister_li_request(sb); 6560 else { 6561 ext4_group_t first_not_zeroed; 6562 first_not_zeroed = ext4_has_uninit_itable(sb); 6563 ext4_register_li_request(sb, first_not_zeroed); 6564 } 6565 6566 /* 6567 * Handle creation of system zone data early because it can fail. 6568 * Releasing of existing data is done when we are sure remount will 6569 * succeed. 6570 */ 6571 if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) { 6572 err = ext4_setup_system_zone(sb); 6573 if (err) 6574 goto restore_opts; 6575 } 6576 6577 if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { 6578 err = ext4_commit_super(sb); 6579 if (err) 6580 goto restore_opts; 6581 } 6582 6583 #ifdef CONFIG_QUOTA 6584 /* Release old quota file names */ 6585 for (i = 0; i < EXT4_MAXQUOTAS; i++) 6586 kfree(old_opts.s_qf_names[i]); 6587 if (enable_quota) { 6588 if (sb_any_quota_suspended(sb)) 6589 dquot_resume(sb, -1); 6590 else if (ext4_has_feature_quota(sb)) { 6591 err = ext4_enable_quotas(sb); 6592 if (err) 6593 goto restore_opts; 6594 } 6595 } 6596 #endif 6597 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 6598 ext4_release_system_zone(sb); 6599 6600 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 6601 ext4_stop_mmpd(sbi); 6602 6603 return 0; 6604 6605 restore_opts: 6606 sb->s_flags = old_sb_flags; 6607 sbi->s_mount_opt = old_opts.s_mount_opt; 6608 sbi->s_mount_opt2 = old_opts.s_mount_opt2; 6609 sbi->s_resuid = old_opts.s_resuid; 6610 sbi->s_resgid = old_opts.s_resgid; 6611 sbi->s_commit_interval = old_opts.s_commit_interval; 6612 sbi->s_min_batch_time = old_opts.s_min_batch_time; 6613 sbi->s_max_batch_time = old_opts.s_max_batch_time; 6614 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 6615 ext4_release_system_zone(sb); 6616 #ifdef CONFIG_QUOTA 6617 sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 6618 for (i = 0; i < EXT4_MAXQUOTAS; i++) { 6619 to_free[i] = get_qf_name(sb, sbi, i); 6620 rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]); 6621 } 6622 synchronize_rcu(); 6623 for (i = 0; i < EXT4_MAXQUOTAS; i++) 6624 kfree(to_free[i]); 6625 #endif 6626 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 6627 ext4_stop_mmpd(sbi); 6628 return err; 6629 } 6630 6631 static int ext4_reconfigure(struct fs_context *fc) 6632 { 6633 struct super_block *sb = fc->root->d_sb; 6634 int ret; 6635 6636 fc->s_fs_info = EXT4_SB(sb); 6637 6638 ret = ext4_check_opt_consistency(fc, sb); 6639 if (ret < 0) 6640 return ret; 6641 6642 ret = __ext4_remount(fc, sb); 6643 if (ret < 0) 6644 return ret; 6645 6646 
ext4_msg(sb, KERN_INFO, "re-mounted %pU. Quota mode: %s.", 6647 &sb->s_uuid, ext4_quota_mode(sb)); 6648 6649 return 0; 6650 } 6651 6652 #ifdef CONFIG_QUOTA 6653 static int ext4_statfs_project(struct super_block *sb, 6654 kprojid_t projid, struct kstatfs *buf) 6655 { 6656 struct kqid qid; 6657 struct dquot *dquot; 6658 u64 limit; 6659 u64 curblock; 6660 6661 qid = make_kqid_projid(projid); 6662 dquot = dqget(sb, qid); 6663 if (IS_ERR(dquot)) 6664 return PTR_ERR(dquot); 6665 spin_lock(&dquot->dq_dqb_lock); 6666 6667 limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, 6668 dquot->dq_dqb.dqb_bhardlimit); 6669 limit >>= sb->s_blocksize_bits; 6670 6671 if (limit && buf->f_blocks > limit) { 6672 curblock = (dquot->dq_dqb.dqb_curspace + 6673 dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; 6674 buf->f_blocks = limit; 6675 buf->f_bfree = buf->f_bavail = 6676 (buf->f_blocks > curblock) ? 6677 (buf->f_blocks - curblock) : 0; 6678 } 6679 6680 limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, 6681 dquot->dq_dqb.dqb_ihardlimit); 6682 if (limit && buf->f_files > limit) { 6683 buf->f_files = limit; 6684 buf->f_ffree = 6685 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? 6686 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; 6687 } 6688 6689 spin_unlock(&dquot->dq_dqb_lock); 6690 dqput(dquot); 6691 return 0; 6692 } 6693 #endif 6694 6695 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) 6696 { 6697 struct super_block *sb = dentry->d_sb; 6698 struct ext4_sb_info *sbi = EXT4_SB(sb); 6699 struct ext4_super_block *es = sbi->s_es; 6700 ext4_fsblk_t overhead = 0, resv_blocks; 6701 s64 bfree; 6702 resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); 6703 6704 if (!test_opt(sb, MINIX_DF)) 6705 overhead = sbi->s_overhead; 6706 6707 buf->f_type = EXT4_SUPER_MAGIC; 6708 buf->f_bsize = sb->s_blocksize; 6709 buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); 6710 bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - 6711 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); 6712 /* prevent underflow in case that few free space is available */ 6713 buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); 6714 buf->f_bavail = buf->f_bfree - 6715 (ext4_r_blocks_count(es) + resv_blocks); 6716 if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) 6717 buf->f_bavail = 0; 6718 buf->f_files = le32_to_cpu(es->s_inodes_count); 6719 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); 6720 buf->f_namelen = EXT4_NAME_LEN; 6721 buf->f_fsid = uuid_to_fsid(es->s_uuid); 6722 6723 #ifdef CONFIG_QUOTA 6724 if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) && 6725 sb_has_quota_limits_enabled(sb, PRJQUOTA)) 6726 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf); 6727 #endif 6728 return 0; 6729 } 6730 6731 6732 #ifdef CONFIG_QUOTA 6733 6734 /* 6735 * Helper functions so that transaction is started before we acquire dqio_sem 6736 * to keep correct lock ordering of transaction > dqio_sem 6737 */ 6738 static inline struct inode *dquot_to_inode(struct dquot *dquot) 6739 { 6740 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 6741 } 6742 6743 static int ext4_write_dquot(struct dquot *dquot) 6744 { 6745 int ret, err; 6746 handle_t *handle; 6747 struct inode *inode; 6748 6749 inode = dquot_to_inode(dquot); 6750 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 6751 EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 6752 if (IS_ERR(handle)) 6753 return PTR_ERR(handle); 6754 ret = dquot_commit(dquot); 6755 err = 
ext4_journal_stop(handle); 6756 if (!ret) 6757 ret = err; 6758 return ret; 6759 } 6760 6761 static int ext4_acquire_dquot(struct dquot *dquot) 6762 { 6763 int ret, err; 6764 handle_t *handle; 6765 6766 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6767 EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 6768 if (IS_ERR(handle)) 6769 return PTR_ERR(handle); 6770 ret = dquot_acquire(dquot); 6771 err = ext4_journal_stop(handle); 6772 if (!ret) 6773 ret = err; 6774 return ret; 6775 } 6776 6777 static int ext4_release_dquot(struct dquot *dquot) 6778 { 6779 int ret, err; 6780 handle_t *handle; 6781 6782 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6783 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); 6784 if (IS_ERR(handle)) { 6785 /* Release dquot anyway to avoid endless cycle in dqput() */ 6786 dquot_release(dquot); 6787 return PTR_ERR(handle); 6788 } 6789 ret = dquot_release(dquot); 6790 err = ext4_journal_stop(handle); 6791 if (!ret) 6792 ret = err; 6793 return ret; 6794 } 6795 6796 static int ext4_mark_dquot_dirty(struct dquot *dquot) 6797 { 6798 struct super_block *sb = dquot->dq_sb; 6799 6800 if (ext4_is_quota_journalled(sb)) { 6801 dquot_mark_dquot_dirty(dquot); 6802 return ext4_write_dquot(dquot); 6803 } else { 6804 return dquot_mark_dquot_dirty(dquot); 6805 } 6806 } 6807 6808 static int ext4_write_info(struct super_block *sb, int type) 6809 { 6810 int ret, err; 6811 handle_t *handle; 6812 6813 /* Data block + inode block */ 6814 handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2); 6815 if (IS_ERR(handle)) 6816 return PTR_ERR(handle); 6817 ret = dquot_commit_info(sb, type); 6818 err = ext4_journal_stop(handle); 6819 if (!ret) 6820 ret = err; 6821 return ret; 6822 } 6823 6824 static void lockdep_set_quota_inode(struct inode *inode, int subclass) 6825 { 6826 struct ext4_inode_info *ei = EXT4_I(inode); 6827 6828 /* The first argument of lockdep_set_subclass has to be 6829 * *exactly* the same as the argument to init_rwsem() --- in 6830 * this case, in init_once() --- or lockdep gets unhappy 6831 * because the name of the lock is set using the 6832 * stringification of the argument to init_rwsem(). 6833 */ 6834 (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ 6835 lockdep_set_subclass(&ei->i_data_sem, subclass); 6836 } 6837 6838 /* 6839 * Standard function to be called on quota_on 6840 */ 6841 static int ext4_quota_on(struct super_block *sb, int type, int format_id, 6842 const struct path *path) 6843 { 6844 int err; 6845 6846 if (!test_opt(sb, QUOTA)) 6847 return -EINVAL; 6848 6849 /* Quotafile not on the same filesystem? */ 6850 if (path->dentry->d_sb != sb) 6851 return -EXDEV; 6852 6853 /* Quota already enabled for this file? */ 6854 if (IS_NOQUOTA(d_inode(path->dentry))) 6855 return -EBUSY; 6856 6857 /* Journaling quota? */ 6858 if (EXT4_SB(sb)->s_qf_names[type]) { 6859 /* Quotafile not in fs root? */ 6860 if (path->dentry->d_parent != sb->s_root) 6861 ext4_msg(sb, KERN_WARNING, 6862 "Quota file not on filesystem root. " 6863 "Journaled quota will not work"); 6864 sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY; 6865 } else { 6866 /* 6867 * Clear the flag just in case mount options changed since 6868 * last time. 6869 */ 6870 sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY; 6871 } 6872 6873 /* 6874 * When we journal data on quota file, we have to flush journal to see 6875 * all updates to the file when we bypass pagecache... 
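 * (ext4_quota_read() below pulls quota data through the buffer cache via
 * ext4_bread(), so blocks that so far exist only in the journal would be
 * missed without this flush.)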
6876 */ 6877 if (EXT4_SB(sb)->s_journal && 6878 ext4_should_journal_data(d_inode(path->dentry))) { 6879 /* 6880 * We don't need to lock updates but journal_flush() could 6881 * otherwise be livelocked... 6882 */ 6883 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); 6884 err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0); 6885 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 6886 if (err) 6887 return err; 6888 } 6889 6890 lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); 6891 err = dquot_quota_on(sb, type, format_id, path); 6892 if (!err) { 6893 struct inode *inode = d_inode(path->dentry); 6894 handle_t *handle; 6895 6896 /* 6897 * Set inode flags to prevent userspace from messing with quota 6898 * files. If this fails, we return success anyway since quotas 6899 * are already enabled and this is not a hard failure. 6900 */ 6901 inode_lock(inode); 6902 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 6903 if (IS_ERR(handle)) 6904 goto unlock_inode; 6905 EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL; 6906 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, 6907 S_NOATIME | S_IMMUTABLE); 6908 err = ext4_mark_inode_dirty(handle, inode); 6909 ext4_journal_stop(handle); 6910 unlock_inode: 6911 inode_unlock(inode); 6912 if (err) 6913 dquot_quota_off(sb, type); 6914 } 6915 if (err) 6916 lockdep_set_quota_inode(path->dentry->d_inode, 6917 I_DATA_SEM_NORMAL); 6918 return err; 6919 } 6920 6921 static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) 6922 { 6923 switch (type) { 6924 case USRQUOTA: 6925 return qf_inum == EXT4_USR_QUOTA_INO; 6926 case GRPQUOTA: 6927 return qf_inum == EXT4_GRP_QUOTA_INO; 6928 case PRJQUOTA: 6929 return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; 6930 default: 6931 BUG(); 6932 } 6933 } 6934 6935 static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 6936 unsigned int flags) 6937 { 6938 int err; 6939 struct inode *qf_inode; 6940 unsigned long qf_inums[EXT4_MAXQUOTAS] = { 6941 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 6942 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 6943 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 6944 }; 6945 6946 BUG_ON(!ext4_has_feature_quota(sb)); 6947 6948 if (!qf_inums[type]) 6949 return -EPERM; 6950 6951 if (!ext4_check_quota_inum(type, qf_inums[type])) { 6952 ext4_error(sb, "Bad quota inum: %lu, type: %d", 6953 qf_inums[type], type); 6954 return -EUCLEAN; 6955 } 6956 6957 qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); 6958 if (IS_ERR(qf_inode)) { 6959 ext4_error(sb, "Bad quota inode: %lu, type: %d", 6960 qf_inums[type], type); 6961 return PTR_ERR(qf_inode); 6962 } 6963 6964 /* Don't account quota for quota files to avoid recursion */ 6965 qf_inode->i_flags |= S_NOQUOTA; 6966 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA); 6967 err = dquot_load_quota_inode(qf_inode, type, format_id, flags); 6968 if (err) 6969 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL); 6970 iput(qf_inode); 6971 6972 return err; 6973 } 6974 6975 /* Enable usage tracking for all quota types. 
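 * This is the "quota" feature path: per-type inode numbers come straight
 * from the superblock (s_usr_quota_inum and friends), so no visible
 * quota files are involved.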
*/ 6976 int ext4_enable_quotas(struct super_block *sb) 6977 { 6978 int type, err = 0; 6979 unsigned long qf_inums[EXT4_MAXQUOTAS] = { 6980 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 6981 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 6982 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 6983 }; 6984 bool quota_mopt[EXT4_MAXQUOTAS] = { 6985 test_opt(sb, USRQUOTA), 6986 test_opt(sb, GRPQUOTA), 6987 test_opt(sb, PRJQUOTA), 6988 }; 6989 6990 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 6991 for (type = 0; type < EXT4_MAXQUOTAS; type++) { 6992 if (qf_inums[type]) { 6993 err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 6994 DQUOT_USAGE_ENABLED | 6995 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 6996 if (err) { 6997 ext4_warning(sb, 6998 "Failed to enable quota tracking " 6999 "(type=%d, err=%d, ino=%lu). " 7000 "Please run e2fsck to fix.", type, 7001 err, qf_inums[type]); 7002 for (type--; type >= 0; type--) { 7003 struct inode *inode; 7004 7005 inode = sb_dqopt(sb)->files[type]; 7006 if (inode) 7007 inode = igrab(inode); 7008 dquot_quota_off(sb, type); 7009 if (inode) { 7010 lockdep_set_quota_inode(inode, 7011 I_DATA_SEM_NORMAL); 7012 iput(inode); 7013 } 7014 } 7015 7016 return err; 7017 } 7018 } 7019 } 7020 return 0; 7021 } 7022 7023 static int ext4_quota_off(struct super_block *sb, int type) 7024 { 7025 struct inode *inode = sb_dqopt(sb)->files[type]; 7026 handle_t *handle; 7027 int err; 7028 7029 /* Force all delayed allocation blocks to be allocated. 7030 * Caller already holds s_umount sem */ 7031 if (test_opt(sb, DELALLOC)) 7032 sync_filesystem(sb); 7033 7034 if (!inode || !igrab(inode)) 7035 goto out; 7036 7037 err = dquot_quota_off(sb, type); 7038 if (err || ext4_has_feature_quota(sb)) 7039 goto out_put; 7040 7041 inode_lock(inode); 7042 /* 7043 * Update modification times of quota files when userspace can 7044 * start looking at them. If we fail, we return success anyway since 7045 * this is not a hard failure and quotas are already disabled. 7046 */ 7047 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 7048 if (IS_ERR(handle)) { 7049 err = PTR_ERR(handle); 7050 goto out_unlock; 7051 } 7052 EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL); 7053 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); 7054 inode->i_mtime = inode->i_ctime = current_time(inode); 7055 err = ext4_mark_inode_dirty(handle, inode); 7056 ext4_journal_stop(handle); 7057 out_unlock: 7058 inode_unlock(inode); 7059 out_put: 7060 lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL); 7061 iput(inode); 7062 return err; 7063 out: 7064 return dquot_quota_off(sb, type); 7065 } 7066 7067 /* Read data from quotafile - avoid pagecache and such because we cannot afford 7068 * acquiring the locks... 
As quota files are never truncated and quota code 7069 * itself serializes the operations (and no one else should touch the files) 7070 * we don't have to be afraid of races */ 7071 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 7072 size_t len, loff_t off) 7073 { 7074 struct inode *inode = sb_dqopt(sb)->files[type]; 7075 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 7076 int offset = off & (sb->s_blocksize - 1); 7077 int tocopy; 7078 size_t toread; 7079 struct buffer_head *bh; 7080 loff_t i_size = i_size_read(inode); 7081 7082 if (off > i_size) 7083 return 0; 7084 if (off+len > i_size) 7085 len = i_size-off; 7086 toread = len; 7087 while (toread > 0) { 7088 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); 7089 bh = ext4_bread(NULL, inode, blk, 0); 7090 if (IS_ERR(bh)) 7091 return PTR_ERR(bh); 7092 if (!bh) /* A hole? */ 7093 memset(data, 0, tocopy); 7094 else 7095 memcpy(data, bh->b_data+offset, tocopy); 7096 brelse(bh); 7097 offset = 0; 7098 toread -= tocopy; 7099 data += tocopy; 7100 blk++; 7101 } 7102 return len; 7103 } 7104 7105 /* Write to quotafile (we know the transaction is already started and has 7106 * enough credits) */ 7107 static ssize_t ext4_quota_write(struct super_block *sb, int type, 7108 const char *data, size_t len, loff_t off) 7109 { 7110 struct inode *inode = sb_dqopt(sb)->files[type]; 7111 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 7112 int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1); 7113 int retries = 0; 7114 struct buffer_head *bh; 7115 handle_t *handle = journal_current_handle(); 7116 7117 if (!handle) { 7118 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 7119 " cancelled because transaction is not started", 7120 (unsigned long long)off, (unsigned long long)len); 7121 return -EIO; 7122 } 7123 /* 7124 * Since we account only one data block in transaction credits, 7125 * then it is impossible to cross a block boundary. 7126 */ 7127 if (sb->s_blocksize - offset < len) { 7128 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 7129 " cancelled because not block aligned", 7130 (unsigned long long)off, (unsigned long long)len); 7131 return -EIO; 7132 } 7133 7134 do { 7135 bh = ext4_bread(handle, inode, blk, 7136 EXT4_GET_BLOCKS_CREATE | 7137 EXT4_GET_BLOCKS_METADATA_NOFAIL); 7138 } while (PTR_ERR(bh) == -ENOSPC && 7139 ext4_should_retry_alloc(inode->i_sb, &retries)); 7140 if (IS_ERR(bh)) 7141 return PTR_ERR(bh); 7142 if (!bh) 7143 goto out; 7144 BUFFER_TRACE(bh, "get write access"); 7145 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 7146 if (err) { 7147 brelse(bh); 7148 return err; 7149 } 7150 lock_buffer(bh); 7151 memcpy(bh->b_data+offset, data, len); 7152 flush_dcache_page(bh->b_page); 7153 unlock_buffer(bh); 7154 err = ext4_handle_dirty_metadata(handle, NULL, bh); 7155 brelse(bh); 7156 out: 7157 if (inode->i_size < off + len) { 7158 i_size_write(inode, off + len); 7159 EXT4_I(inode)->i_disksize = inode->i_size; 7160 err2 = ext4_mark_inode_dirty(handle, inode); 7161 if (unlikely(err2 && !err)) 7162 err = err2; 7163 } 7164 return err ? 
err : len; 7165 } 7166 #endif 7167 7168 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) 7169 static inline void register_as_ext2(void) 7170 { 7171 int err = register_filesystem(&ext2_fs_type); 7172 if (err) 7173 printk(KERN_WARNING 7174 "EXT4-fs: Unable to register as ext2 (%d)\n", err); 7175 } 7176 7177 static inline void unregister_as_ext2(void) 7178 { 7179 unregister_filesystem(&ext2_fs_type); 7180 } 7181 7182 static inline int ext2_feature_set_ok(struct super_block *sb) 7183 { 7184 if (ext4_has_unknown_ext2_incompat_features(sb)) 7185 return 0; 7186 if (sb_rdonly(sb)) 7187 return 1; 7188 if (ext4_has_unknown_ext2_ro_compat_features(sb)) 7189 return 0; 7190 return 1; 7191 } 7192 #else 7193 static inline void register_as_ext2(void) { } 7194 static inline void unregister_as_ext2(void) { } 7195 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } 7196 #endif 7197 7198 static inline void register_as_ext3(void) 7199 { 7200 int err = register_filesystem(&ext3_fs_type); 7201 if (err) 7202 printk(KERN_WARNING 7203 "EXT4-fs: Unable to register as ext3 (%d)\n", err); 7204 } 7205 7206 static inline void unregister_as_ext3(void) 7207 { 7208 unregister_filesystem(&ext3_fs_type); 7209 } 7210 7211 static inline int ext3_feature_set_ok(struct super_block *sb) 7212 { 7213 if (ext4_has_unknown_ext3_incompat_features(sb)) 7214 return 0; 7215 if (!ext4_has_feature_journal(sb)) 7216 return 0; 7217 if (sb_rdonly(sb)) 7218 return 1; 7219 if (ext4_has_unknown_ext3_ro_compat_features(sb)) 7220 return 0; 7221 return 1; 7222 } 7223 7224 static struct file_system_type ext4_fs_type = { 7225 .owner = THIS_MODULE, 7226 .name = "ext4", 7227 .init_fs_context = ext4_init_fs_context, 7228 .parameters = ext4_param_specs, 7229 .kill_sb = kill_block_super, 7230 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 7231 }; 7232 MODULE_ALIAS_FS("ext4"); 7233 7234 /* Shared across all ext4 file systems */ 7235 wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; 7236 7237 static int __init ext4_init_fs(void) 7238 { 7239 int i, err; 7240 7241 ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); 7242 ext4_li_info = NULL; 7243 7244 /* Build-time check for flags consistency */ 7245 ext4_check_flag_values(); 7246 7247 for (i = 0; i < EXT4_WQ_HASH_SZ; i++) 7248 init_waitqueue_head(&ext4__ioend_wq[i]); 7249 7250 err = ext4_init_es(); 7251 if (err) 7252 return err; 7253 7254 err = ext4_init_pending(); 7255 if (err) 7256 goto out7; 7257 7258 err = ext4_init_post_read_processing(); 7259 if (err) 7260 goto out6; 7261 7262 err = ext4_init_pageio(); 7263 if (err) 7264 goto out5; 7265 7266 err = ext4_init_system_zone(); 7267 if (err) 7268 goto out4; 7269 7270 err = ext4_init_sysfs(); 7271 if (err) 7272 goto out3; 7273 7274 err = ext4_init_mballoc(); 7275 if (err) 7276 goto out2; 7277 err = init_inodecache(); 7278 if (err) 7279 goto out1; 7280 7281 err = ext4_fc_init_dentry_cache(); 7282 if (err) 7283 goto out05; 7284 7285 register_as_ext3(); 7286 register_as_ext2(); 7287 err = register_filesystem(&ext4_fs_type); 7288 if (err) 7289 goto out; 7290 7291 return 0; 7292 out: 7293 unregister_as_ext2(); 7294 unregister_as_ext3(); 7295 ext4_fc_destroy_dentry_cache(); 7296 out05: 7297 destroy_inodecache(); 7298 out1: 7299 ext4_exit_mballoc(); 7300 out2: 7301 ext4_exit_sysfs(); 7302 out3: 7303 ext4_exit_system_zone(); 7304 out4: 7305 ext4_exit_pageio(); 7306 out5: 7307 ext4_exit_post_read_processing(); 7308 out6: 7309 ext4_exit_pending(); 7310 out7: 7311 ext4_exit_es(); 7312 
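/*
 * Each outN label above unwinds exactly the steps that had succeeded
 * before the failure, in the reverse order of their initialization.
 */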
7313 return err; 7314 } 7315 7316 static void __exit ext4_exit_fs(void) 7317 { 7318 ext4_destroy_lazyinit_thread(); 7319 unregister_as_ext2(); 7320 unregister_as_ext3(); 7321 unregister_filesystem(&ext4_fs_type); 7322 ext4_fc_destroy_dentry_cache(); 7323 destroy_inodecache(); 7324 ext4_exit_mballoc(); 7325 ext4_exit_sysfs(); 7326 ext4_exit_system_zone(); 7327 ext4_exit_pageio(); 7328 ext4_exit_post_read_processing(); 7329 ext4_exit_es(); 7330 ext4_exit_pending(); 7331 } 7332 7333 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 7334 MODULE_DESCRIPTION("Fourth Extended Filesystem"); 7335 MODULE_LICENSE("GPL"); 7336 MODULE_SOFTDEP("pre: crc32c"); 7337 module_init(ext4_init_fs) 7338 module_exit(ext4_exit_fs) 7339
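/*
 * Usage sketch (illustrative, not part of the build): with
 * CONFIG_EXT4_USE_FOR_EXT2 set and no standalone ext2 driver built, the
 * MODULE_ALIAS_FS() declarations above let this module service mounts of
 * the older names, e.g.:
 *
 *	# mount -t ext2 /dev/sdb1 /mnt		(routed to ext2_fs_type here)
 *	# mount -t ext3 /dev/sdb1 /mnt		(routed to ext3_fs_type here)
 *
 * ext2_feature_set_ok()/ext3_feature_set_ok() then decide whether the
 * on-disk feature set is safe to drive in that compatibility mode.
 */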