/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/jbd2.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <asm/uaccess.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static const char *ext4_decode_error(struct super_block *sb, int errno,
				     char nbuf[16]);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static int ext4_get_sb(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data, struct vfsmount *mnt);

#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.get_sb		= ext4_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
#else
#define IS_EXT3_SB(sb) (0)
#endif

ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}
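
/*
 * The group descriptor stores several counters split into _lo/_hi
 * halves; the _hi half is only meaningful when the descriptor is large
 * enough (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT), i.e. on
 * filesystems formatted with the 64bit layout.  A usage sketch, with
 * gdp obtained elsewhere (e.g. via ext4_get_group_desc()):
 *
 *	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, 0, NULL);
 *	__u32 free = ext4_free_blks_count(sb, gdp);
 */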

__u32 ext4_free_blks_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_blks_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

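/*
 * When ext4 runs without a journal, handle_t is not a real pointer:
 * current->journal_info is used as a plain reference counter (bounded
 * by EXT4_NOJOURNAL_MAX_REF_COUNT) cast to a pointer, which is why
 * __ext4_journal_stop() checks ext4_handle_valid() before treating it
 * as a jbd2 handle.  A sketch of the lifecycle, assuming no journal is
 * present:
 *
 *	handle_t *h = ext4_journal_start_sb(sb, 1);	// bumps the count
 *	...
 *	__ext4_journal_stop(__func__, __LINE__, h);	// drops it again
 */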
/* Just increment the non-pointer handle value */
static handle_t *ext4_get_nojournal(void)
{
	handle_t *handle = current->journal_info;
	unsigned long ref_cnt = (unsigned long)handle;

	BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);

	ref_cnt++;
	handle = (handle_t *)ref_cnt;

	current->journal_info = handle;
	return handle;
}

/* Decrement the non-pointer handle value */
static void ext4_put_nojournal(handle_t *handle)
{
	unsigned long ref_cnt = (unsigned long)handle;

	BUG_ON(ref_cnt == 0);

	ref_cnt--;
	handle = (handle_t *)ref_cnt;

	current->journal_info = handle;
}

/*
 * Wrappers for jbd2_journal_start/end.
 *
 * The only special thing we need to do here is to make sure that all
 * journal_end calls result in the superblock being marked dirty, so
 * that sync() will call the filesystem's write_super callback if
 * appropriate.
 */
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return ERR_PTR(-EROFS);

	vfs_check_frozen(sb, SB_FREEZE_TRANS);
	/* Special case here: if the journal has aborted behind our
	 * backs (eg. EIO in the commit thread), then we still need to
	 * take the FS itself readonly cleanly. */
	journal = EXT4_SB(sb)->s_journal;
	if (journal) {
		if (is_journal_aborted(journal)) {
			ext4_abort(sb, "Detected aborted journal");
			return ERR_PTR(-EROFS);
		}
		return jbd2_journal_start(journal, nblocks);
	}
	return ext4_get_nojournal();
}

/*
 * The only special thing we need to do here is to make sure that all
 * jbd2_journal_stop calls result in the superblock being marked dirty, so
 * that sync() will call the filesystem's write_super callback if
 * appropriate.
 */
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
	struct super_block *sb;
	int err;
	int rc;

	if (!ext4_handle_valid(handle)) {
		ext4_put_nojournal(handle);
		return 0;
	}
	sb = handle->h_transaction->t_journal->j_private;
	err = handle->h_err;
	rc = jbd2_journal_stop(handle);

	if (!err)
		err = rc;
	if (err)
		__ext4_std_error(sb, where, line, err);
	return err;
}

void ext4_journal_abort_handle(const char *caller, unsigned int line,
			       const char *err_fn, struct buffer_head *bh,
			       handle_t *handle, int err)
{
	char nbuf[16];
	const char *errstr = ext4_decode_error(NULL, err, nbuf);

	BUG_ON(!ext4_handle_valid(handle));

	if (bh)
		BUFFER_TRACE(bh, "abort");

	if (!handle->h_err)
		handle->h_err = err;

	if (is_handle_aborted(handle))
		return;

	printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n",
	       caller, line, errstr, err_fn);

	jbd2_journal_abort_handle(handle);
}

static void __save_error_info(struct super_block *sb, const char *func,
			      unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
}

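/*
 * Same as __save_error_info(), but also pushes the updated superblock
 * to disk right away (ext4_commit_super() with sync == 1) so the error
 * record survives a subsequent crash.
 */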
static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}


/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		sb->s_flags |= MS_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC))
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
}

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
	       sb->s_id, function, line, current->comm);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	ext4_handle_error(sb);
}

void ext4_error_inode(struct inode *inode, const char *function,
		      unsigned int line, ext4_fsblk_t block,
		      const char *fmt, ...)
{
	va_list args;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	save_error_info(inode->i_sb, function, line);
	va_start(args, fmt);
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
	       inode->i_sb->s_id, function, line, inode->i_ino);
	if (block)
		printk("block %llu: ", block);
	printk("comm %s: ", current->comm);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	ext4_handle_error(inode->i_sb);
}

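/*
 * Like ext4_error_inode(), but also reports the pathname of the open
 * file (as far as d_path() can reconstruct it) along with the inode
 * number of the file on which the error was detected.
 */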
void ext4_error_file(struct file *file, const char *function,
		     unsigned int line, const char *fmt, ...)
{
	va_list args;
	struct ext4_super_block *es;
	struct inode *inode = file->f_dentry->d_inode;
	char pathname[80], *path;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	save_error_info(inode->i_sb, function, line);
	va_start(args, fmt);
	path = d_path(&(file->f_path), pathname, sizeof(pathname));
	if (IS_ERR(path))
		path = "(unknown)";
	printk(KERN_CRIT
	       "EXT4-fs error (device %s): %s:%d: inode #%lu "
	       "(comm %s path %s): ",
	       inode->i_sb->s_id, function, line, inode->i_ino,
	       current->comm, path);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	ext4_handle_error(inode->i_sb);
}

static const char *ext4_decode_error(struct super_block *sb, int errno,
				     char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL &&
	    (sb->s_flags & MS_RDONLY))
		return;

	errstr = ext4_decode_error(sb, errno, nbuf);
	printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
	       sb->s_id, function, line, errstr);
	save_error_info(sb, function, line);

	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	va_list args;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
	       function, line);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	if ((sb->s_flags & MS_RDONLY) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		sb->s_flags |= MS_RDONLY;
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC))
		panic("EXT4-fs panic from previous error\n");
}

void ext4_msg(struct super_block *sb, const char *prefix,
	      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);
}

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
	       sb->s_id, function, line);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);
	va_start(args, fmt);
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
	       sb->s_id, function, line, grp);
	if (ino)
		printk("inode %lu: ", ino);
	if (block)
		printk("block %llu:", (unsigned long long) block);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

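/*
 * Called when a feature flag needs to be set on a GOOD_OLD_REV
 * filesystem: bump the revision to EXT4_DYNAMIC_REV and initialise the
 * few superblock fields that only exist in the dynamic revision,
 * leaving everything else for e2fsck to sort out.
 */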
void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static int ext4_blkdev_put(struct block_device *bdev)
{
	bd_release(bdev);
	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}

static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	int ret = -ENODEV;

	bdev = sbi->journal_bdev;
	if (bdev) {
		ret = ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
	return ret;
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

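/*
 * Unmount-time teardown: flush and destroy the DIO conversion
 * workqueue, destroy the journal, write back a clean superblock when
 * the mount was read-write, and release all per-superblock state
 * (group descriptors, flex group info, percpu counters, kobject).
 */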
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i, err;

	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	flush_workqueue(sbi->dio_unwritten_wq);
	destroy_workqueue(sbi->dio_unwritten_wq);

	lock_super(sb);
	lock_kernel();
	if (sb->s_dirt)
		ext4_commit_super(sb, 1);

	if (sbi->s_journal) {
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if (err < 0)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);
	ext4_xattr_put_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
		ext4_commit_super(sb, 1);
	}
	if (sbi->s_proc) {
		remove_proc_entry(sb->s_id, ext4_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kfree(sbi->s_group_desc);
	if (is_vmalloc_addr(sbi->s_flex_groups))
		vfree(sbi->s_flex_groups);
	else
		kfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
	brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	unlock_kernel();
	unlock_super(sb);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	ei->vfs_inode.i_data.writeback_index = 0;
	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	/*
	 * Note:  We can be called before EXT4_SB(sb)->s_journal is set,
	 * therefore it can be null here.  Don't check it, just initialize
	 * jinode.
	 */
	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
	ei->i_reserved_data_blocks = 0;
	ei->i_reserved_meta_blocks = 0;
	ei->i_allocated_meta_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_delalloc_reserved_flag = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	INIT_LIST_HEAD(&ei->i_completed_io_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->cur_aio_dio = NULL;
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;

	return &ei->vfs_inode;
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT4_FS_XATTR
	init_rwsem(&ei->xattr_sem);
#endif
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
					     sizeof(struct ext4_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					     init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(ext4_inode_cachep);
}

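/*
 * Final per-inode cleanup on eviction: invalidate buffers, drop quota
 * references and block preallocations, and detach the inode from jbd2
 * if the filesystem has a journal.
 */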
void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	end_writeback(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	if (EXT4_JOURNAL(inode))
		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
				       &EXT4_I(inode)->jinode);
}

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);

	if (test_opt(sb, USRQUOTA))
		seq_puts(seq, ",usrquota");

	if (test_opt(sb, GRPQUOTA))
		seq_puts(seq, ",grpquota");
#endif
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	int def_errors;
	unsigned long def_mount_opts;
	struct super_block *sb = vfs->mnt_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	def_errors     = le16_to_cpu(es->s_errors);

	if (sbi->s_sb_block != 1)
		seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
	if (test_opt(sb, MINIX_DF))
		seq_puts(seq, ",minixdf");
	if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
		seq_puts(seq, ",grpid");
	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
		seq_puts(seq, ",nogrpid");
	if (sbi->s_resuid != EXT4_DEF_RESUID ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
		seq_printf(seq, ",resuid=%u", sbi->s_resuid);
	}
	if (sbi->s_resgid != EXT4_DEF_RESGID ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u", sbi->s_resgid);
	}
	if (test_opt(sb, ERRORS_RO)) {
		if (def_errors == EXT4_ERRORS_PANIC ||
		    def_errors == EXT4_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
		seq_puts(seq, ",debug");
	if (test_opt(sb, OLDALLOC))
		seq_puts(seq, ",oldalloc");
#ifdef CONFIG_EXT4_FS_XATTR
	if (test_opt(sb, XATTR_USER) &&
		!(def_mount_opts & EXT4_DEFM_XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif
	if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
		seq_printf(seq, ",commit=%u",
			   (unsigned) (sbi->s_commit_interval / HZ));
	}
	if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
		seq_printf(seq, ",min_batch_time=%u",
			   (unsigned) sbi->s_min_batch_time);
	}
	if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
		seq_printf(seq, ",max_batch_time=%u",
			   (unsigned) sbi->s_max_batch_time);
	}

	/*
	 * We're changing the default of the barrier mount option, so
	 * let's always display its mount state so it's clear what its
	 * status is.
	 */
	seq_puts(seq, ",barrier=");
	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
		seq_puts(seq, ",journal_async_commit");
	else if (test_opt(sb, JOURNAL_CHECKSUM))
		seq_puts(seq, ",journal_checksum");
	if (test_opt(sb, I_VERSION))
		seq_puts(seq, ",i_version");
	if (!test_opt(sb, DELALLOC) &&
	    !(def_mount_opts & EXT4_DEFM_NODELALLOC))
		seq_puts(seq, ",nodelalloc");

	if (sbi->s_stripe)
		seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
	/*
	 * The journal mode gets enabled in different ways, so just print
	 * the value even if we didn't specify it explicitly.
	 */
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
		seq_puts(seq, ",data=journal");
	else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
		seq_puts(seq, ",data=ordered");
	else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
		seq_puts(seq, ",data=writeback");

	if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		seq_printf(seq, ",inode_readahead_blks=%u",
			   sbi->s_inode_readahead_blks);

	if (test_opt(sb, DATA_ERR_ABORT))
		seq_puts(seq, ",data_err=abort");

	if (test_opt(sb, NO_AUTO_DA_ALLOC))
		seq_puts(seq, ",noauto_da_alloc");

	if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD))
		seq_puts(seq, ",discard");

	if (test_opt(sb, NOLOAD))
		seq_puts(seq, ",norecovery");

	if (test_opt(sb, DIOREAD_NOLOCK))
		seq_puts(seq, ",dioread_nolock");

	if (test_opt(sb, BLOCK_VALIDITY) &&
	    !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
		seq_puts(seq, ",block_validity");

	ext4_show_quota_options(seq, sb);

	return 0;
}

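/*
 * NFS export support: turn the (inode number, generation) pair from a
 * file handle back into an inode, rejecting handles that point at
 * reserved or out-of-range inode numbers.
 */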
1067 * 1068 * Currently we don't know the generation for parent directory, so 1069 * a generation of 0 means "accept any" 1070 */ 1071 inode = ext4_iget(sb, ino); 1072 if (IS_ERR(inode)) 1073 return ERR_CAST(inode); 1074 if (generation && inode->i_generation != generation) { 1075 iput(inode); 1076 return ERR_PTR(-ESTALE); 1077 } 1078 1079 return inode; 1080 } 1081 1082 static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid, 1083 int fh_len, int fh_type) 1084 { 1085 return generic_fh_to_dentry(sb, fid, fh_len, fh_type, 1086 ext4_nfs_get_inode); 1087 } 1088 1089 static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid, 1090 int fh_len, int fh_type) 1091 { 1092 return generic_fh_to_parent(sb, fid, fh_len, fh_type, 1093 ext4_nfs_get_inode); 1094 } 1095 1096 /* 1097 * Try to release metadata pages (indirect blocks, directories) which are 1098 * mapped via the block device. Since these pages could have journal heads 1099 * which would prevent try_to_free_buffers() from freeing them, we must use 1100 * jbd2 layer's try_to_free_buffers() function to release them. 1101 */ 1102 static int bdev_try_to_free_page(struct super_block *sb, struct page *page, 1103 gfp_t wait) 1104 { 1105 journal_t *journal = EXT4_SB(sb)->s_journal; 1106 1107 WARN_ON(PageChecked(page)); 1108 if (!page_has_buffers(page)) 1109 return 0; 1110 if (journal) 1111 return jbd2_journal_try_to_free_buffers(journal, page, 1112 wait & ~__GFP_WAIT); 1113 return try_to_free_buffers(page); 1114 } 1115 1116 #ifdef CONFIG_QUOTA 1117 #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group") 1118 #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 1119 1120 static int ext4_write_dquot(struct dquot *dquot); 1121 static int ext4_acquire_dquot(struct dquot *dquot); 1122 static int ext4_release_dquot(struct dquot *dquot); 1123 static int ext4_mark_dquot_dirty(struct dquot *dquot); 1124 static int ext4_write_info(struct super_block *sb, int type); 1125 static int ext4_quota_on(struct super_block *sb, int type, int format_id, 1126 char *path); 1127 static int ext4_quota_off(struct super_block *sb, int type); 1128 static int ext4_quota_on_mount(struct super_block *sb, int type); 1129 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 1130 size_t len, loff_t off); 1131 static ssize_t ext4_quota_write(struct super_block *sb, int type, 1132 const char *data, size_t len, loff_t off); 1133 1134 static const struct dquot_operations ext4_quota_operations = { 1135 #ifdef CONFIG_QUOTA 1136 .get_reserved_space = ext4_get_reserved_space, 1137 #endif 1138 .write_dquot = ext4_write_dquot, 1139 .acquire_dquot = ext4_acquire_dquot, 1140 .release_dquot = ext4_release_dquot, 1141 .mark_dirty = ext4_mark_dquot_dirty, 1142 .write_info = ext4_write_info, 1143 .alloc_dquot = dquot_alloc, 1144 .destroy_dquot = dquot_destroy, 1145 }; 1146 1147 static const struct quotactl_ops ext4_qctl_operations = { 1148 .quota_on = ext4_quota_on, 1149 .quota_off = ext4_quota_off, 1150 .quota_sync = dquot_quota_sync, 1151 .get_info = dquot_get_dqinfo, 1152 .set_info = dquot_set_dqinfo, 1153 .get_dqblk = dquot_get_dqblk, 1154 .set_dqblk = dquot_set_dqblk 1155 }; 1156 #endif 1157 1158 static const struct super_operations ext4_sops = { 1159 .alloc_inode = ext4_alloc_inode, 1160 .destroy_inode = ext4_destroy_inode, 1161 .write_inode = ext4_write_inode, 1162 .dirty_inode = ext4_dirty_inode, 1163 .evict_inode = ext4_evict_inode, 1164 .put_super = ext4_put_super, 1165 .sync_fs = ext4_sync_fs, 
static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct super_operations ext4_nojournal_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.evict_inode	= ext4_evict_inode,
	.write_super	= ext4_write_super,
	.put_super	= ext4_put_super,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
	Opt_journal_update, Opt_journal_dev,
	Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
	Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard,
};

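/*
 * Mount option strings and their token values.  match_token() walks the
 * table in order and returns the first matching pattern, so for example
 * "barrier=0" hits the "barrier=%u" entry (with the argument captured
 * for match_int()) while a bare "barrier" falls through to the plain
 * "barrier" entry further down.
 */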
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_oldalloc, "oldalloc"},
	{Opt_orlov, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "noload"},
	{Opt_noload, "norecovery"},
	{Opt_nobh, "nobh"},
	{Opt_bh, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_update, "journal=update"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_stripe, "stripe=%u"},
	{Opt_resize, "resize"},
	{Opt_delalloc, "delalloc"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_err, NULL},
};

static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return 0;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return 0;
	}
	if (sbi->s_qf_names[qtype] &&
		strcmp(sbi->s_qf_names[qtype], qname)) {
		ext4_msg(sb, KERN_ERR,
			"%s quota file already specified", QTYPE2NAME(qtype));
		kfree(qname);
		return 0;
	}
	sbi->s_qf_names[qtype] = qname;
	if (strchr(sbi->s_qf_names[qtype], '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		kfree(sbi->s_qf_names[qtype]);
		sbi->s_qf_names[qtype] = NULL;
		return 0;
	}
	set_opt(sbi->s_mount_opt, QUOTA);
	return 1;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{

	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return 0;
	}
	/*
	 * The space will be released later when all options are confirmed
	 * to be correct
	 */
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 ext4_fsblk_t *n_blocks_count, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int data_opt = 0;
	int option;
#ifdef CONFIG_QUOTA
	int qfmt;
#endif

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = 0;
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bsd_df:
			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
			clear_opt(sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_minix_df:
			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
			set_opt(sbi->s_mount_opt, MINIX_DF);

			break;
		case Opt_grpid:
			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
			set_opt(sbi->s_mount_opt, GRPID);

			break;
		case Opt_nogrpid:
			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
			clear_opt(sbi->s_mount_opt, GRPID);

			break;
		case Opt_resuid:
			if (match_int(&args[0], &option))
				return 0;
			sbi->s_resuid = option;
			break;
		case Opt_resgid:
			if (match_int(&args[0], &option))
				return 0;
			sbi->s_resgid = option;
			break;
		case Opt_sb:
			/* handled by get_sb_block() instead of here */
			/* *sb_block = match_int(&args[0]); */
			break;
		case Opt_err_panic:
			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
			clear_opt(sbi->s_mount_opt, ERRORS_RO);
			set_opt(sbi->s_mount_opt, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
			set_opt(sbi->s_mount_opt, ERRORS_RO);
			break;
		case Opt_err_cont:
			clear_opt(sbi->s_mount_opt, ERRORS_RO);
			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
			set_opt(sbi->s_mount_opt, ERRORS_CONT);
			break;
		case Opt_nouid32:
			set_opt(sbi->s_mount_opt, NO_UID32);
			break;
		case Opt_debug:
			set_opt(sbi->s_mount_opt, DEBUG);
			break;
		case Opt_oldalloc:
			set_opt(sbi->s_mount_opt, OLDALLOC);
			break;
		case Opt_orlov:
			clear_opt(sbi->s_mount_opt, OLDALLOC);
			break;
#ifdef CONFIG_EXT4_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi->s_mount_opt, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi->s_mount_opt, XATTR_USER);
			break;
#else
		case Opt_user_xattr:
		case Opt_nouser_xattr:
			ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
#else
		case Opt_acl:
		case Opt_noacl:
			ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
			break;
#endif
		case Opt_journal_update:
			/* @@@ FIXME */
			/* Eventually we will want to be able to create
			   a journal file here.  For now, only allow the
			   user to specify an existing inode to be the
			   journal file. */
			if (is_remount) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot specify journal on remount");
				return 0;
			}
			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
			break;
		case Opt_journal_dev:
			if (is_remount) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot specify journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*journal_devnum = option;
			break;
		case Opt_journal_checksum:
			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
			break;
		case Opt_journal_async_commit:
			set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
			break;
		case Opt_noload:
			set_opt(sbi->s_mount_opt, NOLOAD);
			break;
		case Opt_commit:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			if (option == 0)
				option = JBD2_DEFAULT_MAX_COMMIT_AGE;
			sbi->s_commit_interval = HZ * option;
			break;
		case Opt_max_batch_time:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			if (option == 0)
				option = EXT4_DEF_MAX_BATCH_TIME;
			sbi->s_max_batch_time = option;
			break;
		case Opt_min_batch_time:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			sbi->s_min_batch_time = option;
			break;
		case Opt_data_journal:
			data_opt = EXT4_MOUNT_JOURNAL_DATA;
			goto datacheck;
		case Opt_data_ordered:
			data_opt = EXT4_MOUNT_ORDERED_DATA;
			goto datacheck;
		case Opt_data_writeback:
			data_opt = EXT4_MOUNT_WRITEBACK_DATA;
		datacheck:
			if (is_remount) {
				if (test_opt(sb, DATA_FLAGS) != data_opt) {
					ext4_msg(sb, KERN_ERR,
						"Cannot change data mode on remount");
					return 0;
				}
			} else {
				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
				sbi->s_mount_opt |= data_opt;
			}
			break;
		case Opt_data_err_abort:
			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
		case Opt_data_err_ignore:
			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
#ifdef CONFIG_QUOTA
		case Opt_usrjquota:
			if (!set_qf_name(sb, USRQUOTA, &args[0]))
				return 0;
			break;
		case Opt_grpjquota:
			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
				return 0;
			break;
		case Opt_offusrjquota:
			if (!clear_qf_name(sb, USRQUOTA))
				return 0;
			break;
		case Opt_offgrpjquota:
			if (!clear_qf_name(sb, GRPQUOTA))
				return 0;
			break;

		case Opt_jqfmt_vfsold:
			qfmt = QFMT_VFS_OLD;
			goto set_qf_format;
		case Opt_jqfmt_vfsv0:
			qfmt = QFMT_VFS_V0;
			goto set_qf_format;
		case Opt_jqfmt_vfsv1:
			qfmt = QFMT_VFS_V1;
		set_qf_format:
			if (sb_any_quota_loaded(sb) &&
			    sbi->s_jquota_fmt != qfmt) {
				ext4_msg(sb, KERN_ERR, "Cannot change "
					"journaled quota options when "
					"quota turned on");
				return 0;
			}
			sbi->s_jquota_fmt = qfmt;
			break;
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
		case Opt_noquota:
			if (sb_any_quota_loaded(sb)) {
				ext4_msg(sb, KERN_ERR, "Cannot change quota "
					"options when quota turned on");
				return 0;
			}
			clear_opt(sbi->s_mount_opt, QUOTA);
			clear_opt(sbi->s_mount_opt, USRQUOTA);
			clear_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
			ext4_msg(sb, KERN_ERR,
				"quota options not supported");
			break;
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			ext4_msg(sb, KERN_ERR,
				"journaled quota options not supported");
			break;
		case Opt_noquota:
			break;
#endif
		case Opt_abort:
			sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
			break;
		case Opt_nobarrier:
			clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_barrier:
			if (args[0].from) {
				if (match_int(&args[0], &option))
					return 0;
			} else
				option = 1;	/* No argument, default to 1 */
			if (option)
				set_opt(sbi->s_mount_opt, BARRIER);
			else
				clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_ignore:
			break;
		case Opt_resize:
			if (!is_remount) {
				ext4_msg(sb, KERN_ERR,
					"resize option only available "
					"for remount");
				return 0;
			}
			if (match_int(&args[0], &option) != 0)
				return 0;
			*n_blocks_count = option;
			break;
		case Opt_nobh:
			ext4_msg(sb, KERN_WARNING,
				 "Ignoring deprecated nobh option");
			break;
		case Opt_bh:
			ext4_msg(sb, KERN_WARNING,
				 "Ignoring deprecated bh option");
			break;
		case Opt_i_version:
			set_opt(sbi->s_mount_opt, I_VERSION);
			sb->s_flags |= MS_I_VERSION;
			break;
		case Opt_nodelalloc:
			clear_opt(sbi->s_mount_opt, DELALLOC);
			break;
		case Opt_stripe:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			sbi->s_stripe = option;
			break;
		case Opt_delalloc:
			set_opt(sbi->s_mount_opt, DELALLOC);
			break;
		case Opt_block_validity:
			set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
			break;
		case Opt_noblock_validity:
			clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
			break;
		case Opt_inode_readahead_blks:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0 || option > (1 << 30))
				return 0;
			if (!is_power_of_2(option)) {
				ext4_msg(sb, KERN_ERR,
					 "EXT4-fs: inode_readahead_blks"
					 " must be a power of 2");
				return 0;
			}
			sbi->s_inode_readahead_blks = option;
			break;
		case Opt_journal_ioprio:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0 || option > 7)
				break;
			*journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
							    option);
			break;
		case Opt_noauto_da_alloc:
			set_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
			break;
		case Opt_auto_da_alloc:
			if (args[0].from) {
				if (match_int(&args[0], &option))
					return 0;
			} else
				option = 1;	/* No argument, default to 1 */
			if (option)
				clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
			else
				set_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
			break;
		case Opt_discard:
			set_opt(sbi->s_mount_opt, DISCARD);
			break;
		case Opt_nodiscard:
			clear_opt(sbi->s_mount_opt, DISCARD);
			break;
		case Opt_dioread_nolock:
			set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
			break;
		case Opt_dioread_lock:
			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
			break;
		default:
			ext4_msg(sb, KERN_ERR,
			       "Unrecognized mount option \"%s\" "
			       "or missing value", p);
			return 0;
		}
	}
#ifdef CONFIG_QUOTA
	if (sbi->s_qf_names[USRQUOTA] ||
	    sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi->s_mount_opt, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi->s_mount_opt, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	} else {
		if (sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"specified with no journaling "
					"enabled");
			return 0;
		}
	}
#endif
	return 1;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		return res;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if ((sbi->s_mount_state & EXT4_ERROR_FS))
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);

	ext4_commit_super(sb, 1);
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt);

	return res;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group_count;
	ext4_group_t flex_group;
	int groups_per_flex = 0;
	size_t size;
	int i;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	groups_per_flex = 1 << sbi->s_log_groups_per_flex;

	if (groups_per_flex < 2) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	/* We allocate both existing and potentially added groups */
	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
			((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
			      EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
	size = flex_group_count * sizeof(struct flex_groups);
1878 sbi->s_flex_groups = kzalloc(size, GFP_KERNEL); 1879 if (sbi->s_flex_groups == NULL) { 1880 sbi->s_flex_groups = vmalloc(size); 1881 if (sbi->s_flex_groups) 1882 memset(sbi->s_flex_groups, 0, size); 1883 } 1884 if (sbi->s_flex_groups == NULL) { 1885 ext4_msg(sb, KERN_ERR, "not enough memory for " 1886 "%u flex groups", flex_group_count); 1887 goto failed; 1888 } 1889 1890 for (i = 0; i < sbi->s_groups_count; i++) { 1891 gdp = ext4_get_group_desc(sb, i, NULL); 1892 1893 flex_group = ext4_flex_group(sbi, i); 1894 atomic_add(ext4_free_inodes_count(sb, gdp), 1895 &sbi->s_flex_groups[flex_group].free_inodes); 1896 atomic_add(ext4_free_blks_count(sb, gdp), 1897 &sbi->s_flex_groups[flex_group].free_blocks); 1898 atomic_add(ext4_used_dirs_count(sb, gdp), 1899 &sbi->s_flex_groups[flex_group].used_dirs); 1900 } 1901 1902 return 1; 1903 failed: 1904 return 0; 1905 } 1906 1907 __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, 1908 struct ext4_group_desc *gdp) 1909 { 1910 __u16 crc = 0; 1911 1912 if (sbi->s_es->s_feature_ro_compat & 1913 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { 1914 int offset = offsetof(struct ext4_group_desc, bg_checksum); 1915 __le32 le_group = cpu_to_le32(block_group); 1916 1917 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 1918 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); 1919 crc = crc16(crc, (__u8 *)gdp, offset); 1920 offset += sizeof(gdp->bg_checksum); /* skip checksum */ 1921 /* for checksum of struct ext4_group_desc do the rest...*/ 1922 if ((sbi->s_es->s_feature_incompat & 1923 cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) && 1924 offset < le16_to_cpu(sbi->s_es->s_desc_size)) 1925 crc = crc16(crc, (__u8 *)gdp + offset, 1926 le16_to_cpu(sbi->s_es->s_desc_size) - 1927 offset); 1928 } 1929 1930 return cpu_to_le16(crc); 1931 } 1932 1933 int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group, 1934 struct ext4_group_desc *gdp) 1935 { 1936 if ((sbi->s_es->s_feature_ro_compat & 1937 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) && 1938 (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp))) 1939 return 0; 1940 1941 return 1; 1942 } 1943 1944 /* Called at mount-time, super-block is locked */ 1945 static int ext4_check_descriptors(struct super_block *sb) 1946 { 1947 struct ext4_sb_info *sbi = EXT4_SB(sb); 1948 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 1949 ext4_fsblk_t last_block; 1950 ext4_fsblk_t block_bitmap; 1951 ext4_fsblk_t inode_bitmap; 1952 ext4_fsblk_t inode_table; 1953 int flexbg_flag = 0; 1954 ext4_group_t i; 1955 1956 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) 1957 flexbg_flag = 1; 1958 1959 ext4_debug("Checking group descriptors"); 1960 1961 for (i = 0; i < sbi->s_groups_count; i++) { 1962 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); 1963 1964 if (i == sbi->s_groups_count - 1 || flexbg_flag) 1965 last_block = ext4_blocks_count(sbi->s_es) - 1; 1966 else 1967 last_block = first_block + 1968 (EXT4_BLOCKS_PER_GROUP(sb) - 1); 1969 1970 block_bitmap = ext4_block_bitmap(sb, gdp); 1971 if (block_bitmap < first_block || block_bitmap > last_block) { 1972 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 1973 "Block bitmap for group %u not in group " 1974 "(block %llu)!", i, block_bitmap); 1975 return 0; 1976 } 1977 inode_bitmap = ext4_inode_bitmap(sb, gdp); 1978 if (inode_bitmap < first_block || inode_bitmap > last_block) { 1979 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 1980 "Inode bitmap for group %u not in group " 
1981 "(block %llu)!", i, inode_bitmap); 1982 return 0; 1983 } 1984 inode_table = ext4_inode_table(sb, gdp); 1985 if (inode_table < first_block || 1986 inode_table + sbi->s_itb_per_group - 1 > last_block) { 1987 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 1988 "Inode table for group %u not in group " 1989 "(block %llu)!", i, inode_table); 1990 return 0; 1991 } 1992 ext4_lock_group(sb, i); 1993 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) { 1994 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 1995 "Checksum for group %u failed (%u!=%u)", 1996 i, le16_to_cpu(ext4_group_desc_csum(sbi, i, 1997 gdp)), le16_to_cpu(gdp->bg_checksum)); 1998 if (!(sb->s_flags & MS_RDONLY)) { 1999 ext4_unlock_group(sb, i); 2000 return 0; 2001 } 2002 } 2003 ext4_unlock_group(sb, i); 2004 if (!flexbg_flag) 2005 first_block += EXT4_BLOCKS_PER_GROUP(sb); 2006 } 2007 2008 ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb)); 2009 sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb)); 2010 return 1; 2011 } 2012 2013 /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at 2014 * the superblock) which were deleted from all directories, but held open by 2015 * a process at the time of a crash. We walk the list and try to delete these 2016 * inodes at recovery time (only with a read-write filesystem). 2017 * 2018 * In order to keep the orphan inode chain consistent during traversal (in 2019 * case of crash during recovery), we link each inode into the superblock 2020 * orphan list_head and handle it the same way as an inode deletion during 2021 * normal operation (which journals the operations for us). 2022 * 2023 * We only do an iget() and an iput() on each inode, which is very safe if we 2024 * accidentally point at an in-use or already deleted inode. The worst that 2025 * can happen in this case is that we get a "bit already cleared" message from 2026 * ext4_free_inode(). The only reason we would point at a wrong inode is if 2027 * e2fsck was run on this filesystem, and it must have already done the orphan 2028 * inode cleanup for us, so we can safely abort without any further action. 
2029 */ 2030 static void ext4_orphan_cleanup(struct super_block *sb, 2031 struct ext4_super_block *es) 2032 { 2033 unsigned int s_flags = sb->s_flags; 2034 int nr_orphans = 0, nr_truncates = 0; 2035 #ifdef CONFIG_QUOTA 2036 int i; 2037 #endif 2038 if (!es->s_last_orphan) { 2039 jbd_debug(4, "no orphan inodes to clean up\n"); 2040 return; 2041 } 2042 2043 if (bdev_read_only(sb->s_bdev)) { 2044 ext4_msg(sb, KERN_ERR, "write access " 2045 "unavailable, skipping orphan cleanup"); 2046 return; 2047 } 2048 2049 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 2050 if (es->s_last_orphan) 2051 jbd_debug(1, "Errors on filesystem, " 2052 "clearing orphan list.\n"); 2053 es->s_last_orphan = 0; 2054 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); 2055 return; 2056 } 2057 2058 if (s_flags & MS_RDONLY) { 2059 ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); 2060 sb->s_flags &= ~MS_RDONLY; 2061 } 2062 #ifdef CONFIG_QUOTA 2063 /* Needed for iput() to work correctly and not trash data */ 2064 sb->s_flags |= MS_ACTIVE; 2065 /* Turn on quotas so that they are updated correctly */ 2066 for (i = 0; i < MAXQUOTAS; i++) { 2067 if (EXT4_SB(sb)->s_qf_names[i]) { 2068 int ret = ext4_quota_on_mount(sb, i); 2069 if (ret < 0) 2070 ext4_msg(sb, KERN_ERR, 2071 "Cannot turn on journaled " 2072 "quota: error %d", ret); 2073 } 2074 } 2075 #endif 2076 2077 while (es->s_last_orphan) { 2078 struct inode *inode; 2079 2080 inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); 2081 if (IS_ERR(inode)) { 2082 es->s_last_orphan = 0; 2083 break; 2084 } 2085 2086 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); 2087 dquot_initialize(inode); 2088 if (inode->i_nlink) { 2089 ext4_msg(sb, KERN_DEBUG, 2090 "%s: truncating inode %lu to %lld bytes", 2091 __func__, inode->i_ino, inode->i_size); 2092 jbd_debug(2, "truncating inode %lu to %lld bytes\n", 2093 inode->i_ino, inode->i_size); 2094 ext4_truncate(inode); 2095 nr_truncates++; 2096 } else { 2097 ext4_msg(sb, KERN_DEBUG, 2098 "%s: deleting unreferenced inode %lu", 2099 __func__, inode->i_ino); 2100 jbd_debug(2, "deleting unreferenced inode %lu\n", 2101 inode->i_ino); 2102 nr_orphans++; 2103 } 2104 iput(inode); /* The delete magic happens here! */ 2105 } 2106 2107 #define PLURAL(x) (x), ((x) == 1) ? "" : "s" 2108 2109 if (nr_orphans) 2110 ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", 2111 PLURAL(nr_orphans)); 2112 if (nr_truncates) 2113 ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", 2114 PLURAL(nr_truncates)); 2115 #ifdef CONFIG_QUOTA 2116 /* Turn quotas off */ 2117 for (i = 0; i < MAXQUOTAS; i++) { 2118 if (sb_dqopt(sb)->files[i]) 2119 dquot_quota_off(sb, i); 2120 } 2121 #endif 2122 sb->s_flags = s_flags; /* Restore MS_RDONLY status */ 2123 } 2124 2125 /* 2126 * Maximal extent format file size. 2127 * Resulting logical blkno at s_maxbytes must fit in our on-disk 2128 * extent format containers, within a sector_t, and within i_blocks 2129 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, 2130 * so that won't be a limiting factor. 2131 * 2132 * Note, this does *not* consider any metadata overhead for vfs i_blocks. 2133 */ 2134 static loff_t ext4_max_size(int blkbits, int has_huge_files) 2135 { 2136 loff_t res; 2137 loff_t upper_limit = MAX_LFS_FILESIZE; 2138 2139 /* small i_blocks in vfs inode? 
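* For example, with 4 KiB blocks (blkbits == 12) this branch caps the limit at ((2^32 - 1) >> 3) << 12 bytes, just under 2 TiB, while the extent limit computed below is (2^32 << 12) - 1 bytes (16 TiB - 1); the smaller of the two is returned.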
*/ 2140 if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { 2141 /* 2142 * CONFIG_LBDAF is not enabled implies the inode 2143 * i_block represents total blocks in 512 bytes 2144 * 32 == size of vfs inode i_blocks * 8 2145 */ 2146 upper_limit = (1LL << 32) - 1; 2147 2148 /* total blocks in file system block size */ 2149 upper_limit >>= (blkbits - 9); 2150 upper_limit <<= blkbits; 2151 } 2152 2153 /* 32-bit extent-start container, ee_block */ 2154 res = 1LL << 32; 2155 res <<= blkbits; 2156 res -= 1; 2157 2158 /* Sanity check against vm- & vfs- imposed limits */ 2159 if (res > upper_limit) 2160 res = upper_limit; 2161 2162 return res; 2163 } 2164 2165 /* 2166 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect 2167 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks. 2168 * We need to be 1 filesystem block less than the 2^48 sector limit. 2169 */ 2170 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) 2171 { 2172 loff_t res = EXT4_NDIR_BLOCKS; 2173 int meta_blocks; 2174 loff_t upper_limit; 2175 /* This is calculated to be the largest file size for a dense, block 2176 * mapped file such that the file's total number of 512-byte sectors, 2177 * including data and all indirect blocks, does not exceed (2^48 - 1). 2178 * 2179 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total 2180 * number of 512-byte sectors of the file. 2181 */ 2182 2183 if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { 2184 /* 2185 * !has_huge_files or CONFIG_LBDAF not enabled implies that 2186 * the inode i_block field represents total file blocks in 2187 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8 2188 */ 2189 upper_limit = (1LL << 32) - 1; 2190 2191 /* total blocks in file system block size */ 2192 upper_limit >>= (bits - 9); 2193 2194 } else { 2195 /* 2196 * We use 48 bit ext4_inode i_blocks 2197 * With EXT4_HUGE_FILE_FL set the i_blocks 2198 * represent total number of blocks in 2199 * file system block size 2200 */ 2201 upper_limit = (1LL << 48) - 1; 2202 2203 } 2204 2205 /* indirect blocks */ 2206 meta_blocks = 1; 2207 /* double indirect blocks */ 2208 meta_blocks += 1 + (1LL << (bits-2)); 2209 /* triple indirect blocks */ 2210 meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); 2211 2212 upper_limit -= meta_blocks; 2213 upper_limit <<= bits; 2214 2215 res += 1LL << (bits-2); 2216 res += 1LL << (2*(bits-2)); 2217 res += 1LL << (3*(bits-2)); 2218 res <<= bits; 2219 if (res > upper_limit) 2220 res = upper_limit; 2221 2222 if (res > MAX_LFS_FILESIZE) 2223 res = MAX_LFS_FILESIZE; 2224 2225 return res; 2226 } 2227 2228 static ext4_fsblk_t descriptor_loc(struct super_block *sb, 2229 ext4_fsblk_t logical_sb_block, int nr) 2230 { 2231 struct ext4_sb_info *sbi = EXT4_SB(sb); 2232 ext4_group_t bg, first_meta_bg; 2233 int has_super = 0; 2234 2235 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); 2236 2237 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) || 2238 nr < first_meta_bg) 2239 return logical_sb_block + nr + 1; 2240 bg = sbi->s_desc_per_block * nr; 2241 if (ext4_bg_has_super(sb, bg)) 2242 has_super = 1; 2243 2244 return (has_super + ext4_group_first_block_no(sb, bg)); 2245 } 2246 2247 /** 2248 * ext4_get_stripe_size: Get the stripe size. 2249 * @sbi: In memory super block info 2250 * 2251 * If we have specified it via mount option, then 2252 * use the mount option value. If the value specified at mount time is 2253 * greater than the blocks per group use the super block value.
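* (The superblock values are s_raid_stripe_width, tried first, and then s_raid_stride.)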
If the super block value is greater than blocks per group, return 0. 2255 * Allocator needs it to be less than blocks per group. 2256 * 2257 */ 2258 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) 2259 { 2260 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride); 2261 unsigned long stripe_width = 2262 le32_to_cpu(sbi->s_es->s_raid_stripe_width); 2263 2264 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) 2265 return sbi->s_stripe; 2266 2267 if (stripe_width <= sbi->s_blocks_per_group) 2268 return stripe_width; 2269 2270 if (stride <= sbi->s_blocks_per_group) 2271 return stride; 2272 2273 return 0; 2274 } 2275 2276 /* sysfs support */ 2277 2278 struct ext4_attr { 2279 struct attribute attr; 2280 ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); 2281 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, 2282 const char *, size_t); 2283 int offset; 2284 }; 2285 2286 static int parse_strtoul(const char *buf, 2287 unsigned long max, unsigned long *value) 2288 { 2289 char *endp; 2290 2291 *value = simple_strtoul(skip_spaces(buf), &endp, 0); 2292 endp = skip_spaces(endp); 2293 if (*endp || *value > max) 2294 return -EINVAL; 2295 2296 return 0; 2297 } 2298 2299 static ssize_t delayed_allocation_blocks_show(struct ext4_attr *a, 2300 struct ext4_sb_info *sbi, 2301 char *buf) 2302 { 2303 return snprintf(buf, PAGE_SIZE, "%llu\n", 2304 (s64) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2305 } 2306 2307 static ssize_t session_write_kbytes_show(struct ext4_attr *a, 2308 struct ext4_sb_info *sbi, char *buf) 2309 { 2310 struct super_block *sb = sbi->s_buddy_cache->i_sb; 2311 2312 if (!sb->s_bdev->bd_part) 2313 return snprintf(buf, PAGE_SIZE, "0\n"); 2314 return snprintf(buf, PAGE_SIZE, "%lu\n", 2315 (part_stat_read(sb->s_bdev->bd_part, sectors[1]) - 2316 sbi->s_sectors_written_start) >> 1); 2317 } 2318 2319 static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a, 2320 struct ext4_sb_info *sbi, char *buf) 2321 { 2322 struct super_block *sb = sbi->s_buddy_cache->i_sb; 2323 2324 if (!sb->s_bdev->bd_part) 2325 return snprintf(buf, PAGE_SIZE, "0\n"); 2326 return snprintf(buf, PAGE_SIZE, "%llu\n", 2327 (unsigned long long)(sbi->s_kbytes_written + 2328 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - 2329 EXT4_SB(sb)->s_sectors_written_start) >> 1))); 2330 } 2331 2332 static ssize_t inode_readahead_blks_store(struct ext4_attr *a, 2333 struct ext4_sb_info *sbi, 2334 const char *buf, size_t count) 2335 { 2336 unsigned long t; 2337 2338 if (parse_strtoul(buf, 0x40000000, &t)) 2339 return -EINVAL; 2340 2341 if (!is_power_of_2(t)) 2342 return -EINVAL; 2343 2344 sbi->s_inode_readahead_blks = t; 2345 return count; 2346 } 2347 2348 static ssize_t sbi_ui_show(struct ext4_attr *a, 2349 struct ext4_sb_info *sbi, char *buf) 2350 { 2351 unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); 2352 2353 return snprintf(buf, PAGE_SIZE, "%u\n", *ui); 2354 } 2355 2356 static ssize_t sbi_ui_store(struct ext4_attr *a, 2357 struct ext4_sb_info *sbi, 2358 const char *buf, size_t count) 2359 { 2360 unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset); 2361 unsigned long t; 2362 2363 if (parse_strtoul(buf, 0xffffffff, &t)) 2364 return -EINVAL; 2365 *ui = t; 2366 return count; 2367 } 2368 2369 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ 2370 static struct ext4_attr ext4_attr_##_name = { \ 2371 .attr = {.name = __stringify(_name), .mode = _mode }, \ 2372 .show = _show, \ 2373 .store = _store, \ 2374 .offset =
offsetof(struct ext4_sb_info, _elname), \ 2375 } 2376 #define EXT4_ATTR(name, mode, show, store) \ 2377 static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) 2378 2379 #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL) 2380 #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store) 2381 #define EXT4_RW_ATTR_SBI_UI(name, elname) \ 2382 EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) 2383 #define ATTR_LIST(name) &ext4_attr_##name.attr 2384 2385 EXT4_RO_ATTR(delayed_allocation_blocks); 2386 EXT4_RO_ATTR(session_write_kbytes); 2387 EXT4_RO_ATTR(lifetime_write_kbytes); 2388 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, 2389 inode_readahead_blks_store, s_inode_readahead_blks); 2390 EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); 2391 EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats); 2392 EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan); 2393 EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); 2394 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); 2395 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); 2396 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); 2397 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump); 2398 2399 static struct attribute *ext4_attrs[] = { 2400 ATTR_LIST(delayed_allocation_blocks), 2401 ATTR_LIST(session_write_kbytes), 2402 ATTR_LIST(lifetime_write_kbytes), 2403 ATTR_LIST(inode_readahead_blks), 2404 ATTR_LIST(inode_goal), 2405 ATTR_LIST(mb_stats), 2406 ATTR_LIST(mb_max_to_scan), 2407 ATTR_LIST(mb_min_to_scan), 2408 ATTR_LIST(mb_order2_req), 2409 ATTR_LIST(mb_stream_req), 2410 ATTR_LIST(mb_group_prealloc), 2411 ATTR_LIST(max_writeback_mb_bump), 2412 NULL, 2413 }; 2414 2415 static ssize_t ext4_attr_show(struct kobject *kobj, 2416 struct attribute *attr, char *buf) 2417 { 2418 struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, 2419 s_kobj); 2420 struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); 2421 2422 return a->show ? a->show(a, sbi, buf) : 0; 2423 } 2424 2425 static ssize_t ext4_attr_store(struct kobject *kobj, 2426 struct attribute *attr, 2427 const char *buf, size_t len) 2428 { 2429 struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, 2430 s_kobj); 2431 struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); 2432 2433 return a->store ? a->store(a, sbi, buf, len) : 0; 2434 } 2435 2436 static void ext4_sb_release(struct kobject *kobj) 2437 { 2438 struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, 2439 s_kobj); 2440 complete(&sbi->s_kobj_unregister); 2441 } 2442 2443 2444 static const struct sysfs_ops ext4_attr_ops = { 2445 .show = ext4_attr_show, 2446 .store = ext4_attr_store, 2447 }; 2448 2449 static struct kobj_type ext4_ktype = { 2450 .default_attrs = ext4_attrs, 2451 .sysfs_ops = &ext4_attr_ops, 2452 .release = ext4_sb_release, 2453 }; 2454 2455 /* 2456 * Check whether this filesystem can be mounted based on 2457 * the features present and the RDONLY/RDWR mount requested. 2458 * Returns 1 if this filesystem can be mounted as requested, 2459 * 0 if it cannot be. 
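* Incompatible features are checked against EXT4_FEATURE_INCOMPAT_SUPP in every case; RO_COMPAT features only matter for a read-write mount, and HUGE_FILE additionally needs a 64-bit blkcnt_t (CONFIG_LBDAF) to go read-write.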
2460 */ 2461 static int ext4_feature_set_ok(struct super_block *sb, int readonly) 2462 { 2463 if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) { 2464 ext4_msg(sb, KERN_ERR, 2465 "Couldn't mount because of " 2466 "unsupported optional features (%x)", 2467 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & 2468 ~EXT4_FEATURE_INCOMPAT_SUPP)); 2469 return 0; 2470 } 2471 2472 if (readonly) 2473 return 1; 2474 2475 /* Check that feature set is OK for a read-write mount */ 2476 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { 2477 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " 2478 "unsupported optional features (%x)", 2479 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & 2480 ~EXT4_FEATURE_RO_COMPAT_SUPP)); 2481 return 0; 2482 } 2483 /* 2484 * Large file size enabled file system can only be mounted 2485 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF 2486 */ 2487 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 2488 if (sizeof(blkcnt_t) < sizeof(u64)) { 2489 ext4_msg(sb, KERN_ERR, "Filesystem with huge files " 2490 "cannot be mounted RDWR without " 2491 "CONFIG_LBDAF"); 2492 return 0; 2493 } 2494 } 2495 return 1; 2496 } 2497 2498 /* 2499 * This function is called once a day if we have errors logged 2500 * on the file system 2501 */ 2502 static void print_daily_error_info(unsigned long arg) 2503 { 2504 struct super_block *sb = (struct super_block *) arg; 2505 struct ext4_sb_info *sbi; 2506 struct ext4_super_block *es; 2507 2508 sbi = EXT4_SB(sb); 2509 es = sbi->s_es; 2510 2511 if (es->s_error_count) 2512 ext4_msg(sb, KERN_NOTICE, "error count: %u", 2513 le32_to_cpu(es->s_error_count)); 2514 if (es->s_first_error_time) { 2515 printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", 2516 sb->s_id, le32_to_cpu(es->s_first_error_time), 2517 (int) sizeof(es->s_first_error_func), 2518 es->s_first_error_func, 2519 le32_to_cpu(es->s_first_error_line)); 2520 if (es->s_first_error_ino) 2521 printk(": inode %u", 2522 le32_to_cpu(es->s_first_error_ino)); 2523 if (es->s_first_error_block) 2524 printk(": block %llu", (unsigned long long) 2525 le64_to_cpu(es->s_first_error_block)); 2526 printk("\n"); 2527 } 2528 if (es->s_last_error_time) { 2529 printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", 2530 sb->s_id, le32_to_cpu(es->s_last_error_time), 2531 (int) sizeof(es->s_last_error_func), 2532 es->s_last_error_func, 2533 le32_to_cpu(es->s_last_error_line)); 2534 if (es->s_last_error_ino) 2535 printk(": inode %u", 2536 le32_to_cpu(es->s_last_error_ino)); 2537 if (es->s_last_error_block) 2538 printk(": block %llu", (unsigned long long) 2539 le64_to_cpu(es->s_last_error_block)); 2540 printk("\n"); 2541 } 2542 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ 2543 } 2544 2545 static int ext4_fill_super(struct super_block *sb, void *data, int silent) 2546 __releases(kernel_lock) 2547 __acquires(kernel_lock) 2548 { 2549 char *orig_data = kstrdup(data, GFP_KERNEL); 2550 struct buffer_head *bh; 2551 struct ext4_super_block *es = NULL; 2552 struct ext4_sb_info *sbi; 2553 ext4_fsblk_t block; 2554 ext4_fsblk_t sb_block = get_sb_block(&data); 2555 ext4_fsblk_t logical_sb_block; 2556 unsigned long offset = 0; 2557 unsigned long journal_devnum = 0; 2558 unsigned long def_mount_opts; 2559 struct inode *root; 2560 char *cp; 2561 const char *descr; 2562 int ret = -ENOMEM; 2563 int blocksize; 2564 unsigned int db_count; 2565 unsigned int i; 2566 int needs_recovery, has_huge_files; 2567 __u64 
blocks_count; 2568 int err; 2569 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 2570 2571 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 2572 if (!sbi) 2573 goto out_free_orig; 2574 2575 sbi->s_blockgroup_lock = 2576 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); 2577 if (!sbi->s_blockgroup_lock) { 2578 kfree(sbi); 2579 goto out_free_orig; 2580 } 2581 sb->s_fs_info = sbi; 2582 sbi->s_mount_opt = 0; 2583 sbi->s_resuid = EXT4_DEF_RESUID; 2584 sbi->s_resgid = EXT4_DEF_RESGID; 2585 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; 2586 sbi->s_sb_block = sb_block; 2587 if (sb->s_bdev->bd_part) 2588 sbi->s_sectors_written_start = 2589 part_stat_read(sb->s_bdev->bd_part, sectors[1]); 2590 2591 unlock_kernel(); 2592 2593 /* Cleanup superblock name */ 2594 for (cp = sb->s_id; (cp = strchr(cp, '/'));) 2595 *cp = '!'; 2596 2597 ret = -EINVAL; 2598 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); 2599 if (!blocksize) { 2600 ext4_msg(sb, KERN_ERR, "unable to set blocksize"); 2601 goto out_fail; 2602 } 2603 2604 /* 2605 * The ext4 superblock will not be buffer aligned for other than 1kB 2606 * block sizes. We need to calculate the offset from buffer start. 2607 */ 2608 if (blocksize != EXT4_MIN_BLOCK_SIZE) { 2609 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; 2610 offset = do_div(logical_sb_block, blocksize); 2611 } else { 2612 logical_sb_block = sb_block; 2613 } 2614 2615 if (!(bh = sb_bread(sb, logical_sb_block))) { 2616 ext4_msg(sb, KERN_ERR, "unable to read superblock"); 2617 goto out_fail; 2618 } 2619 /* 2620 * Note: s_es must be initialized as soon as possible because 2621 * some ext4 macro-instructions depend on its value 2622 */ 2623 es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); 2624 sbi->s_es = es; 2625 sb->s_magic = le16_to_cpu(es->s_magic); 2626 if (sb->s_magic != EXT4_SUPER_MAGIC) 2627 goto cantfind_ext4; 2628 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 2629 2630 /* Set defaults before we parse the mount options */ 2631 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 2632 if (def_mount_opts & EXT4_DEFM_DEBUG) 2633 set_opt(sbi->s_mount_opt, DEBUG); 2634 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { 2635 ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups", 2636 "2.6.38"); 2637 set_opt(sbi->s_mount_opt, GRPID); 2638 } 2639 if (def_mount_opts & EXT4_DEFM_UID16) 2640 set_opt(sbi->s_mount_opt, NO_UID32); 2641 #ifdef CONFIG_EXT4_FS_XATTR 2642 if (def_mount_opts & EXT4_DEFM_XATTR_USER) 2643 set_opt(sbi->s_mount_opt, XATTR_USER); 2644 #endif 2645 #ifdef CONFIG_EXT4_FS_POSIX_ACL 2646 if (def_mount_opts & EXT4_DEFM_ACL) 2647 set_opt(sbi->s_mount_opt, POSIX_ACL); 2648 #endif 2649 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 2650 set_opt(sbi->s_mount_opt, JOURNAL_DATA); 2651 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 2652 set_opt(sbi->s_mount_opt, ORDERED_DATA); 2653 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 2654 set_opt(sbi->s_mount_opt, WRITEBACK_DATA); 2655 2656 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) 2657 set_opt(sbi->s_mount_opt, ERRORS_PANIC); 2658 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) 2659 set_opt(sbi->s_mount_opt, ERRORS_CONT); 2660 else 2661 set_opt(sbi->s_mount_opt, ERRORS_RO); 2662 if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) 2663 set_opt(sbi->s_mount_opt, BLOCK_VALIDITY); 2664 if (def_mount_opts & EXT4_DEFM_DISCARD) 2665 set_opt(sbi->s_mount_opt, DISCARD); 2666 2667 sbi->s_resuid = 
le16_to_cpu(es->s_def_resuid); 2668 sbi->s_resgid = le16_to_cpu(es->s_def_resgid); 2669 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 2670 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 2671 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 2672 2673 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) 2674 set_opt(sbi->s_mount_opt, BARRIER); 2675 2676 /* 2677 * enable delayed allocation by default 2678 * Use -o nodelalloc to turn it off 2679 */ 2680 if (!IS_EXT3_SB(sb) && 2681 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 2682 set_opt(sbi->s_mount_opt, DELALLOC); 2683 2684 if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, 2685 &journal_devnum, &journal_ioprio, NULL, 0)) { 2686 ext4_msg(sb, KERN_WARNING, 2687 "failed to parse options in superblock: %s", 2688 sbi->s_es->s_mount_opts); 2689 } 2690 if (!parse_options((char *) data, sb, &journal_devnum, 2691 &journal_ioprio, NULL, 0)) 2692 goto failed_mount; 2693 2694 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 2695 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); 2696 2697 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 2698 (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) || 2699 EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) || 2700 EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U))) 2701 ext4_msg(sb, KERN_WARNING, 2702 "feature flags set on rev 0 fs, " 2703 "running e2fsck is recommended"); 2704 2705 /* 2706 * Check feature flags regardless of the revision level, since we 2707 * previously didn't change the revision level when setting the flags, 2708 * so there is a chance incompat flags are set on a rev 0 filesystem. 2709 */ 2710 if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) 2711 goto failed_mount; 2712 2713 blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); 2714 2715 if (blocksize < EXT4_MIN_BLOCK_SIZE || 2716 blocksize > EXT4_MAX_BLOCK_SIZE) { 2717 ext4_msg(sb, KERN_ERR, 2718 "Unsupported filesystem blocksize %d", blocksize); 2719 goto failed_mount; 2720 } 2721 2722 if (sb->s_blocksize != blocksize) { 2723 /* Validate the filesystem blocksize */ 2724 if (!sb_set_blocksize(sb, blocksize)) { 2725 ext4_msg(sb, KERN_ERR, "bad block size %d", 2726 blocksize); 2727 goto failed_mount; 2728 } 2729 2730 brelse(bh); 2731 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; 2732 offset = do_div(logical_sb_block, blocksize); 2733 bh = sb_bread(sb, logical_sb_block); 2734 if (!bh) { 2735 ext4_msg(sb, KERN_ERR, 2736 "Can't read superblock on 2nd try"); 2737 goto failed_mount; 2738 } 2739 es = (struct ext4_super_block *)(((char *)bh->b_data) + offset); 2740 sbi->s_es = es; 2741 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { 2742 ext4_msg(sb, KERN_ERR, 2743 "Magic mismatch, very weird!"); 2744 goto failed_mount; 2745 } 2746 } 2747 2748 has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb, 2749 EXT4_FEATURE_RO_COMPAT_HUGE_FILE); 2750 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, 2751 has_huge_files); 2752 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); 2753 2754 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { 2755 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; 2756 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; 2757 } else { 2758 sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 2759 sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 2760 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 2761 (!is_power_of_2(sbi->s_inode_size)) || 2762 (sbi->s_inode_size > blocksize)) { 2763 ext4_msg(sb, KERN_ERR, 2764 "unsupported inode size: %d", 2765 sbi->s_inode_size); 2766 goto failed_mount; 2767 } 2768 if 
(sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) 2769 sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2); 2770 } 2771 2772 sbi->s_desc_size = le16_to_cpu(es->s_desc_size); 2773 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) { 2774 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || 2775 sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 2776 !is_power_of_2(sbi->s_desc_size)) { 2777 ext4_msg(sb, KERN_ERR, 2778 "unsupported descriptor size %lu", 2779 sbi->s_desc_size); 2780 goto failed_mount; 2781 } 2782 } else 2783 sbi->s_desc_size = EXT4_MIN_DESC_SIZE; 2784 2785 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); 2786 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); 2787 if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) 2788 goto cantfind_ext4; 2789 2790 sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); 2791 if (sbi->s_inodes_per_block == 0) 2792 goto cantfind_ext4; 2793 sbi->s_itb_per_group = sbi->s_inodes_per_group / 2794 sbi->s_inodes_per_block; 2795 sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); 2796 sbi->s_sbh = bh; 2797 sbi->s_mount_state = le16_to_cpu(es->s_state); 2798 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); 2799 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); 2800 2801 for (i = 0; i < 4; i++) 2802 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 2803 sbi->s_def_hash_version = es->s_def_hash_version; 2804 i = le32_to_cpu(es->s_flags); 2805 if (i & EXT2_FLAGS_UNSIGNED_HASH) 2806 sbi->s_hash_unsigned = 3; 2807 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 2808 #ifdef __CHAR_UNSIGNED__ 2809 es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 2810 sbi->s_hash_unsigned = 3; 2811 #else 2812 es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 2813 #endif 2814 sb->s_dirt = 1; 2815 } 2816 2817 if (sbi->s_blocks_per_group > blocksize * 8) { 2818 ext4_msg(sb, KERN_ERR, 2819 "#blocks per group too big: %lu", 2820 sbi->s_blocks_per_group); 2821 goto failed_mount; 2822 } 2823 if (sbi->s_inodes_per_group > blocksize * 8) { 2824 ext4_msg(sb, KERN_ERR, 2825 "#inodes per group too big: %lu", 2826 sbi->s_inodes_per_group); 2827 goto failed_mount; 2828 } 2829 2830 /* 2831 * Test whether we have more sectors than will fit in sector_t, 2832 * and whether the max offset is addressable by the page cache. 2833 */ 2834 if ((ext4_blocks_count(es) > 2835 (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) || 2836 (ext4_blocks_count(es) > 2837 (pgoff_t)(~0ULL) >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits))) { 2838 ext4_msg(sb, KERN_ERR, "filesystem" 2839 " too large to mount safely on this system"); 2840 if (sizeof(sector_t) < 8) 2841 ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); 2842 ret = -EFBIG; 2843 goto failed_mount; 2844 } 2845 2846 if (EXT4_BLOCKS_PER_GROUP(sb) == 0) 2847 goto cantfind_ext4; 2848 2849 /* check blocks count against device size */ 2850 blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; 2851 if (blocks_count && ext4_blocks_count(es) > blocks_count) { 2852 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " 2853 "exceeds size of device (%llu blocks)", 2854 ext4_blocks_count(es), blocks_count); 2855 goto failed_mount; 2856 } 2857 2858 /* 2859 * It makes no sense for the first data block to be beyond the end 2860 * of the filesystem. 
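* The group count derived just below is ceil((blocks_count - first_data_block) / blocks_per_group); for instance, a filesystem of 2^31 4 KiB blocks (8 TiB) with the usual 32768 blocks per group comes to 65536 groups.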
2861 */ 2862 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { 2863 ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 2864 "block %u is beyond end of filesystem (%llu)", 2865 le32_to_cpu(es->s_first_data_block), 2866 ext4_blocks_count(es)); 2867 goto failed_mount; 2868 } 2869 blocks_count = (ext4_blocks_count(es) - 2870 le32_to_cpu(es->s_first_data_block) + 2871 EXT4_BLOCKS_PER_GROUP(sb) - 1); 2872 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 2873 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { 2874 ext4_msg(sb, KERN_WARNING, "groups count too large: %u " 2875 "(block count %llu, first data block %u, " 2876 "blocks per group %lu)", sbi->s_groups_count, 2877 ext4_blocks_count(es), 2878 le32_to_cpu(es->s_first_data_block), 2879 EXT4_BLOCKS_PER_GROUP(sb)); 2880 goto failed_mount; 2881 } 2882 sbi->s_groups_count = blocks_count; 2883 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 2884 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 2885 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 2886 EXT4_DESC_PER_BLOCK(sb); 2887 sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *), 2888 GFP_KERNEL); 2889 if (sbi->s_group_desc == NULL) { 2890 ext4_msg(sb, KERN_ERR, "not enough memory"); 2891 goto failed_mount; 2892 } 2893 2894 #ifdef CONFIG_PROC_FS 2895 if (ext4_proc_root) 2896 sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root); 2897 #endif 2898 2899 bgl_lock_init(sbi->s_blockgroup_lock); 2900 2901 for (i = 0; i < db_count; i++) { 2902 block = descriptor_loc(sb, logical_sb_block, i); 2903 sbi->s_group_desc[i] = sb_bread(sb, block); 2904 if (!sbi->s_group_desc[i]) { 2905 ext4_msg(sb, KERN_ERR, 2906 "can't read group descriptor %d", i); 2907 db_count = i; 2908 goto failed_mount2; 2909 } 2910 } 2911 if (!ext4_check_descriptors(sb)) { 2912 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 2913 goto failed_mount2; 2914 } 2915 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) 2916 if (!ext4_fill_flex_info(sb)) { 2917 ext4_msg(sb, KERN_ERR, 2918 "unable to initialize " 2919 "flex_bg meta info!"); 2920 goto failed_mount2; 2921 } 2922 2923 sbi->s_gdb_count = db_count; 2924 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 2925 spin_lock_init(&sbi->s_next_gen_lock); 2926 2927 sbi->s_stripe = ext4_get_stripe_size(sbi); 2928 sbi->s_max_writeback_mb_bump = 128; 2929 2930 /* 2931 * set up enough so that it can read an inode 2932 */ 2933 if (!test_opt(sb, NOLOAD) && 2934 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) 2935 sb->s_op = &ext4_sops; 2936 else 2937 sb->s_op = &ext4_nojournal_sops; 2938 sb->s_export_op = &ext4_export_ops; 2939 sb->s_xattr = ext4_xattr_handlers; 2940 #ifdef CONFIG_QUOTA 2941 sb->s_qcop = &ext4_qctl_operations; 2942 sb->dq_op = &ext4_quota_operations; 2943 #endif 2944 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ 2945 mutex_init(&sbi->s_orphan_lock); 2946 mutex_init(&sbi->s_resize_lock); 2947 2948 sb->s_root = NULL; 2949 2950 needs_recovery = (es->s_last_orphan != 0 || 2951 EXT4_HAS_INCOMPAT_FEATURE(sb, 2952 EXT4_FEATURE_INCOMPAT_RECOVER)); 2953 2954 /* 2955 * The first inode we look at is the journal inode. Don't try 2956 * root first: it may be modified in the journal!
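* ext4_load_journal() below handles both an internal journal inode and an external journal device; a mount with noload while recovery is still required is refused unless it is read-only, and when no journal is to be used at all control jumps ahead to the no_journal label.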
2957 */ 2958 if (!test_opt(sb, NOLOAD) && 2959 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { 2960 if (ext4_load_journal(sb, es, journal_devnum)) 2961 goto failed_mount3; 2962 } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && 2963 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { 2964 ext4_msg(sb, KERN_ERR, "required journal recovery " 2965 "suppressed and not mounted read-only"); 2966 goto failed_mount_wq; 2967 } else { 2968 clear_opt(sbi->s_mount_opt, DATA_FLAGS); 2969 set_opt(sbi->s_mount_opt, WRITEBACK_DATA); 2970 sbi->s_journal = NULL; 2971 needs_recovery = 0; 2972 goto no_journal; 2973 } 2974 2975 if (ext4_blocks_count(es) > 0xffffffffULL && 2976 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 2977 JBD2_FEATURE_INCOMPAT_64BIT)) { 2978 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 2979 goto failed_mount_wq; 2980 } 2981 2982 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 2983 jbd2_journal_set_features(sbi->s_journal, 2984 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 2985 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 2986 } else if (test_opt(sb, JOURNAL_CHECKSUM)) { 2987 jbd2_journal_set_features(sbi->s_journal, 2988 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0); 2989 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 2990 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 2991 } else { 2992 jbd2_journal_clear_features(sbi->s_journal, 2993 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 2994 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 2995 } 2996 2997 /* We have now updated the journal if required, so we can 2998 * validate the data journaling mode. */ 2999 switch (test_opt(sb, DATA_FLAGS)) { 3000 case 0: 3001 /* No mode set, assume a default based on the journal 3002 * capabilities: ORDERED_DATA if the journal can 3003 * cope, else JOURNAL_DATA 3004 */ 3005 if (jbd2_journal_check_available_features 3006 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) 3007 set_opt(sbi->s_mount_opt, ORDERED_DATA); 3008 else 3009 set_opt(sbi->s_mount_opt, JOURNAL_DATA); 3010 break; 3011 3012 case EXT4_MOUNT_ORDERED_DATA: 3013 case EXT4_MOUNT_WRITEBACK_DATA: 3014 if (!jbd2_journal_check_available_features 3015 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 3016 ext4_msg(sb, KERN_ERR, "Journal does not support " 3017 "requested data journaling mode"); 3018 goto failed_mount_wq; 3019 } 3020 default: 3021 break; 3022 } 3023 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); 3024 3025 no_journal: 3026 err = percpu_counter_init(&sbi->s_freeblocks_counter, 3027 ext4_count_free_blocks(sb)); 3028 if (!err) 3029 err = percpu_counter_init(&sbi->s_freeinodes_counter, 3030 ext4_count_free_inodes(sb)); 3031 if (!err) 3032 err = percpu_counter_init(&sbi->s_dirs_counter, 3033 ext4_count_dirs(sb)); 3034 if (!err) 3035 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); 3036 if (err) { 3037 ext4_msg(sb, KERN_ERR, "insufficient memory"); 3038 goto failed_mount_wq; 3039 } 3040 3041 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); 3042 if (!EXT4_SB(sb)->dio_unwritten_wq) { 3043 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); 3044 goto failed_mount_wq; 3045 } 3046 3047 /* 3048 * The jbd2_journal_load will have done any necessary log recovery, 3049 * so we can safely mount the rest of the filesystem now. 
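* What follows reads and validates the root inode, instantiates the root dentry, and finishes mount-time setup: the extra inode size hints, the block validity zone, mballoc, the sysfs kobject and orphan cleanup.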
3050 */ 3051 3052 root = ext4_iget(sb, EXT4_ROOT_INO); 3053 if (IS_ERR(root)) { 3054 ext4_msg(sb, KERN_ERR, "get root inode failed"); 3055 ret = PTR_ERR(root); 3056 goto failed_mount4; 3057 } 3058 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 3059 iput(root); 3060 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 3061 goto failed_mount4; 3062 } 3063 sb->s_root = d_alloc_root(root); 3064 if (!sb->s_root) { 3065 ext4_msg(sb, KERN_ERR, "get root dentry failed"); 3066 iput(root); 3067 ret = -ENOMEM; 3068 goto failed_mount4; 3069 } 3070 3071 ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY); 3072 3073 /* determine the minimum size of new large inodes, if present */ 3074 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 3075 sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 3076 EXT4_GOOD_OLD_INODE_SIZE; 3077 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3078 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) { 3079 if (sbi->s_want_extra_isize < 3080 le16_to_cpu(es->s_want_extra_isize)) 3081 sbi->s_want_extra_isize = 3082 le16_to_cpu(es->s_want_extra_isize); 3083 if (sbi->s_want_extra_isize < 3084 le16_to_cpu(es->s_min_extra_isize)) 3085 sbi->s_want_extra_isize = 3086 le16_to_cpu(es->s_min_extra_isize); 3087 } 3088 } 3089 /* Check if enough inode space is available */ 3090 if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > 3091 sbi->s_inode_size) { 3092 sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 3093 EXT4_GOOD_OLD_INODE_SIZE; 3094 ext4_msg(sb, KERN_INFO, "required extra inode space not " 3095 "available"); 3096 } 3097 3098 if (test_opt(sb, DELALLOC) && 3099 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 3100 ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - " 3101 "requested data journaling mode"); 3102 clear_opt(sbi->s_mount_opt, DELALLOC); 3103 } 3104 if (test_opt(sb, DIOREAD_NOLOCK)) { 3105 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 3106 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " 3107 "option - requested data journaling mode"); 3108 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); 3109 } 3110 if (sb->s_blocksize < PAGE_SIZE) { 3111 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " 3112 "option - block size is too small"); 3113 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); 3114 } 3115 } 3116 3117 err = ext4_setup_system_zone(sb); 3118 if (err) { 3119 ext4_msg(sb, KERN_ERR, "failed to initialize system " 3120 "zone (%d)", err); 3121 goto failed_mount4; 3122 } 3123 3124 ext4_ext_init(sb); 3125 err = ext4_mb_init(sb, needs_recovery); 3126 if (err) { 3127 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 3128 err); 3129 goto failed_mount4; 3130 } 3131 3132 sbi->s_kobj.kset = ext4_kset; 3133 init_completion(&sbi->s_kobj_unregister); 3134 err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL, 3135 "%s", sb->s_id); 3136 if (err) { 3137 ext4_mb_release(sb); 3138 ext4_ext_release(sb); 3139 goto failed_mount4; 3140 } 3141 3142 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 3143 ext4_orphan_cleanup(sb, es); 3144 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 3145 if (needs_recovery) { 3146 ext4_msg(sb, KERN_INFO, "recovery complete"); 3147 ext4_mark_recovery_complete(sb, es); 3148 } 3149 if (EXT4_SB(sb)->s_journal) { 3150 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 3151 descr = " journalled data mode"; 3152 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 3153 descr = " ordered data mode"; 3154 else 3155 descr = " writeback data mode"; 3156 } else 3157 descr = "out journal"; 3158 3159 ext4_msg(sb,
KERN_INFO, "mounted filesystem with%s. " 3160 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, 3161 *sbi->s_es->s_mount_opts ? "; " : "", orig_data); 3162 3163 init_timer(&sbi->s_err_report); 3164 sbi->s_err_report.function = print_daily_error_info; 3165 sbi->s_err_report.data = (unsigned long) sb; 3166 if (es->s_error_count) 3167 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 3168 3169 lock_kernel(); 3170 kfree(orig_data); 3171 return 0; 3172 3173 cantfind_ext4: 3174 if (!silent) 3175 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 3176 goto failed_mount; 3177 3178 failed_mount4: 3179 ext4_msg(sb, KERN_ERR, "mount failed"); 3180 destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); 3181 failed_mount_wq: 3182 ext4_release_system_zone(sb); 3183 if (sbi->s_journal) { 3184 jbd2_journal_destroy(sbi->s_journal); 3185 sbi->s_journal = NULL; 3186 } 3187 percpu_counter_destroy(&sbi->s_freeblocks_counter); 3188 percpu_counter_destroy(&sbi->s_freeinodes_counter); 3189 percpu_counter_destroy(&sbi->s_dirs_counter); 3190 percpu_counter_destroy(&sbi->s_dirtyblocks_counter); 3191 failed_mount3: 3192 if (sbi->s_flex_groups) { 3193 if (is_vmalloc_addr(sbi->s_flex_groups)) 3194 vfree(sbi->s_flex_groups); 3195 else 3196 kfree(sbi->s_flex_groups); 3197 } 3198 failed_mount2: 3199 for (i = 0; i < db_count; i++) 3200 brelse(sbi->s_group_desc[i]); 3201 kfree(sbi->s_group_desc); 3202 failed_mount: 3203 if (sbi->s_proc) { 3204 remove_proc_entry(sb->s_id, ext4_proc_root); 3205 } 3206 #ifdef CONFIG_QUOTA 3207 for (i = 0; i < MAXQUOTAS; i++) 3208 kfree(sbi->s_qf_names[i]); 3209 #endif 3210 ext4_blkdev_remove(sbi); 3211 brelse(bh); 3212 out_fail: 3213 sb->s_fs_info = NULL; 3214 kfree(sbi->s_blockgroup_lock); 3215 kfree(sbi); 3216 lock_kernel(); 3217 out_free_orig: 3218 kfree(orig_data); 3219 return ret; 3220 } 3221 3222 /* 3223 * Setup any per-fs journal parameters now. We'll do this both on 3224 * initial mount, once the journal has been initialised but before we've 3225 * done any recovery; and again on any subsequent remount. 3226 */ 3227 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) 3228 { 3229 struct ext4_sb_info *sbi = EXT4_SB(sb); 3230 3231 journal->j_commit_interval = sbi->s_commit_interval; 3232 journal->j_min_batch_time = sbi->s_min_batch_time; 3233 journal->j_max_batch_time = sbi->s_max_batch_time; 3234 3235 write_lock(&journal->j_state_lock); 3236 if (test_opt(sb, BARRIER)) 3237 journal->j_flags |= JBD2_BARRIER; 3238 else 3239 journal->j_flags &= ~JBD2_BARRIER; 3240 if (test_opt(sb, DATA_ERR_ABORT)) 3241 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; 3242 else 3243 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; 3244 write_unlock(&journal->j_state_lock); 3245 } 3246 3247 static journal_t *ext4_get_journal(struct super_block *sb, 3248 unsigned int journal_inum) 3249 { 3250 struct inode *journal_inode; 3251 journal_t *journal; 3252 3253 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); 3254 3255 /* First, test for the existence of a valid inode on disk. Bad 3256 * things happen if we iget() an unused inode, as the subsequent 3257 * iput() will try to delete it. 
*/ 3258 3259 journal_inode = ext4_iget(sb, journal_inum); 3260 if (IS_ERR(journal_inode)) { 3261 ext4_msg(sb, KERN_ERR, "no journal found"); 3262 return NULL; 3263 } 3264 if (!journal_inode->i_nlink) { 3265 make_bad_inode(journal_inode); 3266 iput(journal_inode); 3267 ext4_msg(sb, KERN_ERR, "journal inode is deleted"); 3268 return NULL; 3269 } 3270 3271 jbd_debug(2, "Journal inode found at %p: %lld bytes\n", 3272 journal_inode, journal_inode->i_size); 3273 if (!S_ISREG(journal_inode->i_mode)) { 3274 ext4_msg(sb, KERN_ERR, "invalid journal inode"); 3275 iput(journal_inode); 3276 return NULL; 3277 } 3278 3279 journal = jbd2_journal_init_inode(journal_inode); 3280 if (!journal) { 3281 ext4_msg(sb, KERN_ERR, "Could not load journal inode"); 3282 iput(journal_inode); 3283 return NULL; 3284 } 3285 journal->j_private = sb; 3286 ext4_init_journal_params(sb, journal); 3287 return journal; 3288 } 3289 3290 static journal_t *ext4_get_dev_journal(struct super_block *sb, 3291 dev_t j_dev) 3292 { 3293 struct buffer_head *bh; 3294 journal_t *journal; 3295 ext4_fsblk_t start; 3296 ext4_fsblk_t len; 3297 int hblock, blocksize; 3298 ext4_fsblk_t sb_block; 3299 unsigned long offset; 3300 struct ext4_super_block *es; 3301 struct block_device *bdev; 3302 3303 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); 3304 3305 bdev = ext4_blkdev_get(j_dev, sb); 3306 if (bdev == NULL) 3307 return NULL; 3308 3309 if (bd_claim(bdev, sb)) { 3310 ext4_msg(sb, KERN_ERR, 3311 "failed to claim external journal device"); 3312 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 3313 return NULL; 3314 } 3315 3316 blocksize = sb->s_blocksize; 3317 hblock = bdev_logical_block_size(bdev); 3318 if (blocksize < hblock) { 3319 ext4_msg(sb, KERN_ERR, 3320 "blocksize too small for journal device"); 3321 goto out_bdev; 3322 } 3323 3324 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; 3325 offset = EXT4_MIN_BLOCK_SIZE % blocksize; 3326 set_blocksize(bdev, blocksize); 3327 if (!(bh = __bread(bdev, sb_block, blocksize))) { 3328 ext4_msg(sb, KERN_ERR, "couldn't read superblock of " 3329 "external journal"); 3330 goto out_bdev; 3331 } 3332 3333 es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); 3334 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || 3335 !(le32_to_cpu(es->s_feature_incompat) & 3336 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { 3337 ext4_msg(sb, KERN_ERR, "external journal has " 3338 "bad superblock"); 3339 brelse(bh); 3340 goto out_bdev; 3341 } 3342 3343 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { 3344 ext4_msg(sb, KERN_ERR, "journal UUID does not match"); 3345 brelse(bh); 3346 goto out_bdev; 3347 } 3348 3349 len = ext4_blocks_count(es); 3350 start = sb_block + 1; 3351 brelse(bh); /* we're done with the superblock */ 3352 3353 journal = jbd2_journal_init_dev(bdev, sb->s_bdev, 3354 start, len, blocksize); 3355 if (!journal) { 3356 ext4_msg(sb, KERN_ERR, "failed to create device journal"); 3357 goto out_bdev; 3358 } 3359 journal->j_private = sb; 3360 ll_rw_block(READ, 1, &journal->j_sb_buffer); 3361 wait_on_buffer(journal->j_sb_buffer); 3362 if (!buffer_uptodate(journal->j_sb_buffer)) { 3363 ext4_msg(sb, KERN_ERR, "I/O error on journal device"); 3364 goto out_journal; 3365 } 3366 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { 3367 ext4_msg(sb, KERN_ERR, "External journal has more than one " 3368 "user (unsupported) - %d", 3369 be32_to_cpu(journal->j_superblock->s_nr_users)); 3370 goto out_journal; 3371 } 3372 EXT4_SB(sb)->journal_bdev = bdev; 3373 ext4_init_journal_params(sb, 
journal); 3374 return journal; 3375 3376 out_journal: 3377 jbd2_journal_destroy(journal); 3378 out_bdev: 3379 ext4_blkdev_put(bdev); 3380 return NULL; 3381 } 3382 3383 static int ext4_load_journal(struct super_block *sb, 3384 struct ext4_super_block *es, 3385 unsigned long journal_devnum) 3386 { 3387 journal_t *journal; 3388 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); 3389 dev_t journal_dev; 3390 int err = 0; 3391 int really_read_only; 3392 3393 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); 3394 3395 if (journal_devnum && 3396 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 3397 ext4_msg(sb, KERN_INFO, "external journal device major/minor " 3398 "numbers have changed"); 3399 journal_dev = new_decode_dev(journal_devnum); 3400 } else 3401 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); 3402 3403 really_read_only = bdev_read_only(sb->s_bdev); 3404 3405 /* 3406 * Are we loading a blank journal or performing recovery after a 3407 * crash? For recovery, we need to check in advance whether we 3408 * can get read-write access to the device. 3409 */ 3410 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { 3411 if (sb->s_flags & MS_RDONLY) { 3412 ext4_msg(sb, KERN_INFO, "INFO: recovery " 3413 "required on readonly filesystem"); 3414 if (really_read_only) { 3415 ext4_msg(sb, KERN_ERR, "write access " 3416 "unavailable, cannot proceed"); 3417 return -EROFS; 3418 } 3419 ext4_msg(sb, KERN_INFO, "write access will " 3420 "be enabled during recovery"); 3421 } 3422 } 3423 3424 if (journal_inum && journal_dev) { 3425 ext4_msg(sb, KERN_ERR, "filesystem has both journal " 3426 "and inode journals!"); 3427 return -EINVAL; 3428 } 3429 3430 if (journal_inum) { 3431 if (!(journal = ext4_get_journal(sb, journal_inum))) 3432 return -EINVAL; 3433 } else { 3434 if (!(journal = ext4_get_dev_journal(sb, journal_dev))) 3435 return -EINVAL; 3436 } 3437 3438 if (!(journal->j_flags & JBD2_BARRIER)) 3439 ext4_msg(sb, KERN_INFO, "barriers disabled"); 3440 3441 if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { 3442 err = jbd2_journal_update_format(journal); 3443 if (err) { 3444 ext4_msg(sb, KERN_ERR, "error updating journal"); 3445 jbd2_journal_destroy(journal); 3446 return err; 3447 } 3448 } 3449 3450 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) 3451 err = jbd2_journal_wipe(journal, !really_read_only); 3452 if (!err) { 3453 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); 3454 if (save) 3455 memcpy(save, ((char *) es) + 3456 EXT4_S_ERR_START, EXT4_S_ERR_LEN); 3457 err = jbd2_journal_load(journal); 3458 if (save) 3459 memcpy(((char *) es) + EXT4_S_ERR_START, 3460 save, EXT4_S_ERR_LEN); 3461 kfree(save); 3462 } 3463 3464 if (err) { 3465 ext4_msg(sb, KERN_ERR, "error loading journal"); 3466 jbd2_journal_destroy(journal); 3467 return err; 3468 } 3469 3470 EXT4_SB(sb)->s_journal = journal; 3471 ext4_clear_journal_err(sb, es); 3472 3473 if (journal_devnum && 3474 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 3475 es->s_journal_dev = cpu_to_le32(journal_devnum); 3476 3477 /* Make sure we flush the recovery flag to disk. */ 3478 ext4_commit_super(sb, 1); 3479 } 3480 3481 return 0; 3482 } 3483 3484 static int ext4_commit_super(struct super_block *sb, int sync) 3485 { 3486 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 3487 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; 3488 int error = 0; 3489 3490 if (!sbh) 3491 return error; 3492 if (buffer_write_io_error(sbh)) { 3493 /* 3494 * Oh, dear. 
A previous attempt to write the 3495 * superblock failed. This could happen because the 3496 * USB device was yanked out. Or it could happen to 3497 * be a transient write error and maybe the block will 3498 * be remapped. Nothing we can do but to retry the 3499 * write and hope for the best. 3500 */ 3501 ext4_msg(sb, KERN_ERR, "previous I/O error to " 3502 "superblock detected"); 3503 clear_buffer_write_io_error(sbh); 3504 set_buffer_uptodate(sbh); 3505 } 3506 /* 3507 * If the file system is mounted read-only, don't update the 3508 * superblock write time. This avoids updating the superblock 3509 * write time when we are mounting the root file system 3510 * read/only but we need to replay the journal; at that point, 3511 * for people who are east of GMT and who make their clock 3512 * tick in localtime for Windows bug-for-bug compatibility, 3513 * the clock is set in the future, and this will cause e2fsck 3514 * to complain and force a full file system check. 3515 */ 3516 if (!(sb->s_flags & MS_RDONLY)) 3517 es->s_wtime = cpu_to_le32(get_seconds()); 3518 if (sb->s_bdev->bd_part) 3519 es->s_kbytes_written = 3520 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + 3521 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - 3522 EXT4_SB(sb)->s_sectors_written_start) >> 1)); 3523 else 3524 es->s_kbytes_written = 3525 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); 3526 ext4_free_blocks_count_set(es, percpu_counter_sum_positive( 3527 &EXT4_SB(sb)->s_freeblocks_counter)); 3528 es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive( 3529 &EXT4_SB(sb)->s_freeinodes_counter)); 3530 sb->s_dirt = 0; 3531 BUFFER_TRACE(sbh, "marking dirty"); 3532 mark_buffer_dirty(sbh); 3533 if (sync) { 3534 error = sync_dirty_buffer(sbh); 3535 if (error) 3536 return error; 3537 3538 error = buffer_write_io_error(sbh); 3539 if (error) { 3540 ext4_msg(sb, KERN_ERR, "I/O error while writing " 3541 "superblock"); 3542 clear_buffer_write_io_error(sbh); 3543 set_buffer_uptodate(sbh); 3544 } 3545 } 3546 return error; 3547 } 3548 3549 /* 3550 * Have we just finished recovery? If so, and if we are mounting (or 3551 * remounting) the filesystem readonly, then we will end up with a 3552 * consistent fs on disk. Record that fact. 3553 */ 3554 static void ext4_mark_recovery_complete(struct super_block *sb, 3555 struct ext4_super_block *es) 3556 { 3557 journal_t *journal = EXT4_SB(sb)->s_journal; 3558 3559 if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { 3560 BUG_ON(journal != NULL); 3561 return; 3562 } 3563 jbd2_journal_lock_updates(journal); 3564 if (jbd2_journal_flush(journal) < 0) 3565 goto out; 3566 3567 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) && 3568 sb->s_flags & MS_RDONLY) { 3569 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3570 ext4_commit_super(sb, 1); 3571 } 3572 3573 out: 3574 jbd2_journal_unlock_updates(journal); 3575 } 3576 3577 /* 3578 * If we are mounting (or read-write remounting) a filesystem whose journal 3579 * has recorded an error from a previous lifetime, move that error to the 3580 * main filesystem now. 
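* Concretely: EXT4_ERROR_FS is set in both s_mount_state and the on-disk s_state, the superblock is committed, and only then is the journal's error cleared so the record is not lost if we crash again.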
3581 */ 3582 static void ext4_clear_journal_err(struct super_block *sb, 3583 struct ext4_super_block *es) 3584 { 3585 journal_t *journal; 3586 int j_errno; 3587 const char *errstr; 3588 3589 BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)); 3590 3591 journal = EXT4_SB(sb)->s_journal; 3592 3593 /* 3594 * Now check for any error status which may have been recorded in the 3595 * journal by a prior ext4_error() or ext4_abort() 3596 */ 3597 3598 j_errno = jbd2_journal_errno(journal); 3599 if (j_errno) { 3600 char nbuf[16]; 3601 3602 errstr = ext4_decode_error(sb, j_errno, nbuf); 3603 ext4_warning(sb, "Filesystem error recorded " 3604 "from previous mount: %s", errstr); 3605 ext4_warning(sb, "Marking fs in need of filesystem check."); 3606 3607 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 3608 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 3609 ext4_commit_super(sb, 1); 3610 3611 jbd2_journal_clear_err(journal); 3612 } 3613 } 3614 3615 /* 3616 * Force the running and committing transactions to commit, 3617 * and wait on the commit. 3618 */ 3619 int ext4_force_commit(struct super_block *sb) 3620 { 3621 journal_t *journal; 3622 int ret = 0; 3623 3624 if (sb->s_flags & MS_RDONLY) 3625 return 0; 3626 3627 journal = EXT4_SB(sb)->s_journal; 3628 if (journal) { 3629 vfs_check_frozen(sb, SB_FREEZE_TRANS); 3630 ret = ext4_journal_force_commit(journal); 3631 } 3632 3633 return ret; 3634 } 3635 3636 static void ext4_write_super(struct super_block *sb) 3637 { 3638 lock_super(sb); 3639 ext4_commit_super(sb, 1); 3640 unlock_super(sb); 3641 } 3642 3643 static int ext4_sync_fs(struct super_block *sb, int wait) 3644 { 3645 int ret = 0; 3646 tid_t target; 3647 struct ext4_sb_info *sbi = EXT4_SB(sb); 3648 3649 trace_ext4_sync_fs(sb, wait); 3650 flush_workqueue(sbi->dio_unwritten_wq); 3651 if (jbd2_journal_start_commit(sbi->s_journal, &target)) { 3652 if (wait) 3653 jbd2_log_wait_commit(sbi->s_journal, target); 3654 } 3655 return ret; 3656 } 3657 3658 /* 3659 * LVM calls this function before a (read-only) snapshot is created. This 3660 * gives us a chance to flush the journal completely and mark the fs clean. 3661 */ 3662 static int ext4_freeze(struct super_block *sb) 3663 { 3664 int error = 0; 3665 journal_t *journal; 3666 3667 if (sb->s_flags & MS_RDONLY) 3668 return 0; 3669 3670 journal = EXT4_SB(sb)->s_journal; 3671 3672 /* Now we set up the journal barrier. */ 3673 jbd2_journal_lock_updates(journal); 3674 3675 /* 3676 * Don't clear the needs_recovery flag if we failed to flush 3677 * the journal. 3678 */ 3679 error = jbd2_journal_flush(journal); 3680 if (error < 0) 3681 goto out; 3682 3683 /* Journal blocked and flushed, clear needs_recovery flag. */ 3684 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3685 error = ext4_commit_super(sb, 1); 3686 out: 3687 /* we rely on s_frozen to stop further updates */ 3688 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 3689 return error; 3690 } 3691 3692 /* 3693 * Called by LVM after the snapshot is done. We need to reset the RECOVER 3694 * flag here, even though the filesystem is not technically dirty yet. 3695 */ 3696 static int ext4_unfreeze(struct super_block *sb) 3697 { 3698 if (sb->s_flags & MS_RDONLY) 3699 return 0; 3700 3701 lock_super(sb); 3702 /* Reset the needs_recovery flag before the fs is unlocked. 
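* ext4_freeze() cleared EXT4_FEATURE_INCOMPAT_RECOVER after flushing the journal so the frozen image looks clean; once updates resume the flag has to go back.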
/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return 0;

	lock_super(sb);
	/* Reset the needs_recovery flag before the fs is unlocked. */
	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
	ext4_commit_super(sb, 1);
	unlock_super(sb);
	return 0;
}

static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t n_blocks_count = 0;
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err;
#ifdef CONFIG_QUOTA
	int i;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	lock_kernel();

	/* Store the original options */
	lock_super(sb);
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++)
		old_opts.s_qf_names[i] = sbi->s_qf_names[i];
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	/*
	 * Allow the "check" option to be passed as a remount option.
	 */
	if (!parse_options(data, sb, NULL, &journal_ioprio,
			   &n_blocks_count, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
	    n_blocks_count > ext4_blocks_count(es)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & MS_RDONLY) {
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= MS_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (!ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
3804 */ 3805 for (g = 0; g < sbi->s_groups_count; g++) { 3806 struct ext4_group_desc *gdp = 3807 ext4_get_group_desc(sb, g, NULL); 3808 3809 if (!ext4_group_desc_csum_verify(sbi, g, gdp)) { 3810 ext4_msg(sb, KERN_ERR, 3811 "ext4_remount: Checksum for group %u failed (%u!=%u)", 3812 g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)), 3813 le16_to_cpu(gdp->bg_checksum)); 3814 err = -EINVAL; 3815 goto restore_opts; 3816 } 3817 } 3818 3819 /* 3820 * If we have an unprocessed orphan list hanging 3821 * around from a previously readonly bdev mount, 3822 * require a full umount/remount for now. 3823 */ 3824 if (es->s_last_orphan) { 3825 ext4_msg(sb, KERN_WARNING, "Couldn't " 3826 "remount RDWR because of unprocessed " 3827 "orphan inode list. Please " 3828 "umount/remount instead"); 3829 err = -EINVAL; 3830 goto restore_opts; 3831 } 3832 3833 /* 3834 * Mounting a RDONLY partition read-write, so reread 3835 * and store the current valid flag. (It may have 3836 * been changed by e2fsck since we originally mounted 3837 * the partition.) 3838 */ 3839 if (sbi->s_journal) 3840 ext4_clear_journal_err(sb, es); 3841 sbi->s_mount_state = le16_to_cpu(es->s_state); 3842 if ((err = ext4_group_extend(sb, es, n_blocks_count))) 3843 goto restore_opts; 3844 if (!ext4_setup_super(sb, es, 0)) 3845 sb->s_flags &= ~MS_RDONLY; 3846 enable_quota = 1; 3847 } 3848 } 3849 ext4_setup_system_zone(sb); 3850 if (sbi->s_journal == NULL) 3851 ext4_commit_super(sb, 1); 3852 3853 #ifdef CONFIG_QUOTA 3854 /* Release old quota file names */ 3855 for (i = 0; i < MAXQUOTAS; i++) 3856 if (old_opts.s_qf_names[i] && 3857 old_opts.s_qf_names[i] != sbi->s_qf_names[i]) 3858 kfree(old_opts.s_qf_names[i]); 3859 #endif 3860 unlock_super(sb); 3861 unlock_kernel(); 3862 if (enable_quota) 3863 dquot_resume(sb, -1); 3864 3865 ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); 3866 kfree(orig_data); 3867 return 0; 3868 3869 restore_opts: 3870 sb->s_flags = old_sb_flags; 3871 sbi->s_mount_opt = old_opts.s_mount_opt; 3872 sbi->s_resuid = old_opts.s_resuid; 3873 sbi->s_resgid = old_opts.s_resgid; 3874 sbi->s_commit_interval = old_opts.s_commit_interval; 3875 sbi->s_min_batch_time = old_opts.s_min_batch_time; 3876 sbi->s_max_batch_time = old_opts.s_max_batch_time; 3877 #ifdef CONFIG_QUOTA 3878 sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 3879 for (i = 0; i < MAXQUOTAS; i++) { 3880 if (sbi->s_qf_names[i] && 3881 old_opts.s_qf_names[i] != sbi->s_qf_names[i]) 3882 kfree(sbi->s_qf_names[i]); 3883 sbi->s_qf_names[i] = old_opts.s_qf_names[i]; 3884 } 3885 #endif 3886 unlock_super(sb); 3887 unlock_kernel(); 3888 kfree(orig_data); 3889 return err; 3890 } 3891 3892 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) 3893 { 3894 struct super_block *sb = dentry->d_sb; 3895 struct ext4_sb_info *sbi = EXT4_SB(sb); 3896 struct ext4_super_block *es = sbi->s_es; 3897 u64 fsid; 3898 3899 if (test_opt(sb, MINIX_DF)) { 3900 sbi->s_overhead_last = 0; 3901 } else if (sbi->s_blocks_last != ext4_blocks_count(es)) { 3902 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 3903 ext4_fsblk_t overhead = 0; 3904 3905 /* 3906 * Compute the overhead (FS structures). This is constant 3907 * for a given filesystem unless the number of block groups 3908 * changes so we cache the previous value until it does. 3909 */ 3910 3911 /* 3912 * All of the blocks before first_data_block are 3913 * overhead 3914 */ 3915 overhead = le32_to_cpu(es->s_first_data_block); 3916 3917 /* 3918 * Add the overhead attributed to the superblock and 3919 * block group descriptors. 
		 * block group descriptors.  If the sparse superblocks
		 * feature is turned on, then not all groups have this.
		 */
		for (i = 0; i < ngroups; i++) {
			overhead += ext4_bg_has_super(sb, i) +
				ext4_bg_num_gdb(sb, i);
			cond_resched();
		}

		/*
		 * Every block group has an inode bitmap, a block
		 * bitmap, and an inode table.
		 */
		overhead += ngroups * (2 + sbi->s_itb_per_group);
		sbi->s_overhead_last = overhead;
		smp_wmb();
		sbi->s_blocks_last = ext4_blocks_count(es);
	}

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
	buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
		       percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
	buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
	if (buf->f_bfree < ext4_r_blocks_count(es))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

	return 0;
}

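/*
 * Illustrative example for the overhead computed in ext4_statfs() above
 * (all numbers hypothetical): a 1 GiB filesystem with 4 KiB blocks has
 * 8 block groups of 32768 blocks each and s_first_data_block is 0.  If
 * sparse_super leaves a superblock backup plus one descriptor block in
 * 5 of the 8 groups, the per-group loop adds 5 * (1 + 1) = 10 blocks;
 * with 8192 inodes of 256 bytes per group (s_itb_per_group = 512) the
 * final term adds 8 * (2 + 512) = 4112 blocks, for a total overhead of
 * roughly 4122 blocks (~16 MiB) subtracted from f_blocks.
 */
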
/*
 * Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write.  Otherwise there are possible
 * deadlocks:
 *
 * Process 1                         Process 2
 * ext4_create()                     quota_sync()
 *   jbd2_journal_start()              write_dquot()
 *   dquot_initialize()                  down(dqio_mutex)
 *     down(dqio_mutex)                    jbd2_journal_start()
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot),
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot),
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	/* Are we journaling quotas? */
	if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
	    EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(sb->s_root->d_inode, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
				    EXT4_SB(sb)->s_jquota_fmt, type);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 char *name)
{
	int err;
	struct path path;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	/* Quotafile not on the same filesystem? */
	if (path.mnt->mnt_sb != sb) {
		path_put(&path);
		return -EXDEV;
	}
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path.dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				 "Quota file not on filesystem root. "
				 "Journaled quota will not work");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(path.dentry->d_inode)) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err) {
			path_put(&path);
			return err;
		}
	}

	err = dquot_quota_on_path(sb, type, format_id, &path);
	path_put(&path);
	return err;
}

static int ext4_quota_off(struct super_block *sb, int type)
{
	/* Force all delayed allocation blocks to be allocated */
	if (test_opt(sb, DELALLOC)) {
		down_read(&sb->s_umount);
		sync_filesystem(sb);
		up_read(&sb->s_umount);
	}

	return dquot_quota_off(sb, type);
}

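/*
 * Worked example for the block/offset arithmetic used by the quota
 * read/write helpers below (numbers hypothetical): with 4 KiB blocks,
 * EXT4_BLOCK_SIZE_BITS() is 12, so a request at off = 5000 maps to
 * logical block blk = 5000 >> 12 = 1 with an in-block offset of
 * 5000 & 4095 = 904.  ext4_quota_read() copies at most
 * "blocksize - offset" bytes from each block before moving to the next
 * one, while ext4_quota_write() refuses any write that would cross a
 * block boundary.
 */
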
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0, &err);
		if (err)
			return err;
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			 " cancelled because transaction is not started",
			 (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account for only one data block in the transaction credits,
	 * a single quota write must never cross a block boundary.
4192 */ 4193 if (sb->s_blocksize - offset < len) { 4194 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 4195 " cancelled because not block aligned", 4196 (unsigned long long)off, (unsigned long long)len); 4197 return -EIO; 4198 } 4199 4200 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 4201 bh = ext4_bread(handle, inode, blk, 1, &err); 4202 if (!bh) 4203 goto out; 4204 err = ext4_journal_get_write_access(handle, bh); 4205 if (err) { 4206 brelse(bh); 4207 goto out; 4208 } 4209 lock_buffer(bh); 4210 memcpy(bh->b_data+offset, data, len); 4211 flush_dcache_page(bh->b_page); 4212 unlock_buffer(bh); 4213 err = ext4_handle_dirty_metadata(handle, NULL, bh); 4214 brelse(bh); 4215 out: 4216 if (err) { 4217 mutex_unlock(&inode->i_mutex); 4218 return err; 4219 } 4220 if (inode->i_size < off + len) { 4221 i_size_write(inode, off + len); 4222 EXT4_I(inode)->i_disksize = inode->i_size; 4223 } 4224 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 4225 ext4_mark_inode_dirty(handle, inode); 4226 mutex_unlock(&inode->i_mutex); 4227 return len; 4228 } 4229 4230 #endif 4231 4232 static int ext4_get_sb(struct file_system_type *fs_type, int flags, 4233 const char *dev_name, void *data, struct vfsmount *mnt) 4234 { 4235 return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt); 4236 } 4237 4238 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) 4239 static struct file_system_type ext2_fs_type = { 4240 .owner = THIS_MODULE, 4241 .name = "ext2", 4242 .get_sb = ext4_get_sb, 4243 .kill_sb = kill_block_super, 4244 .fs_flags = FS_REQUIRES_DEV, 4245 }; 4246 4247 static inline void register_as_ext2(void) 4248 { 4249 int err = register_filesystem(&ext2_fs_type); 4250 if (err) 4251 printk(KERN_WARNING 4252 "EXT4-fs: Unable to register as ext2 (%d)\n", err); 4253 } 4254 4255 static inline void unregister_as_ext2(void) 4256 { 4257 unregister_filesystem(&ext2_fs_type); 4258 } 4259 MODULE_ALIAS("ext2"); 4260 #else 4261 static inline void register_as_ext2(void) { } 4262 static inline void unregister_as_ext2(void) { } 4263 #endif 4264 4265 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) 4266 static inline void register_as_ext3(void) 4267 { 4268 int err = register_filesystem(&ext3_fs_type); 4269 if (err) 4270 printk(KERN_WARNING 4271 "EXT4-fs: Unable to register as ext3 (%d)\n", err); 4272 } 4273 4274 static inline void unregister_as_ext3(void) 4275 { 4276 unregister_filesystem(&ext3_fs_type); 4277 } 4278 MODULE_ALIAS("ext3"); 4279 #else 4280 static inline void register_as_ext3(void) { } 4281 static inline void unregister_as_ext3(void) { } 4282 #endif 4283 4284 static struct file_system_type ext4_fs_type = { 4285 .owner = THIS_MODULE, 4286 .name = "ext4", 4287 .get_sb = ext4_get_sb, 4288 .kill_sb = kill_block_super, 4289 .fs_flags = FS_REQUIRES_DEV, 4290 }; 4291 4292 static int __init init_ext4_fs(void) 4293 { 4294 int err; 4295 4296 ext4_check_flag_values(); 4297 err = init_ext4_system_zone(); 4298 if (err) 4299 return err; 4300 ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); 4301 if (!ext4_kset) 4302 goto out4; 4303 ext4_proc_root = proc_mkdir("fs/ext4", NULL); 4304 err = init_ext4_mballoc(); 4305 if (err) 4306 goto out3; 4307 4308 err = init_ext4_xattr(); 4309 if (err) 4310 goto out2; 4311 err = init_inodecache(); 4312 if (err) 4313 goto out1; 4314 register_as_ext2(); 4315 register_as_ext3(); 4316 err = register_filesystem(&ext4_fs_type); 4317 if (err) 4318 goto out; 4319 return 0; 
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	exit_ext4_xattr();
out2:
	exit_ext4_mballoc();
out3:
	remove_proc_entry("fs/ext4", NULL);
	kset_unregister(ext4_kset);
out4:
	exit_ext4_system_zone();
	return err;
}

static void __exit exit_ext4_fs(void)
{
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	exit_ext4_xattr();
	exit_ext4_mballoc();
	remove_proc_entry("fs/ext4", NULL);
	kset_unregister(ext4_kset);
	exit_ext4_system_zone();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(init_ext4_fs)
module_exit(exit_ext4_fs)
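
/*
 * Illustrative usage (not part of the driver itself): once the module is
 * loaded, a device can be mounted with e.g.
 *
 *	# modprobe ext4
 *	# mount -t ext4 /dev/sdXN /mnt
 *
 * When CONFIG_EXT4_USE_FOR_EXT23 is enabled and the legacy ext2/ext3
 * drivers are not built, "mount -t ext2" and "mount -t ext3" are served
 * by this driver as well, via register_as_ext2()/register_as_ext3() and
 * the MODULE_ALIAS() entries above.
 */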