1 /* 2 * fs/f2fs/super.c 3 * 4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/module.h> 12 #include <linux/init.h> 13 #include <linux/fs.h> 14 #include <linux/statfs.h> 15 #include <linux/buffer_head.h> 16 #include <linux/backing-dev.h> 17 #include <linux/kthread.h> 18 #include <linux/parser.h> 19 #include <linux/mount.h> 20 #include <linux/seq_file.h> 21 #include <linux/proc_fs.h> 22 #include <linux/random.h> 23 #include <linux/exportfs.h> 24 #include <linux/blkdev.h> 25 #include <linux/quotaops.h> 26 #include <linux/f2fs_fs.h> 27 #include <linux/sysfs.h> 28 #include <linux/quota.h> 29 30 #include "f2fs.h" 31 #include "node.h" 32 #include "segment.h" 33 #include "xattr.h" 34 #include "gc.h" 35 #include "trace.h" 36 37 #define CREATE_TRACE_POINTS 38 #include <trace/events/f2fs.h> 39 40 static struct kmem_cache *f2fs_inode_cachep; 41 42 #ifdef CONFIG_F2FS_FAULT_INJECTION 43 44 char *fault_name[FAULT_MAX] = { 45 [FAULT_KMALLOC] = "kmalloc", 46 [FAULT_KVMALLOC] = "kvmalloc", 47 [FAULT_PAGE_ALLOC] = "page alloc", 48 [FAULT_PAGE_GET] = "page get", 49 [FAULT_ALLOC_BIO] = "alloc bio", 50 [FAULT_ALLOC_NID] = "alloc nid", 51 [FAULT_ORPHAN] = "orphan", 52 [FAULT_BLOCK] = "no more block", 53 [FAULT_DIR_DEPTH] = "too big dir depth", 54 [FAULT_EVICT_INODE] = "evict_inode fail", 55 [FAULT_TRUNCATE] = "truncate fail", 56 [FAULT_IO] = "IO error", 57 [FAULT_CHECKPOINT] = "checkpoint error", 58 }; 59 60 static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, 61 unsigned int rate) 62 { 63 struct f2fs_fault_info *ffi = &sbi->fault_info; 64 65 if (rate) { 66 atomic_set(&ffi->inject_ops, 0); 67 ffi->inject_rate = rate; 68 ffi->inject_type = (1 << FAULT_MAX) - 1; 69 } else { 70 memset(ffi, 0, sizeof(struct f2fs_fault_info)); 71 } 72 } 73 #endif 74 75 /* f2fs-wide shrinker description */ 76 static struct shrinker f2fs_shrinker_info = { 77 .scan_objects = f2fs_shrink_scan, 78 .count_objects = f2fs_shrink_count, 79 .seeks = DEFAULT_SEEKS, 80 }; 81 82 enum { 83 Opt_gc_background, 84 Opt_disable_roll_forward, 85 Opt_norecovery, 86 Opt_discard, 87 Opt_nodiscard, 88 Opt_noheap, 89 Opt_heap, 90 Opt_user_xattr, 91 Opt_nouser_xattr, 92 Opt_acl, 93 Opt_noacl, 94 Opt_active_logs, 95 Opt_disable_ext_identify, 96 Opt_inline_xattr, 97 Opt_noinline_xattr, 98 Opt_inline_xattr_size, 99 Opt_inline_data, 100 Opt_inline_dentry, 101 Opt_noinline_dentry, 102 Opt_flush_merge, 103 Opt_noflush_merge, 104 Opt_nobarrier, 105 Opt_fastboot, 106 Opt_extent_cache, 107 Opt_noextent_cache, 108 Opt_noinline_data, 109 Opt_data_flush, 110 Opt_reserve_root, 111 Opt_resgid, 112 Opt_resuid, 113 Opt_mode, 114 Opt_io_size_bits, 115 Opt_fault_injection, 116 Opt_lazytime, 117 Opt_nolazytime, 118 Opt_quota, 119 Opt_noquota, 120 Opt_usrquota, 121 Opt_grpquota, 122 Opt_prjquota, 123 Opt_usrjquota, 124 Opt_grpjquota, 125 Opt_prjjquota, 126 Opt_offusrjquota, 127 Opt_offgrpjquota, 128 Opt_offprjjquota, 129 Opt_jqfmt_vfsold, 130 Opt_jqfmt_vfsv0, 131 Opt_jqfmt_vfsv1, 132 Opt_err, 133 }; 134 135 static match_table_t f2fs_tokens = { 136 {Opt_gc_background, "background_gc=%s"}, 137 {Opt_disable_roll_forward, "disable_roll_forward"}, 138 {Opt_norecovery, "norecovery"}, 139 {Opt_discard, "discard"}, 140 {Opt_nodiscard, "nodiscard"}, 141 {Opt_noheap, "no_heap"}, 142 {Opt_heap, "heap"}, 143 {Opt_user_xattr, 
"user_xattr"}, 144 {Opt_nouser_xattr, "nouser_xattr"}, 145 {Opt_acl, "acl"}, 146 {Opt_noacl, "noacl"}, 147 {Opt_active_logs, "active_logs=%u"}, 148 {Opt_disable_ext_identify, "disable_ext_identify"}, 149 {Opt_inline_xattr, "inline_xattr"}, 150 {Opt_noinline_xattr, "noinline_xattr"}, 151 {Opt_inline_xattr_size, "inline_xattr_size=%u"}, 152 {Opt_inline_data, "inline_data"}, 153 {Opt_inline_dentry, "inline_dentry"}, 154 {Opt_noinline_dentry, "noinline_dentry"}, 155 {Opt_flush_merge, "flush_merge"}, 156 {Opt_noflush_merge, "noflush_merge"}, 157 {Opt_nobarrier, "nobarrier"}, 158 {Opt_fastboot, "fastboot"}, 159 {Opt_extent_cache, "extent_cache"}, 160 {Opt_noextent_cache, "noextent_cache"}, 161 {Opt_noinline_data, "noinline_data"}, 162 {Opt_data_flush, "data_flush"}, 163 {Opt_reserve_root, "reserve_root=%u"}, 164 {Opt_resgid, "resgid=%u"}, 165 {Opt_resuid, "resuid=%u"}, 166 {Opt_mode, "mode=%s"}, 167 {Opt_io_size_bits, "io_bits=%u"}, 168 {Opt_fault_injection, "fault_injection=%u"}, 169 {Opt_lazytime, "lazytime"}, 170 {Opt_nolazytime, "nolazytime"}, 171 {Opt_quota, "quota"}, 172 {Opt_noquota, "noquota"}, 173 {Opt_usrquota, "usrquota"}, 174 {Opt_grpquota, "grpquota"}, 175 {Opt_prjquota, "prjquota"}, 176 {Opt_usrjquota, "usrjquota=%s"}, 177 {Opt_grpjquota, "grpjquota=%s"}, 178 {Opt_prjjquota, "prjjquota=%s"}, 179 {Opt_offusrjquota, "usrjquota="}, 180 {Opt_offgrpjquota, "grpjquota="}, 181 {Opt_offprjjquota, "prjjquota="}, 182 {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, 183 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, 184 {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, 185 {Opt_err, NULL}, 186 }; 187 188 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 189 { 190 struct va_format vaf; 191 va_list args; 192 193 va_start(args, fmt); 194 vaf.fmt = fmt; 195 vaf.va = &args; 196 printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); 197 va_end(args); 198 } 199 200 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) 201 { 202 block_t limit = (sbi->user_block_count << 1) / 1000; 203 204 /* limit is 0.2% */ 205 if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) { 206 sbi->root_reserved_blocks = limit; 207 f2fs_msg(sbi->sb, KERN_INFO, 208 "Reduce reserved blocks for root = %u", 209 sbi->root_reserved_blocks); 210 } 211 if (!test_opt(sbi, RESERVE_ROOT) && 212 (!uid_eq(sbi->s_resuid, 213 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) || 214 !gid_eq(sbi->s_resgid, 215 make_kgid(&init_user_ns, F2FS_DEF_RESGID)))) 216 f2fs_msg(sbi->sb, KERN_INFO, 217 "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", 218 from_kuid_munged(&init_user_ns, sbi->s_resuid), 219 from_kgid_munged(&init_user_ns, sbi->s_resgid)); 220 } 221 222 static void init_once(void *foo) 223 { 224 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; 225 226 inode_init_once(&fi->vfs_inode); 227 } 228 229 #ifdef CONFIG_QUOTA 230 static const char * const quotatypes[] = INITQFNAMES; 231 #define QTYPE2NAME(t) (quotatypes[t]) 232 static int f2fs_set_qf_name(struct super_block *sb, int qtype, 233 substring_t *args) 234 { 235 struct f2fs_sb_info *sbi = F2FS_SB(sb); 236 char *qname; 237 int ret = -EINVAL; 238 239 if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { 240 f2fs_msg(sb, KERN_ERR, 241 "Cannot change journaled " 242 "quota options when quota turned on"); 243 return -EINVAL; 244 } 245 if (f2fs_sb_has_quota_ino(sb)) { 246 f2fs_msg(sb, KERN_INFO, 247 "QUOTA feature is enabled, so ignore qf_name"); 248 return 0; 249 } 250 251 qname = match_strdup(args); 252 if (!qname) { 253 f2fs_msg(sb, KERN_ERR, 254 "Not 
enough memory for storing quotafile name"); 255 return -EINVAL; 256 } 257 if (sbi->s_qf_names[qtype]) { 258 if (strcmp(sbi->s_qf_names[qtype], qname) == 0) 259 ret = 0; 260 else 261 f2fs_msg(sb, KERN_ERR, 262 "%s quota file already specified", 263 QTYPE2NAME(qtype)); 264 goto errout; 265 } 266 if (strchr(qname, '/')) { 267 f2fs_msg(sb, KERN_ERR, 268 "quotafile must be on filesystem root"); 269 goto errout; 270 } 271 sbi->s_qf_names[qtype] = qname; 272 set_opt(sbi, QUOTA); 273 return 0; 274 errout: 275 kfree(qname); 276 return ret; 277 } 278 279 static int f2fs_clear_qf_name(struct super_block *sb, int qtype) 280 { 281 struct f2fs_sb_info *sbi = F2FS_SB(sb); 282 283 if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { 284 f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" 285 " when quota turned on"); 286 return -EINVAL; 287 } 288 kfree(sbi->s_qf_names[qtype]); 289 sbi->s_qf_names[qtype] = NULL; 290 return 0; 291 } 292 293 static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) 294 { 295 /* 296 * We do the test below only for project quotas. 'usrquota' and 297 * 'grpquota' mount options are allowed even without quota feature 298 * to support legacy quotas in quota files. 299 */ 300 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) { 301 f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. " 302 "Cannot enable project quota enforcement."); 303 return -1; 304 } 305 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] || 306 sbi->s_qf_names[PRJQUOTA]) { 307 if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) 308 clear_opt(sbi, USRQUOTA); 309 310 if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) 311 clear_opt(sbi, GRPQUOTA); 312 313 if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA]) 314 clear_opt(sbi, PRJQUOTA); 315 316 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || 317 test_opt(sbi, PRJQUOTA)) { 318 f2fs_msg(sbi->sb, KERN_ERR, "old and new quota " 319 "format mixing"); 320 return -1; 321 } 322 323 if (!sbi->s_jquota_fmt) { 324 f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format " 325 "not specified"); 326 return -1; 327 } 328 } 329 330 if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) { 331 f2fs_msg(sbi->sb, KERN_INFO, 332 "QUOTA feature is enabled, so ignore jquota_fmt"); 333 sbi->s_jquota_fmt = 0; 334 } 335 if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) { 336 f2fs_msg(sbi->sb, KERN_INFO, 337 "Filesystem with quota feature cannot be mounted RDWR " 338 "without CONFIG_QUOTA"); 339 return -1; 340 } 341 return 0; 342 } 343 #endif 344 345 static int parse_options(struct super_block *sb, char *options) 346 { 347 struct f2fs_sb_info *sbi = F2FS_SB(sb); 348 struct request_queue *q; 349 substring_t args[MAX_OPT_ARGS]; 350 char *p, *name; 351 int arg = 0; 352 kuid_t uid; 353 kgid_t gid; 354 #ifdef CONFIG_QUOTA 355 int ret; 356 #endif 357 358 if (!options) 359 return 0; 360 361 while ((p = strsep(&options, ",")) != NULL) { 362 int token; 363 if (!*p) 364 continue; 365 /* 366 * Initialize args struct so we know whether arg was 367 * found; some options take optional arguments. 
368 */ 369 args[0].to = args[0].from = NULL; 370 token = match_token(p, f2fs_tokens, args); 371 372 switch (token) { 373 case Opt_gc_background: 374 name = match_strdup(&args[0]); 375 376 if (!name) 377 return -ENOMEM; 378 if (strlen(name) == 2 && !strncmp(name, "on", 2)) { 379 set_opt(sbi, BG_GC); 380 clear_opt(sbi, FORCE_FG_GC); 381 } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) { 382 clear_opt(sbi, BG_GC); 383 clear_opt(sbi, FORCE_FG_GC); 384 } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) { 385 set_opt(sbi, BG_GC); 386 set_opt(sbi, FORCE_FG_GC); 387 } else { 388 kfree(name); 389 return -EINVAL; 390 } 391 kfree(name); 392 break; 393 case Opt_disable_roll_forward: 394 set_opt(sbi, DISABLE_ROLL_FORWARD); 395 break; 396 case Opt_norecovery: 397 /* this option mounts f2fs with ro */ 398 set_opt(sbi, DISABLE_ROLL_FORWARD); 399 if (!f2fs_readonly(sb)) 400 return -EINVAL; 401 break; 402 case Opt_discard: 403 q = bdev_get_queue(sb->s_bdev); 404 if (blk_queue_discard(q)) { 405 set_opt(sbi, DISCARD); 406 } else if (!f2fs_sb_mounted_blkzoned(sb)) { 407 f2fs_msg(sb, KERN_WARNING, 408 "mounting with \"discard\" option, but " 409 "the device does not support discard"); 410 } 411 break; 412 case Opt_nodiscard: 413 if (f2fs_sb_mounted_blkzoned(sb)) { 414 f2fs_msg(sb, KERN_WARNING, 415 "discard is required for zoned block devices"); 416 return -EINVAL; 417 } 418 clear_opt(sbi, DISCARD); 419 break; 420 case Opt_noheap: 421 set_opt(sbi, NOHEAP); 422 break; 423 case Opt_heap: 424 clear_opt(sbi, NOHEAP); 425 break; 426 #ifdef CONFIG_F2FS_FS_XATTR 427 case Opt_user_xattr: 428 set_opt(sbi, XATTR_USER); 429 break; 430 case Opt_nouser_xattr: 431 clear_opt(sbi, XATTR_USER); 432 break; 433 case Opt_inline_xattr: 434 set_opt(sbi, INLINE_XATTR); 435 break; 436 case Opt_noinline_xattr: 437 clear_opt(sbi, INLINE_XATTR); 438 break; 439 case Opt_inline_xattr_size: 440 if (args->from && match_int(args, &arg)) 441 return -EINVAL; 442 set_opt(sbi, INLINE_XATTR_SIZE); 443 sbi->inline_xattr_size = arg; 444 break; 445 #else 446 case Opt_user_xattr: 447 f2fs_msg(sb, KERN_INFO, 448 "user_xattr options not supported"); 449 break; 450 case Opt_nouser_xattr: 451 f2fs_msg(sb, KERN_INFO, 452 "nouser_xattr options not supported"); 453 break; 454 case Opt_inline_xattr: 455 f2fs_msg(sb, KERN_INFO, 456 "inline_xattr options not supported"); 457 break; 458 case Opt_noinline_xattr: 459 f2fs_msg(sb, KERN_INFO, 460 "noinline_xattr options not supported"); 461 break; 462 #endif 463 #ifdef CONFIG_F2FS_FS_POSIX_ACL 464 case Opt_acl: 465 set_opt(sbi, POSIX_ACL); 466 break; 467 case Opt_noacl: 468 clear_opt(sbi, POSIX_ACL); 469 break; 470 #else 471 case Opt_acl: 472 f2fs_msg(sb, KERN_INFO, "acl options not supported"); 473 break; 474 case Opt_noacl: 475 f2fs_msg(sb, KERN_INFO, "noacl options not supported"); 476 break; 477 #endif 478 case Opt_active_logs: 479 if (args->from && match_int(args, &arg)) 480 return -EINVAL; 481 if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) 482 return -EINVAL; 483 sbi->active_logs = arg; 484 break; 485 case Opt_disable_ext_identify: 486 set_opt(sbi, DISABLE_EXT_IDENTIFY); 487 break; 488 case Opt_inline_data: 489 set_opt(sbi, INLINE_DATA); 490 break; 491 case Opt_inline_dentry: 492 set_opt(sbi, INLINE_DENTRY); 493 break; 494 case Opt_noinline_dentry: 495 clear_opt(sbi, INLINE_DENTRY); 496 break; 497 case Opt_flush_merge: 498 set_opt(sbi, FLUSH_MERGE); 499 break; 500 case Opt_noflush_merge: 501 clear_opt(sbi, FLUSH_MERGE); 502 break; 503 case Opt_nobarrier: 504 set_opt(sbi, NOBARRIER); 505 
break; 506 case Opt_fastboot: 507 set_opt(sbi, FASTBOOT); 508 break; 509 case Opt_extent_cache: 510 set_opt(sbi, EXTENT_CACHE); 511 break; 512 case Opt_noextent_cache: 513 clear_opt(sbi, EXTENT_CACHE); 514 break; 515 case Opt_noinline_data: 516 clear_opt(sbi, INLINE_DATA); 517 break; 518 case Opt_data_flush: 519 set_opt(sbi, DATA_FLUSH); 520 break; 521 case Opt_reserve_root: 522 if (args->from && match_int(args, &arg)) 523 return -EINVAL; 524 if (test_opt(sbi, RESERVE_ROOT)) { 525 f2fs_msg(sb, KERN_INFO, 526 "Preserve previous reserve_root=%u", 527 sbi->root_reserved_blocks); 528 } else { 529 sbi->root_reserved_blocks = arg; 530 set_opt(sbi, RESERVE_ROOT); 531 } 532 break; 533 case Opt_resuid: 534 if (args->from && match_int(args, &arg)) 535 return -EINVAL; 536 uid = make_kuid(current_user_ns(), arg); 537 if (!uid_valid(uid)) { 538 f2fs_msg(sb, KERN_ERR, 539 "Invalid uid value %d", arg); 540 return -EINVAL; 541 } 542 sbi->s_resuid = uid; 543 break; 544 case Opt_resgid: 545 if (args->from && match_int(args, &arg)) 546 return -EINVAL; 547 gid = make_kgid(current_user_ns(), arg); 548 if (!gid_valid(gid)) { 549 f2fs_msg(sb, KERN_ERR, 550 "Invalid gid value %d", arg); 551 return -EINVAL; 552 } 553 sbi->s_resgid = gid; 554 break; 555 case Opt_mode: 556 name = match_strdup(&args[0]); 557 558 if (!name) 559 return -ENOMEM; 560 if (strlen(name) == 8 && 561 !strncmp(name, "adaptive", 8)) { 562 if (f2fs_sb_mounted_blkzoned(sb)) { 563 f2fs_msg(sb, KERN_WARNING, 564 "adaptive mode is not allowed with " 565 "zoned block device feature"); 566 kfree(name); 567 return -EINVAL; 568 } 569 set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE); 570 } else if (strlen(name) == 3 && 571 !strncmp(name, "lfs", 3)) { 572 set_opt_mode(sbi, F2FS_MOUNT_LFS); 573 } else { 574 kfree(name); 575 return -EINVAL; 576 } 577 kfree(name); 578 break; 579 case Opt_io_size_bits: 580 if (args->from && match_int(args, &arg)) 581 return -EINVAL; 582 if (arg > __ilog2_u32(BIO_MAX_PAGES)) { 583 f2fs_msg(sb, KERN_WARNING, 584 "Not support %d, larger than %d", 585 1 << arg, BIO_MAX_PAGES); 586 return -EINVAL; 587 } 588 sbi->write_io_size_bits = arg; 589 break; 590 case Opt_fault_injection: 591 if (args->from && match_int(args, &arg)) 592 return -EINVAL; 593 #ifdef CONFIG_F2FS_FAULT_INJECTION 594 f2fs_build_fault_attr(sbi, arg); 595 set_opt(sbi, FAULT_INJECTION); 596 #else 597 f2fs_msg(sb, KERN_INFO, 598 "FAULT_INJECTION was not selected"); 599 #endif 600 break; 601 case Opt_lazytime: 602 sb->s_flags |= SB_LAZYTIME; 603 break; 604 case Opt_nolazytime: 605 sb->s_flags &= ~SB_LAZYTIME; 606 break; 607 #ifdef CONFIG_QUOTA 608 case Opt_quota: 609 case Opt_usrquota: 610 set_opt(sbi, USRQUOTA); 611 break; 612 case Opt_grpquota: 613 set_opt(sbi, GRPQUOTA); 614 break; 615 case Opt_prjquota: 616 set_opt(sbi, PRJQUOTA); 617 break; 618 case Opt_usrjquota: 619 ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]); 620 if (ret) 621 return ret; 622 break; 623 case Opt_grpjquota: 624 ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]); 625 if (ret) 626 return ret; 627 break; 628 case Opt_prjjquota: 629 ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]); 630 if (ret) 631 return ret; 632 break; 633 case Opt_offusrjquota: 634 ret = f2fs_clear_qf_name(sb, USRQUOTA); 635 if (ret) 636 return ret; 637 break; 638 case Opt_offgrpjquota: 639 ret = f2fs_clear_qf_name(sb, GRPQUOTA); 640 if (ret) 641 return ret; 642 break; 643 case Opt_offprjjquota: 644 ret = f2fs_clear_qf_name(sb, PRJQUOTA); 645 if (ret) 646 return ret; 647 break; 648 case Opt_jqfmt_vfsold: 649 sbi->s_jquota_fmt = QFMT_VFS_OLD; 
650 break; 651 case Opt_jqfmt_vfsv0: 652 sbi->s_jquota_fmt = QFMT_VFS_V0; 653 break; 654 case Opt_jqfmt_vfsv1: 655 sbi->s_jquota_fmt = QFMT_VFS_V1; 656 break; 657 case Opt_noquota: 658 clear_opt(sbi, QUOTA); 659 clear_opt(sbi, USRQUOTA); 660 clear_opt(sbi, GRPQUOTA); 661 clear_opt(sbi, PRJQUOTA); 662 break; 663 #else 664 case Opt_quota: 665 case Opt_usrquota: 666 case Opt_grpquota: 667 case Opt_prjquota: 668 case Opt_usrjquota: 669 case Opt_grpjquota: 670 case Opt_prjjquota: 671 case Opt_offusrjquota: 672 case Opt_offgrpjquota: 673 case Opt_offprjjquota: 674 case Opt_jqfmt_vfsold: 675 case Opt_jqfmt_vfsv0: 676 case Opt_jqfmt_vfsv1: 677 case Opt_noquota: 678 f2fs_msg(sb, KERN_INFO, 679 "quota operations not supported"); 680 break; 681 #endif 682 default: 683 f2fs_msg(sb, KERN_ERR, 684 "Unrecognized mount option \"%s\" or missing value", 685 p); 686 return -EINVAL; 687 } 688 } 689 #ifdef CONFIG_QUOTA 690 if (f2fs_check_quota_options(sbi)) 691 return -EINVAL; 692 #endif 693 694 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { 695 f2fs_msg(sb, KERN_ERR, 696 "Should set mode=lfs with %uKB-sized IO", 697 F2FS_IO_SIZE_KB(sbi)); 698 return -EINVAL; 699 } 700 701 if (test_opt(sbi, INLINE_XATTR_SIZE)) { 702 if (!test_opt(sbi, INLINE_XATTR)) { 703 f2fs_msg(sb, KERN_ERR, 704 "inline_xattr_size option should be " 705 "set with inline_xattr option"); 706 return -EINVAL; 707 } 708 if (!sbi->inline_xattr_size || 709 sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE - 710 F2FS_TOTAL_EXTRA_ATTR_SIZE - 711 DEF_INLINE_RESERVED_SIZE - 712 DEF_MIN_INLINE_SIZE) { 713 f2fs_msg(sb, KERN_ERR, 714 "inline xattr size is out of range"); 715 return -EINVAL; 716 } 717 } 718 return 0; 719 } 720 721 static struct inode *f2fs_alloc_inode(struct super_block *sb) 722 { 723 struct f2fs_inode_info *fi; 724 725 fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO); 726 if (!fi) 727 return NULL; 728 729 init_once((void *) fi); 730 731 /* Initialize f2fs-specific inode info */ 732 atomic_set(&fi->dirty_pages, 0); 733 fi->i_current_depth = 1; 734 fi->i_advise = 0; 735 init_rwsem(&fi->i_sem); 736 INIT_LIST_HEAD(&fi->dirty_list); 737 INIT_LIST_HEAD(&fi->gdirty_list); 738 INIT_LIST_HEAD(&fi->inmem_ilist); 739 INIT_LIST_HEAD(&fi->inmem_pages); 740 mutex_init(&fi->inmem_lock); 741 init_rwsem(&fi->dio_rwsem[READ]); 742 init_rwsem(&fi->dio_rwsem[WRITE]); 743 init_rwsem(&fi->i_mmap_sem); 744 init_rwsem(&fi->i_xattr_sem); 745 746 #ifdef CONFIG_QUOTA 747 memset(&fi->i_dquot, 0, sizeof(fi->i_dquot)); 748 fi->i_reserved_quota = 0; 749 #endif 750 /* Will be used by directory only */ 751 fi->i_dir_level = F2FS_SB(sb)->dir_level; 752 753 return &fi->vfs_inode; 754 } 755 756 static int f2fs_drop_inode(struct inode *inode) 757 { 758 int ret; 759 /* 760 * This is to avoid a deadlock condition like below. 
761 * writeback_single_inode(inode) 762 * - f2fs_write_data_page 763 * - f2fs_gc -> iput -> evict 764 * - inode_wait_for_writeback(inode) 765 */ 766 if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) { 767 if (!inode->i_nlink && !is_bad_inode(inode)) { 768 /* to avoid evict_inode call simultaneously */ 769 atomic_inc(&inode->i_count); 770 spin_unlock(&inode->i_lock); 771 772 /* some remained atomic pages should discarded */ 773 if (f2fs_is_atomic_file(inode)) 774 drop_inmem_pages(inode); 775 776 /* should remain fi->extent_tree for writepage */ 777 f2fs_destroy_extent_node(inode); 778 779 sb_start_intwrite(inode->i_sb); 780 f2fs_i_size_write(inode, 0); 781 782 if (F2FS_HAS_BLOCKS(inode)) 783 f2fs_truncate(inode); 784 785 sb_end_intwrite(inode->i_sb); 786 787 spin_lock(&inode->i_lock); 788 atomic_dec(&inode->i_count); 789 } 790 trace_f2fs_drop_inode(inode, 0); 791 return 0; 792 } 793 ret = generic_drop_inode(inode); 794 trace_f2fs_drop_inode(inode, ret); 795 return ret; 796 } 797 798 int f2fs_inode_dirtied(struct inode *inode, bool sync) 799 { 800 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 801 int ret = 0; 802 803 spin_lock(&sbi->inode_lock[DIRTY_META]); 804 if (is_inode_flag_set(inode, FI_DIRTY_INODE)) { 805 ret = 1; 806 } else { 807 set_inode_flag(inode, FI_DIRTY_INODE); 808 stat_inc_dirty_inode(sbi, DIRTY_META); 809 } 810 if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) { 811 list_add_tail(&F2FS_I(inode)->gdirty_list, 812 &sbi->inode_list[DIRTY_META]); 813 inc_page_count(sbi, F2FS_DIRTY_IMETA); 814 } 815 spin_unlock(&sbi->inode_lock[DIRTY_META]); 816 return ret; 817 } 818 819 void f2fs_inode_synced(struct inode *inode) 820 { 821 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 822 823 spin_lock(&sbi->inode_lock[DIRTY_META]); 824 if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) { 825 spin_unlock(&sbi->inode_lock[DIRTY_META]); 826 return; 827 } 828 if (!list_empty(&F2FS_I(inode)->gdirty_list)) { 829 list_del_init(&F2FS_I(inode)->gdirty_list); 830 dec_page_count(sbi, F2FS_DIRTY_IMETA); 831 } 832 clear_inode_flag(inode, FI_DIRTY_INODE); 833 clear_inode_flag(inode, FI_AUTO_RECOVER); 834 stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META); 835 spin_unlock(&sbi->inode_lock[DIRTY_META]); 836 } 837 838 /* 839 * f2fs_dirty_inode() is called from __mark_inode_dirty() 840 * 841 * We should call set_dirty_inode to write the dirty inode through write_inode. 
842 */ 843 static void f2fs_dirty_inode(struct inode *inode, int flags) 844 { 845 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 846 847 if (inode->i_ino == F2FS_NODE_INO(sbi) || 848 inode->i_ino == F2FS_META_INO(sbi)) 849 return; 850 851 if (flags == I_DIRTY_TIME) 852 return; 853 854 if (is_inode_flag_set(inode, FI_AUTO_RECOVER)) 855 clear_inode_flag(inode, FI_AUTO_RECOVER); 856 857 f2fs_inode_dirtied(inode, false); 858 } 859 860 static void f2fs_i_callback(struct rcu_head *head) 861 { 862 struct inode *inode = container_of(head, struct inode, i_rcu); 863 kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode)); 864 } 865 866 static void f2fs_destroy_inode(struct inode *inode) 867 { 868 call_rcu(&inode->i_rcu, f2fs_i_callback); 869 } 870 871 static void destroy_percpu_info(struct f2fs_sb_info *sbi) 872 { 873 percpu_counter_destroy(&sbi->alloc_valid_block_count); 874 percpu_counter_destroy(&sbi->total_valid_inode_count); 875 } 876 877 static void destroy_device_list(struct f2fs_sb_info *sbi) 878 { 879 int i; 880 881 for (i = 0; i < sbi->s_ndevs; i++) { 882 blkdev_put(FDEV(i).bdev, FMODE_EXCL); 883 #ifdef CONFIG_BLK_DEV_ZONED 884 kfree(FDEV(i).blkz_type); 885 #endif 886 } 887 kfree(sbi->devs); 888 } 889 890 static void f2fs_put_super(struct super_block *sb) 891 { 892 struct f2fs_sb_info *sbi = F2FS_SB(sb); 893 int i; 894 bool dropped; 895 896 f2fs_quota_off_umount(sb); 897 898 /* prevent remaining shrinker jobs */ 899 mutex_lock(&sbi->umount_mutex); 900 901 /* 902 * We don't need to do checkpoint when superblock is clean. 903 * But, the previous checkpoint was not done by umount, it needs to do 904 * clean checkpoint again. 905 */ 906 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || 907 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 908 struct cp_control cpc = { 909 .reason = CP_UMOUNT, 910 }; 911 write_checkpoint(sbi, &cpc); 912 } 913 914 /* be sure to wait for any on-going discard commands */ 915 dropped = f2fs_wait_discard_bios(sbi); 916 917 if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) { 918 struct cp_control cpc = { 919 .reason = CP_UMOUNT | CP_TRIMMED, 920 }; 921 write_checkpoint(sbi, &cpc); 922 } 923 924 /* write_checkpoint can update stat informaion */ 925 f2fs_destroy_stats(sbi); 926 927 /* 928 * normally superblock is clean, so we need to release this. 929 * In addition, EIO will skip do checkpoint, we need this as well. 
930 */ 931 release_ino_entry(sbi, true); 932 933 f2fs_leave_shrinker(sbi); 934 mutex_unlock(&sbi->umount_mutex); 935 936 /* our cp_error case, we can wait for any writeback page */ 937 f2fs_flush_merged_writes(sbi); 938 939 iput(sbi->node_inode); 940 iput(sbi->meta_inode); 941 942 /* destroy f2fs internal modules */ 943 destroy_node_manager(sbi); 944 destroy_segment_manager(sbi); 945 946 kfree(sbi->ckpt); 947 948 f2fs_unregister_sysfs(sbi); 949 950 sb->s_fs_info = NULL; 951 if (sbi->s_chksum_driver) 952 crypto_free_shash(sbi->s_chksum_driver); 953 kfree(sbi->raw_super); 954 955 destroy_device_list(sbi); 956 mempool_destroy(sbi->write_io_dummy); 957 #ifdef CONFIG_QUOTA 958 for (i = 0; i < MAXQUOTAS; i++) 959 kfree(sbi->s_qf_names[i]); 960 #endif 961 destroy_percpu_info(sbi); 962 for (i = 0; i < NR_PAGE_TYPE; i++) 963 kfree(sbi->write_io[i]); 964 kfree(sbi); 965 } 966 967 int f2fs_sync_fs(struct super_block *sb, int sync) 968 { 969 struct f2fs_sb_info *sbi = F2FS_SB(sb); 970 int err = 0; 971 972 if (unlikely(f2fs_cp_error(sbi))) 973 return 0; 974 975 trace_f2fs_sync_fs(sb, sync); 976 977 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 978 return -EAGAIN; 979 980 if (sync) { 981 struct cp_control cpc; 982 983 cpc.reason = __get_cp_reason(sbi); 984 985 mutex_lock(&sbi->gc_mutex); 986 err = write_checkpoint(sbi, &cpc); 987 mutex_unlock(&sbi->gc_mutex); 988 } 989 f2fs_trace_ios(NULL, 1); 990 991 return err; 992 } 993 994 static int f2fs_freeze(struct super_block *sb) 995 { 996 if (f2fs_readonly(sb)) 997 return 0; 998 999 /* IO error happened before */ 1000 if (unlikely(f2fs_cp_error(F2FS_SB(sb)))) 1001 return -EIO; 1002 1003 /* must be clean, since sync_filesystem() was already called */ 1004 if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) 1005 return -EINVAL; 1006 return 0; 1007 } 1008 1009 static int f2fs_unfreeze(struct super_block *sb) 1010 { 1011 return 0; 1012 } 1013 1014 #ifdef CONFIG_QUOTA 1015 static int f2fs_statfs_project(struct super_block *sb, 1016 kprojid_t projid, struct kstatfs *buf) 1017 { 1018 struct kqid qid; 1019 struct dquot *dquot; 1020 u64 limit; 1021 u64 curblock; 1022 1023 qid = make_kqid_projid(projid); 1024 dquot = dqget(sb, qid); 1025 if (IS_ERR(dquot)) 1026 return PTR_ERR(dquot); 1027 spin_lock(&dq_data_lock); 1028 1029 limit = (dquot->dq_dqb.dqb_bsoftlimit ? 1030 dquot->dq_dqb.dqb_bsoftlimit : 1031 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits; 1032 if (limit && buf->f_blocks > limit) { 1033 curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; 1034 buf->f_blocks = limit; 1035 buf->f_bfree = buf->f_bavail = 1036 (buf->f_blocks > curblock) ? 1037 (buf->f_blocks - curblock) : 0; 1038 } 1039 1040 limit = dquot->dq_dqb.dqb_isoftlimit ? 1041 dquot->dq_dqb.dqb_isoftlimit : 1042 dquot->dq_dqb.dqb_ihardlimit; 1043 if (limit && buf->f_files > limit) { 1044 buf->f_files = limit; 1045 buf->f_ffree = 1046 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? 
1047 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; 1048 } 1049 1050 spin_unlock(&dq_data_lock); 1051 dqput(dquot); 1052 return 0; 1053 } 1054 #endif 1055 1056 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) 1057 { 1058 struct super_block *sb = dentry->d_sb; 1059 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1060 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 1061 block_t total_count, user_block_count, start_count; 1062 u64 avail_node_count; 1063 1064 total_count = le64_to_cpu(sbi->raw_super->block_count); 1065 user_block_count = sbi->user_block_count; 1066 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); 1067 buf->f_type = F2FS_SUPER_MAGIC; 1068 buf->f_bsize = sbi->blocksize; 1069 1070 buf->f_blocks = total_count - start_count; 1071 buf->f_bfree = user_block_count - valid_user_blocks(sbi) - 1072 sbi->current_reserved_blocks; 1073 if (buf->f_bfree > sbi->root_reserved_blocks) 1074 buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks; 1075 else 1076 buf->f_bavail = 0; 1077 1078 avail_node_count = sbi->total_node_count - sbi->nquota_files - 1079 F2FS_RESERVED_NODE_NUM; 1080 1081 if (avail_node_count > user_block_count) { 1082 buf->f_files = user_block_count; 1083 buf->f_ffree = buf->f_bavail; 1084 } else { 1085 buf->f_files = avail_node_count; 1086 buf->f_ffree = min(avail_node_count - valid_node_count(sbi), 1087 buf->f_bavail); 1088 } 1089 1090 buf->f_namelen = F2FS_NAME_LEN; 1091 buf->f_fsid.val[0] = (u32)id; 1092 buf->f_fsid.val[1] = (u32)(id >> 32); 1093 1094 #ifdef CONFIG_QUOTA 1095 if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) && 1096 sb_has_quota_limits_enabled(sb, PRJQUOTA)) { 1097 f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf); 1098 } 1099 #endif 1100 return 0; 1101 } 1102 1103 static inline void f2fs_show_quota_options(struct seq_file *seq, 1104 struct super_block *sb) 1105 { 1106 #ifdef CONFIG_QUOTA 1107 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1108 1109 if (sbi->s_jquota_fmt) { 1110 char *fmtname = ""; 1111 1112 switch (sbi->s_jquota_fmt) { 1113 case QFMT_VFS_OLD: 1114 fmtname = "vfsold"; 1115 break; 1116 case QFMT_VFS_V0: 1117 fmtname = "vfsv0"; 1118 break; 1119 case QFMT_VFS_V1: 1120 fmtname = "vfsv1"; 1121 break; 1122 } 1123 seq_printf(seq, ",jqfmt=%s", fmtname); 1124 } 1125 1126 if (sbi->s_qf_names[USRQUOTA]) 1127 seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]); 1128 1129 if (sbi->s_qf_names[GRPQUOTA]) 1130 seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]); 1131 1132 if (sbi->s_qf_names[PRJQUOTA]) 1133 seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]); 1134 #endif 1135 } 1136 1137 static int f2fs_show_options(struct seq_file *seq, struct dentry *root) 1138 { 1139 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); 1140 1141 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) { 1142 if (test_opt(sbi, FORCE_FG_GC)) 1143 seq_printf(seq, ",background_gc=%s", "sync"); 1144 else 1145 seq_printf(seq, ",background_gc=%s", "on"); 1146 } else { 1147 seq_printf(seq, ",background_gc=%s", "off"); 1148 } 1149 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) 1150 seq_puts(seq, ",disable_roll_forward"); 1151 if (test_opt(sbi, DISCARD)) 1152 seq_puts(seq, ",discard"); 1153 if (test_opt(sbi, NOHEAP)) 1154 seq_puts(seq, ",no_heap"); 1155 else 1156 seq_puts(seq, ",heap"); 1157 #ifdef CONFIG_F2FS_FS_XATTR 1158 if (test_opt(sbi, XATTR_USER)) 1159 seq_puts(seq, ",user_xattr"); 1160 else 1161 seq_puts(seq, ",nouser_xattr"); 1162 if (test_opt(sbi, INLINE_XATTR)) 1163 seq_puts(seq, ",inline_xattr"); 1164 else 1165 
seq_puts(seq, ",noinline_xattr"); 1166 if (test_opt(sbi, INLINE_XATTR_SIZE)) 1167 seq_printf(seq, ",inline_xattr_size=%u", 1168 sbi->inline_xattr_size); 1169 #endif 1170 #ifdef CONFIG_F2FS_FS_POSIX_ACL 1171 if (test_opt(sbi, POSIX_ACL)) 1172 seq_puts(seq, ",acl"); 1173 else 1174 seq_puts(seq, ",noacl"); 1175 #endif 1176 if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) 1177 seq_puts(seq, ",disable_ext_identify"); 1178 if (test_opt(sbi, INLINE_DATA)) 1179 seq_puts(seq, ",inline_data"); 1180 else 1181 seq_puts(seq, ",noinline_data"); 1182 if (test_opt(sbi, INLINE_DENTRY)) 1183 seq_puts(seq, ",inline_dentry"); 1184 else 1185 seq_puts(seq, ",noinline_dentry"); 1186 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE)) 1187 seq_puts(seq, ",flush_merge"); 1188 if (test_opt(sbi, NOBARRIER)) 1189 seq_puts(seq, ",nobarrier"); 1190 if (test_opt(sbi, FASTBOOT)) 1191 seq_puts(seq, ",fastboot"); 1192 if (test_opt(sbi, EXTENT_CACHE)) 1193 seq_puts(seq, ",extent_cache"); 1194 else 1195 seq_puts(seq, ",noextent_cache"); 1196 if (test_opt(sbi, DATA_FLUSH)) 1197 seq_puts(seq, ",data_flush"); 1198 1199 seq_puts(seq, ",mode="); 1200 if (test_opt(sbi, ADAPTIVE)) 1201 seq_puts(seq, "adaptive"); 1202 else if (test_opt(sbi, LFS)) 1203 seq_puts(seq, "lfs"); 1204 seq_printf(seq, ",active_logs=%u", sbi->active_logs); 1205 if (test_opt(sbi, RESERVE_ROOT)) 1206 seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u", 1207 sbi->root_reserved_blocks, 1208 from_kuid_munged(&init_user_ns, sbi->s_resuid), 1209 from_kgid_munged(&init_user_ns, sbi->s_resgid)); 1210 if (F2FS_IO_SIZE_BITS(sbi)) 1211 seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi)); 1212 #ifdef CONFIG_F2FS_FAULT_INJECTION 1213 if (test_opt(sbi, FAULT_INJECTION)) 1214 seq_printf(seq, ",fault_injection=%u", 1215 sbi->fault_info.inject_rate); 1216 #endif 1217 #ifdef CONFIG_QUOTA 1218 if (test_opt(sbi, QUOTA)) 1219 seq_puts(seq, ",quota"); 1220 if (test_opt(sbi, USRQUOTA)) 1221 seq_puts(seq, ",usrquota"); 1222 if (test_opt(sbi, GRPQUOTA)) 1223 seq_puts(seq, ",grpquota"); 1224 if (test_opt(sbi, PRJQUOTA)) 1225 seq_puts(seq, ",prjquota"); 1226 #endif 1227 f2fs_show_quota_options(seq, sbi->sb); 1228 1229 return 0; 1230 } 1231 1232 static void default_options(struct f2fs_sb_info *sbi) 1233 { 1234 /* init some FS parameters */ 1235 sbi->active_logs = NR_CURSEG_TYPE; 1236 sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; 1237 1238 set_opt(sbi, BG_GC); 1239 set_opt(sbi, INLINE_XATTR); 1240 set_opt(sbi, INLINE_DATA); 1241 set_opt(sbi, INLINE_DENTRY); 1242 set_opt(sbi, EXTENT_CACHE); 1243 set_opt(sbi, NOHEAP); 1244 sbi->sb->s_flags |= SB_LAZYTIME; 1245 set_opt(sbi, FLUSH_MERGE); 1246 if (f2fs_sb_mounted_blkzoned(sbi->sb)) { 1247 set_opt_mode(sbi, F2FS_MOUNT_LFS); 1248 set_opt(sbi, DISCARD); 1249 } else { 1250 set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE); 1251 } 1252 1253 #ifdef CONFIG_F2FS_FS_XATTR 1254 set_opt(sbi, XATTR_USER); 1255 #endif 1256 #ifdef CONFIG_F2FS_FS_POSIX_ACL 1257 set_opt(sbi, POSIX_ACL); 1258 #endif 1259 1260 #ifdef CONFIG_F2FS_FAULT_INJECTION 1261 f2fs_build_fault_attr(sbi, 0); 1262 #endif 1263 } 1264 1265 #ifdef CONFIG_QUOTA 1266 static int f2fs_enable_quotas(struct super_block *sb); 1267 #endif 1268 static int f2fs_remount(struct super_block *sb, int *flags, char *data) 1269 { 1270 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1271 struct f2fs_mount_info org_mount_opt; 1272 unsigned long old_sb_flags; 1273 int err, active_logs; 1274 bool need_restart_gc = false; 1275 bool need_stop_gc = false; 1276 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE); 1277 
#ifdef CONFIG_F2FS_FAULT_INJECTION 1278 struct f2fs_fault_info ffi = sbi->fault_info; 1279 #endif 1280 #ifdef CONFIG_QUOTA 1281 int s_jquota_fmt; 1282 char *s_qf_names[MAXQUOTAS]; 1283 int i, j; 1284 #endif 1285 1286 /* 1287 * Save the old mount options in case we 1288 * need to restore them. 1289 */ 1290 org_mount_opt = sbi->mount_opt; 1291 old_sb_flags = sb->s_flags; 1292 active_logs = sbi->active_logs; 1293 1294 #ifdef CONFIG_QUOTA 1295 s_jquota_fmt = sbi->s_jquota_fmt; 1296 for (i = 0; i < MAXQUOTAS; i++) { 1297 if (sbi->s_qf_names[i]) { 1298 s_qf_names[i] = kstrdup(sbi->s_qf_names[i], 1299 GFP_KERNEL); 1300 if (!s_qf_names[i]) { 1301 for (j = 0; j < i; j++) 1302 kfree(s_qf_names[j]); 1303 return -ENOMEM; 1304 } 1305 } else { 1306 s_qf_names[i] = NULL; 1307 } 1308 } 1309 #endif 1310 1311 /* recover superblocks we couldn't write due to previous RO mount */ 1312 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { 1313 err = f2fs_commit_super(sbi, false); 1314 f2fs_msg(sb, KERN_INFO, 1315 "Try to recover all the superblocks, ret: %d", err); 1316 if (!err) 1317 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); 1318 } 1319 1320 default_options(sbi); 1321 1322 /* parse mount options */ 1323 err = parse_options(sb, data); 1324 if (err) 1325 goto restore_opts; 1326 1327 /* 1328 * Previous and new state of filesystem is RO, 1329 * so skip checking GC and FLUSH_MERGE conditions. 1330 */ 1331 if (f2fs_readonly(sb) && (*flags & SB_RDONLY)) 1332 goto skip; 1333 1334 #ifdef CONFIG_QUOTA 1335 if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) { 1336 err = dquot_suspend(sb, -1); 1337 if (err < 0) 1338 goto restore_opts; 1339 } else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) { 1340 /* dquot_resume needs RW */ 1341 sb->s_flags &= ~SB_RDONLY; 1342 if (sb_any_quota_suspended(sb)) { 1343 dquot_resume(sb, -1); 1344 } else if (f2fs_sb_has_quota_ino(sb)) { 1345 err = f2fs_enable_quotas(sb); 1346 if (err) 1347 goto restore_opts; 1348 } 1349 } 1350 #endif 1351 /* disallow enable/disable extent_cache dynamically */ 1352 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { 1353 err = -EINVAL; 1354 f2fs_msg(sbi->sb, KERN_WARNING, 1355 "switch extent_cache option is not allowed"); 1356 goto restore_opts; 1357 } 1358 1359 /* 1360 * We stop the GC thread if FS is mounted as RO 1361 * or if background_gc = off is passed in mount 1362 * option. Also sync the filesystem. 1363 */ 1364 if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) { 1365 if (sbi->gc_thread) { 1366 stop_gc_thread(sbi); 1367 need_restart_gc = true; 1368 } 1369 } else if (!sbi->gc_thread) { 1370 err = start_gc_thread(sbi); 1371 if (err) 1372 goto restore_opts; 1373 need_stop_gc = true; 1374 } 1375 1376 if (*flags & SB_RDONLY) { 1377 writeback_inodes_sb(sb, WB_REASON_SYNC); 1378 sync_inodes_sb(sb); 1379 1380 set_sbi_flag(sbi, SBI_IS_DIRTY); 1381 set_sbi_flag(sbi, SBI_IS_CLOSE); 1382 f2fs_sync_fs(sb, 1); 1383 clear_sbi_flag(sbi, SBI_IS_CLOSE); 1384 } 1385 1386 /* 1387 * We stop issue flush thread if FS is mounted as RO 1388 * or if flush_merge is not passed in mount option. 
1389 */ 1390 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { 1391 clear_opt(sbi, FLUSH_MERGE); 1392 destroy_flush_cmd_control(sbi, false); 1393 } else { 1394 err = create_flush_cmd_control(sbi); 1395 if (err) 1396 goto restore_gc; 1397 } 1398 skip: 1399 #ifdef CONFIG_QUOTA 1400 /* Release old quota file names */ 1401 for (i = 0; i < MAXQUOTAS; i++) 1402 kfree(s_qf_names[i]); 1403 #endif 1404 /* Update the POSIXACL Flag */ 1405 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 1406 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); 1407 1408 limit_reserve_root(sbi); 1409 return 0; 1410 restore_gc: 1411 if (need_restart_gc) { 1412 if (start_gc_thread(sbi)) 1413 f2fs_msg(sbi->sb, KERN_WARNING, 1414 "background gc thread has stopped"); 1415 } else if (need_stop_gc) { 1416 stop_gc_thread(sbi); 1417 } 1418 restore_opts: 1419 #ifdef CONFIG_QUOTA 1420 sbi->s_jquota_fmt = s_jquota_fmt; 1421 for (i = 0; i < MAXQUOTAS; i++) { 1422 kfree(sbi->s_qf_names[i]); 1423 sbi->s_qf_names[i] = s_qf_names[i]; 1424 } 1425 #endif 1426 sbi->mount_opt = org_mount_opt; 1427 sbi->active_logs = active_logs; 1428 sb->s_flags = old_sb_flags; 1429 #ifdef CONFIG_F2FS_FAULT_INJECTION 1430 sbi->fault_info = ffi; 1431 #endif 1432 return err; 1433 } 1434 1435 #ifdef CONFIG_QUOTA 1436 /* Read data from quotafile */ 1437 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, 1438 size_t len, loff_t off) 1439 { 1440 struct inode *inode = sb_dqopt(sb)->files[type]; 1441 struct address_space *mapping = inode->i_mapping; 1442 block_t blkidx = F2FS_BYTES_TO_BLK(off); 1443 int offset = off & (sb->s_blocksize - 1); 1444 int tocopy; 1445 size_t toread; 1446 loff_t i_size = i_size_read(inode); 1447 struct page *page; 1448 char *kaddr; 1449 1450 if (off > i_size) 1451 return 0; 1452 1453 if (off + len > i_size) 1454 len = i_size - off; 1455 toread = len; 1456 while (toread > 0) { 1457 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); 1458 repeat: 1459 page = read_mapping_page(mapping, blkidx, NULL); 1460 if (IS_ERR(page)) { 1461 if (PTR_ERR(page) == -ENOMEM) { 1462 congestion_wait(BLK_RW_ASYNC, HZ/50); 1463 goto repeat; 1464 } 1465 return PTR_ERR(page); 1466 } 1467 1468 lock_page(page); 1469 1470 if (unlikely(page->mapping != mapping)) { 1471 f2fs_put_page(page, 1); 1472 goto repeat; 1473 } 1474 if (unlikely(!PageUptodate(page))) { 1475 f2fs_put_page(page, 1); 1476 return -EIO; 1477 } 1478 1479 kaddr = kmap_atomic(page); 1480 memcpy(data, kaddr + offset, tocopy); 1481 kunmap_atomic(kaddr); 1482 f2fs_put_page(page, 1); 1483 1484 offset = 0; 1485 toread -= tocopy; 1486 data += tocopy; 1487 blkidx++; 1488 } 1489 return len; 1490 } 1491 1492 /* Write to quotafile */ 1493 static ssize_t f2fs_quota_write(struct super_block *sb, int type, 1494 const char *data, size_t len, loff_t off) 1495 { 1496 struct inode *inode = sb_dqopt(sb)->files[type]; 1497 struct address_space *mapping = inode->i_mapping; 1498 const struct address_space_operations *a_ops = mapping->a_ops; 1499 int offset = off & (sb->s_blocksize - 1); 1500 size_t towrite = len; 1501 struct page *page; 1502 char *kaddr; 1503 int err = 0; 1504 int tocopy; 1505 1506 while (towrite > 0) { 1507 tocopy = min_t(unsigned long, sb->s_blocksize - offset, 1508 towrite); 1509 retry: 1510 err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, 1511 &page, NULL); 1512 if (unlikely(err)) { 1513 if (err == -ENOMEM) { 1514 congestion_wait(BLK_RW_ASYNC, HZ/50); 1515 goto retry; 1516 } 1517 break; 1518 } 1519 1520 kaddr = kmap_atomic(page); 1521 memcpy(kaddr + 
offset, data, tocopy); 1522 kunmap_atomic(kaddr); 1523 flush_dcache_page(page); 1524 1525 a_ops->write_end(NULL, mapping, off, tocopy, tocopy, 1526 page, NULL); 1527 offset = 0; 1528 towrite -= tocopy; 1529 off += tocopy; 1530 data += tocopy; 1531 cond_resched(); 1532 } 1533 1534 if (len == towrite) 1535 return err; 1536 inode->i_mtime = inode->i_ctime = current_time(inode); 1537 f2fs_mark_inode_dirty_sync(inode, false); 1538 return len - towrite; 1539 } 1540 1541 static struct dquot **f2fs_get_dquots(struct inode *inode) 1542 { 1543 return F2FS_I(inode)->i_dquot; 1544 } 1545 1546 static qsize_t *f2fs_get_reserved_space(struct inode *inode) 1547 { 1548 return &F2FS_I(inode)->i_reserved_quota; 1549 } 1550 1551 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) 1552 { 1553 return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type], 1554 sbi->s_jquota_fmt, type); 1555 } 1556 1557 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) 1558 { 1559 int enabled = 0; 1560 int i, err; 1561 1562 if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) { 1563 err = f2fs_enable_quotas(sbi->sb); 1564 if (err) { 1565 f2fs_msg(sbi->sb, KERN_ERR, 1566 "Cannot turn on quota_ino: %d", err); 1567 return 0; 1568 } 1569 return 1; 1570 } 1571 1572 for (i = 0; i < MAXQUOTAS; i++) { 1573 if (sbi->s_qf_names[i]) { 1574 err = f2fs_quota_on_mount(sbi, i); 1575 if (!err) { 1576 enabled = 1; 1577 continue; 1578 } 1579 f2fs_msg(sbi->sb, KERN_ERR, 1580 "Cannot turn on quotas: %d on %d", err, i); 1581 } 1582 } 1583 return enabled; 1584 } 1585 1586 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id, 1587 unsigned int flags) 1588 { 1589 struct inode *qf_inode; 1590 unsigned long qf_inum; 1591 int err; 1592 1593 BUG_ON(!f2fs_sb_has_quota_ino(sb)); 1594 1595 qf_inum = f2fs_qf_ino(sb, type); 1596 if (!qf_inum) 1597 return -EPERM; 1598 1599 qf_inode = f2fs_iget(sb, qf_inum); 1600 if (IS_ERR(qf_inode)) { 1601 f2fs_msg(sb, KERN_ERR, 1602 "Bad quota inode %u:%lu", type, qf_inum); 1603 return PTR_ERR(qf_inode); 1604 } 1605 1606 /* Don't account quota for quota files to avoid recursion */ 1607 qf_inode->i_flags |= S_NOQUOTA; 1608 err = dquot_enable(qf_inode, type, format_id, flags); 1609 iput(qf_inode); 1610 return err; 1611 } 1612 1613 static int f2fs_enable_quotas(struct super_block *sb) 1614 { 1615 int type, err = 0; 1616 unsigned long qf_inum; 1617 bool quota_mopt[MAXQUOTAS] = { 1618 test_opt(F2FS_SB(sb), USRQUOTA), 1619 test_opt(F2FS_SB(sb), GRPQUOTA), 1620 test_opt(F2FS_SB(sb), PRJQUOTA), 1621 }; 1622 1623 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 1624 for (type = 0; type < MAXQUOTAS; type++) { 1625 qf_inum = f2fs_qf_ino(sb, type); 1626 if (qf_inum) { 1627 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1, 1628 DQUOT_USAGE_ENABLED | 1629 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 1630 if (err) { 1631 f2fs_msg(sb, KERN_ERR, 1632 "Failed to enable quota tracking " 1633 "(type=%d, err=%d). Please run " 1634 "fsck to fix.", type, err); 1635 for (type--; type >= 0; type--) 1636 dquot_quota_off(sb, type); 1637 return err; 1638 } 1639 } 1640 } 1641 return 0; 1642 } 1643 1644 static int f2fs_quota_sync(struct super_block *sb, int type) 1645 { 1646 struct quota_info *dqopt = sb_dqopt(sb); 1647 int cnt; 1648 int ret; 1649 1650 ret = dquot_writeback_dquots(sb, type); 1651 if (ret) 1652 return ret; 1653 1654 /* 1655 * Now when everything is written we can discard the pagecache so 1656 * that userspace sees the changes. 
1657 */ 1658 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1659 if (type != -1 && cnt != type) 1660 continue; 1661 if (!sb_has_quota_active(sb, cnt)) 1662 continue; 1663 1664 ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping); 1665 if (ret) 1666 return ret; 1667 1668 inode_lock(dqopt->files[cnt]); 1669 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0); 1670 inode_unlock(dqopt->files[cnt]); 1671 } 1672 return 0; 1673 } 1674 1675 static int f2fs_quota_on(struct super_block *sb, int type, int format_id, 1676 const struct path *path) 1677 { 1678 struct inode *inode; 1679 int err; 1680 1681 err = f2fs_quota_sync(sb, type); 1682 if (err) 1683 return err; 1684 1685 err = dquot_quota_on(sb, type, format_id, path); 1686 if (err) 1687 return err; 1688 1689 inode = d_inode(path->dentry); 1690 1691 inode_lock(inode); 1692 F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL; 1693 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, 1694 S_NOATIME | S_IMMUTABLE); 1695 inode_unlock(inode); 1696 f2fs_mark_inode_dirty_sync(inode, false); 1697 1698 return 0; 1699 } 1700 1701 static int f2fs_quota_off(struct super_block *sb, int type) 1702 { 1703 struct inode *inode = sb_dqopt(sb)->files[type]; 1704 int err; 1705 1706 if (!inode || !igrab(inode)) 1707 return dquot_quota_off(sb, type); 1708 1709 f2fs_quota_sync(sb, type); 1710 1711 err = dquot_quota_off(sb, type); 1712 if (err || f2fs_sb_has_quota_ino(sb)) 1713 goto out_put; 1714 1715 inode_lock(inode); 1716 F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL); 1717 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); 1718 inode_unlock(inode); 1719 f2fs_mark_inode_dirty_sync(inode, false); 1720 out_put: 1721 iput(inode); 1722 return err; 1723 } 1724 1725 void f2fs_quota_off_umount(struct super_block *sb) 1726 { 1727 int type; 1728 1729 for (type = 0; type < MAXQUOTAS; type++) 1730 f2fs_quota_off(sb, type); 1731 } 1732 1733 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid) 1734 { 1735 *projid = F2FS_I(inode)->i_projid; 1736 return 0; 1737 } 1738 1739 static const struct dquot_operations f2fs_quota_operations = { 1740 .get_reserved_space = f2fs_get_reserved_space, 1741 .write_dquot = dquot_commit, 1742 .acquire_dquot = dquot_acquire, 1743 .release_dquot = dquot_release, 1744 .mark_dirty = dquot_mark_dquot_dirty, 1745 .write_info = dquot_commit_info, 1746 .alloc_dquot = dquot_alloc, 1747 .destroy_dquot = dquot_destroy, 1748 .get_projid = f2fs_get_projid, 1749 .get_next_id = dquot_get_next_id, 1750 }; 1751 1752 static const struct quotactl_ops f2fs_quotactl_ops = { 1753 .quota_on = f2fs_quota_on, 1754 .quota_off = f2fs_quota_off, 1755 .quota_sync = f2fs_quota_sync, 1756 .get_state = dquot_get_state, 1757 .set_info = dquot_set_dqinfo, 1758 .get_dqblk = dquot_get_dqblk, 1759 .set_dqblk = dquot_set_dqblk, 1760 .get_nextdqblk = dquot_get_next_dqblk, 1761 }; 1762 #else 1763 void f2fs_quota_off_umount(struct super_block *sb) 1764 { 1765 } 1766 #endif 1767 1768 static const struct super_operations f2fs_sops = { 1769 .alloc_inode = f2fs_alloc_inode, 1770 .drop_inode = f2fs_drop_inode, 1771 .destroy_inode = f2fs_destroy_inode, 1772 .write_inode = f2fs_write_inode, 1773 .dirty_inode = f2fs_dirty_inode, 1774 .show_options = f2fs_show_options, 1775 #ifdef CONFIG_QUOTA 1776 .quota_read = f2fs_quota_read, 1777 .quota_write = f2fs_quota_write, 1778 .get_dquots = f2fs_get_dquots, 1779 #endif 1780 .evict_inode = f2fs_evict_inode, 1781 .put_super = f2fs_put_super, 1782 .sync_fs = f2fs_sync_fs, 1783 .freeze_fs = f2fs_freeze, 1784 .unfreeze_fs = 
f2fs_unfreeze, 1785 .statfs = f2fs_statfs, 1786 .remount_fs = f2fs_remount, 1787 }; 1788 1789 #ifdef CONFIG_F2FS_FS_ENCRYPTION 1790 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len) 1791 { 1792 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, 1793 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, 1794 ctx, len, NULL); 1795 } 1796 1797 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, 1798 void *fs_data) 1799 { 1800 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, 1801 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, 1802 ctx, len, fs_data, XATTR_CREATE); 1803 } 1804 1805 static unsigned f2fs_max_namelen(struct inode *inode) 1806 { 1807 return S_ISLNK(inode->i_mode) ? 1808 inode->i_sb->s_blocksize : F2FS_NAME_LEN; 1809 } 1810 1811 static const struct fscrypt_operations f2fs_cryptops = { 1812 .key_prefix = "f2fs:", 1813 .get_context = f2fs_get_context, 1814 .set_context = f2fs_set_context, 1815 .empty_dir = f2fs_empty_dir, 1816 .max_namelen = f2fs_max_namelen, 1817 }; 1818 #endif 1819 1820 static struct inode *f2fs_nfs_get_inode(struct super_block *sb, 1821 u64 ino, u32 generation) 1822 { 1823 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1824 struct inode *inode; 1825 1826 if (check_nid_range(sbi, ino)) 1827 return ERR_PTR(-ESTALE); 1828 1829 /* 1830 * f2fs_iget isn't quite right if the inode is currently unallocated! 1831 * However f2fs_iget currently does appropriate checks to handle stale 1832 * inodes so everything is OK. 1833 */ 1834 inode = f2fs_iget(sb, ino); 1835 if (IS_ERR(inode)) 1836 return ERR_CAST(inode); 1837 if (unlikely(generation && inode->i_generation != generation)) { 1838 /* we didn't find the right inode.. */ 1839 iput(inode); 1840 return ERR_PTR(-ESTALE); 1841 } 1842 return inode; 1843 } 1844 1845 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid, 1846 int fh_len, int fh_type) 1847 { 1848 return generic_fh_to_dentry(sb, fid, fh_len, fh_type, 1849 f2fs_nfs_get_inode); 1850 } 1851 1852 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid, 1853 int fh_len, int fh_type) 1854 { 1855 return generic_fh_to_parent(sb, fid, fh_len, fh_type, 1856 f2fs_nfs_get_inode); 1857 } 1858 1859 static const struct export_operations f2fs_export_ops = { 1860 .fh_to_dentry = f2fs_fh_to_dentry, 1861 .fh_to_parent = f2fs_fh_to_parent, 1862 .get_parent = f2fs_get_parent, 1863 }; 1864 1865 static loff_t max_file_blocks(void) 1866 { 1867 loff_t result = 0; 1868 loff_t leaf_count = ADDRS_PER_BLOCK; 1869 1870 /* 1871 * note: previously, result is equal to (DEF_ADDRS_PER_INODE - 1872 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs try to reserve more 1873 * space in inode.i_addr, it will be more safe to reassign 1874 * result as zero. 
1875 */ 1876 1877 /* two direct node blocks */ 1878 result += (leaf_count * 2); 1879 1880 /* two indirect node blocks */ 1881 leaf_count *= NIDS_PER_BLOCK; 1882 result += (leaf_count * 2); 1883 1884 /* one double indirect node block */ 1885 leaf_count *= NIDS_PER_BLOCK; 1886 result += leaf_count; 1887 1888 return result; 1889 } 1890 1891 static int __f2fs_commit_super(struct buffer_head *bh, 1892 struct f2fs_super_block *super) 1893 { 1894 lock_buffer(bh); 1895 if (super) 1896 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); 1897 set_buffer_uptodate(bh); 1898 set_buffer_dirty(bh); 1899 unlock_buffer(bh); 1900 1901 /* it's rare case, we can do fua all the time */ 1902 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 1903 } 1904 1905 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, 1906 struct buffer_head *bh) 1907 { 1908 struct f2fs_super_block *raw_super = (struct f2fs_super_block *) 1909 (bh->b_data + F2FS_SUPER_OFFSET); 1910 struct super_block *sb = sbi->sb; 1911 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 1912 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); 1913 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); 1914 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr); 1915 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 1916 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 1917 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt); 1918 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit); 1919 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat); 1920 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa); 1921 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main); 1922 u32 segment_count = le32_to_cpu(raw_super->segment_count); 1923 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 1924 u64 main_end_blkaddr = main_blkaddr + 1925 (segment_count_main << log_blocks_per_seg); 1926 u64 seg_end_blkaddr = segment0_blkaddr + 1927 (segment_count << log_blocks_per_seg); 1928 1929 if (segment0_blkaddr != cp_blkaddr) { 1930 f2fs_msg(sb, KERN_INFO, 1931 "Mismatch start address, segment0(%u) cp_blkaddr(%u)", 1932 segment0_blkaddr, cp_blkaddr); 1933 return true; 1934 } 1935 1936 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != 1937 sit_blkaddr) { 1938 f2fs_msg(sb, KERN_INFO, 1939 "Wrong CP boundary, start(%u) end(%u) blocks(%u)", 1940 cp_blkaddr, sit_blkaddr, 1941 segment_count_ckpt << log_blocks_per_seg); 1942 return true; 1943 } 1944 1945 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) != 1946 nat_blkaddr) { 1947 f2fs_msg(sb, KERN_INFO, 1948 "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", 1949 sit_blkaddr, nat_blkaddr, 1950 segment_count_sit << log_blocks_per_seg); 1951 return true; 1952 } 1953 1954 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != 1955 ssa_blkaddr) { 1956 f2fs_msg(sb, KERN_INFO, 1957 "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", 1958 nat_blkaddr, ssa_blkaddr, 1959 segment_count_nat << log_blocks_per_seg); 1960 return true; 1961 } 1962 1963 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != 1964 main_blkaddr) { 1965 f2fs_msg(sb, KERN_INFO, 1966 "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", 1967 ssa_blkaddr, main_blkaddr, 1968 segment_count_ssa << log_blocks_per_seg); 1969 return true; 1970 } 1971 1972 if (main_end_blkaddr > seg_end_blkaddr) { 1973 f2fs_msg(sb, KERN_INFO, 1974 "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", 
1975 main_blkaddr, 1976 segment0_blkaddr + 1977 (segment_count << log_blocks_per_seg), 1978 segment_count_main << log_blocks_per_seg); 1979 return true; 1980 } else if (main_end_blkaddr < seg_end_blkaddr) { 1981 int err = 0; 1982 char *res; 1983 1984 /* fix in-memory information all the time */ 1985 raw_super->segment_count = cpu_to_le32((main_end_blkaddr - 1986 segment0_blkaddr) >> log_blocks_per_seg); 1987 1988 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) { 1989 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); 1990 res = "internally"; 1991 } else { 1992 err = __f2fs_commit_super(bh, NULL); 1993 res = err ? "failed" : "done"; 1994 } 1995 f2fs_msg(sb, KERN_INFO, 1996 "Fix alignment : %s, start(%u) end(%u) block(%u)", 1997 res, main_blkaddr, 1998 segment0_blkaddr + 1999 (segment_count << log_blocks_per_seg), 2000 segment_count_main << log_blocks_per_seg); 2001 if (err) 2002 return true; 2003 } 2004 return false; 2005 } 2006 2007 static int sanity_check_raw_super(struct f2fs_sb_info *sbi, 2008 struct buffer_head *bh) 2009 { 2010 struct f2fs_super_block *raw_super = (struct f2fs_super_block *) 2011 (bh->b_data + F2FS_SUPER_OFFSET); 2012 struct super_block *sb = sbi->sb; 2013 unsigned int blocksize; 2014 2015 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 2016 f2fs_msg(sb, KERN_INFO, 2017 "Magic Mismatch, valid(0x%x) - read(0x%x)", 2018 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); 2019 return 1; 2020 } 2021 2022 /* Currently, support only 4KB page cache size */ 2023 if (F2FS_BLKSIZE != PAGE_SIZE) { 2024 f2fs_msg(sb, KERN_INFO, 2025 "Invalid page_cache_size (%lu), supports only 4KB\n", 2026 PAGE_SIZE); 2027 return 1; 2028 } 2029 2030 /* Currently, support only 4KB block size */ 2031 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 2032 if (blocksize != F2FS_BLKSIZE) { 2033 f2fs_msg(sb, KERN_INFO, 2034 "Invalid blocksize (%u), supports only 4KB\n", 2035 blocksize); 2036 return 1; 2037 } 2038 2039 /* check log blocks per segment */ 2040 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { 2041 f2fs_msg(sb, KERN_INFO, 2042 "Invalid log blocks per segment (%u)\n", 2043 le32_to_cpu(raw_super->log_blocks_per_seg)); 2044 return 1; 2045 } 2046 2047 /* Currently, support 512/1024/2048/4096 bytes sector size */ 2048 if (le32_to_cpu(raw_super->log_sectorsize) > 2049 F2FS_MAX_LOG_SECTOR_SIZE || 2050 le32_to_cpu(raw_super->log_sectorsize) < 2051 F2FS_MIN_LOG_SECTOR_SIZE) { 2052 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)", 2053 le32_to_cpu(raw_super->log_sectorsize)); 2054 return 1; 2055 } 2056 if (le32_to_cpu(raw_super->log_sectors_per_block) + 2057 le32_to_cpu(raw_super->log_sectorsize) != 2058 F2FS_MAX_LOG_SECTOR_SIZE) { 2059 f2fs_msg(sb, KERN_INFO, 2060 "Invalid log sectors per block(%u) log sectorsize(%u)", 2061 le32_to_cpu(raw_super->log_sectors_per_block), 2062 le32_to_cpu(raw_super->log_sectorsize)); 2063 return 1; 2064 } 2065 2066 /* check reserved ino info */ 2067 if (le32_to_cpu(raw_super->node_ino) != 1 || 2068 le32_to_cpu(raw_super->meta_ino) != 2 || 2069 le32_to_cpu(raw_super->root_ino) != 3) { 2070 f2fs_msg(sb, KERN_INFO, 2071 "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", 2072 le32_to_cpu(raw_super->node_ino), 2073 le32_to_cpu(raw_super->meta_ino), 2074 le32_to_cpu(raw_super->root_ino)); 2075 return 1; 2076 } 2077 2078 if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) { 2079 f2fs_msg(sb, KERN_INFO, 2080 "Invalid segment count (%u)", 2081 le32_to_cpu(raw_super->segment_count)); 2082 return 1; 2083 } 2084 2085 /* check CP/SIT/NAT/SSA/MAIN_AREA area 
boundary */ 2086 if (sanity_check_area_boundary(sbi, bh)) 2087 return 1; 2088 2089 return 0; 2090 } 2091 2092 int sanity_check_ckpt(struct f2fs_sb_info *sbi) 2093 { 2094 unsigned int total, fsmeta; 2095 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 2096 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2097 unsigned int ovp_segments, reserved_segments; 2098 unsigned int main_segs, blocks_per_seg; 2099 int i; 2100 2101 total = le32_to_cpu(raw_super->segment_count); 2102 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); 2103 fsmeta += le32_to_cpu(raw_super->segment_count_sit); 2104 fsmeta += le32_to_cpu(raw_super->segment_count_nat); 2105 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); 2106 fsmeta += le32_to_cpu(raw_super->segment_count_ssa); 2107 2108 if (unlikely(fsmeta >= total)) 2109 return 1; 2110 2111 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 2112 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 2113 2114 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || 2115 ovp_segments == 0 || reserved_segments == 0)) { 2116 f2fs_msg(sbi->sb, KERN_ERR, 2117 "Wrong layout: check mkfs.f2fs version"); 2118 return 1; 2119 } 2120 2121 main_segs = le32_to_cpu(raw_super->segment_count_main); 2122 blocks_per_seg = sbi->blocks_per_seg; 2123 2124 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { 2125 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || 2126 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) 2127 return 1; 2128 } 2129 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { 2130 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || 2131 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) 2132 return 1; 2133 } 2134 2135 if (unlikely(f2fs_cp_error(sbi))) { 2136 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); 2137 return 1; 2138 } 2139 return 0; 2140 } 2141 2142 static void init_sb_info(struct f2fs_sb_info *sbi) 2143 { 2144 struct f2fs_super_block *raw_super = sbi->raw_super; 2145 int i, j; 2146 2147 sbi->log_sectors_per_block = 2148 le32_to_cpu(raw_super->log_sectors_per_block); 2149 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); 2150 sbi->blocksize = 1 << sbi->log_blocksize; 2151 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 2152 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg; 2153 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); 2154 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); 2155 sbi->total_sections = le32_to_cpu(raw_super->section_count); 2156 sbi->total_node_count = 2157 (le32_to_cpu(raw_super->segment_count_nat) / 2) 2158 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; 2159 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino); 2160 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino); 2161 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino); 2162 sbi->cur_victim_sec = NULL_SECNO; 2163 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; 2164 2165 sbi->dir_level = DEF_DIR_LEVEL; 2166 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; 2167 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; 2168 clear_sbi_flag(sbi, SBI_NEED_FSCK); 2169 2170 for (i = 0; i < NR_COUNT_TYPE; i++) 2171 atomic_set(&sbi->nr_pages[i], 0); 2172 2173 atomic_set(&sbi->wb_sync_req, 0); 2174 2175 INIT_LIST_HEAD(&sbi->s_list); 2176 mutex_init(&sbi->umount_mutex); 2177 for (i = 0; i < NR_PAGE_TYPE - 1; i++) 2178 for (j = HOT; j < NR_TEMP_TYPE; j++) 2179 mutex_init(&sbi->wio_mutex[i][j]); 2180 spin_lock_init(&sbi->cp_lock); 2181 2182 sbi->dirty_device = 0; 2183 spin_lock_init(&sbi->dev_lock); 2184 } 2185 2186 static int 
init_percpu_info(struct f2fs_sb_info *sbi) 2187 { 2188 int err; 2189 2190 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); 2191 if (err) 2192 return err; 2193 2194 return percpu_counter_init(&sbi->total_valid_inode_count, 0, 2195 GFP_KERNEL); 2196 } 2197 2198 #ifdef CONFIG_BLK_DEV_ZONED 2199 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) 2200 { 2201 struct block_device *bdev = FDEV(devi).bdev; 2202 sector_t nr_sectors = bdev->bd_part->nr_sects; 2203 sector_t sector = 0; 2204 struct blk_zone *zones; 2205 unsigned int i, nr_zones; 2206 unsigned int n = 0; 2207 int err = -EIO; 2208 2209 if (!f2fs_sb_mounted_blkzoned(sbi->sb)) 2210 return 0; 2211 2212 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != 2213 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev))) 2214 return -EINVAL; 2215 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)); 2216 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz != 2217 __ilog2_u32(sbi->blocks_per_blkz)) 2218 return -EINVAL; 2219 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz); 2220 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >> 2221 sbi->log_blocks_per_blkz; 2222 if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) 2223 FDEV(devi).nr_blkz++; 2224 2225 FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz, 2226 GFP_KERNEL); 2227 if (!FDEV(devi).blkz_type) 2228 return -ENOMEM; 2229 2230 #define F2FS_REPORT_NR_ZONES 4096 2231 2232 zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) * 2233 F2FS_REPORT_NR_ZONES, GFP_KERNEL); 2234 if (!zones) 2235 return -ENOMEM; 2236 2237 /* Get block zone types */ 2238 while (zones && sector < nr_sectors) { 2239 2240 nr_zones = F2FS_REPORT_NR_ZONES; 2241 err = blkdev_report_zones(bdev, sector, 2242 zones, &nr_zones, 2243 GFP_KERNEL); 2244 if (err) 2245 break; 2246 if (!nr_zones) { 2247 err = -EIO; 2248 break; 2249 } 2250 2251 for (i = 0; i < nr_zones; i++) { 2252 FDEV(devi).blkz_type[n] = zones[i].type; 2253 sector += zones[i].len; 2254 n++; 2255 } 2256 } 2257 2258 kfree(zones); 2259 2260 return err; 2261 } 2262 #endif 2263 2264 /* 2265 * Read the f2fs raw super block. 2266 * Because we keep two copies of the super block, read both of them 2267 * to get the first valid one. If either copy is broken, we pass 2268 * the recovery flag back to the caller.
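 * When the recovery flag is set, f2fs_fill_super() later calls
 * f2fs_commit_super() to rewrite the damaged copy, unless the device
 * is read-only.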
2269 */ 2270 static int read_raw_super_block(struct f2fs_sb_info *sbi, 2271 struct f2fs_super_block **raw_super, 2272 int *valid_super_block, int *recovery) 2273 { 2274 struct super_block *sb = sbi->sb; 2275 int block; 2276 struct buffer_head *bh; 2277 struct f2fs_super_block *super; 2278 int err = 0; 2279 2280 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL); 2281 if (!super) 2282 return -ENOMEM; 2283 2284 for (block = 0; block < 2; block++) { 2285 bh = sb_bread(sb, block); 2286 if (!bh) { 2287 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock", 2288 block + 1); 2289 err = -EIO; 2290 continue; 2291 } 2292 2293 /* sanity checking of raw super */ 2294 if (sanity_check_raw_super(sbi, bh)) { 2295 f2fs_msg(sb, KERN_ERR, 2296 "Can't find valid F2FS filesystem in %dth superblock", 2297 block + 1); 2298 err = -EINVAL; 2299 brelse(bh); 2300 continue; 2301 } 2302 2303 if (!*raw_super) { 2304 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET, 2305 sizeof(*super)); 2306 *valid_super_block = block; 2307 *raw_super = super; 2308 } 2309 brelse(bh); 2310 } 2311 2312 /* Fail to read any one of the superblocks*/ 2313 if (err < 0) 2314 *recovery = 1; 2315 2316 /* No valid superblock */ 2317 if (!*raw_super) 2318 kfree(super); 2319 else 2320 err = 0; 2321 2322 return err; 2323 } 2324 2325 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) 2326 { 2327 struct buffer_head *bh; 2328 int err; 2329 2330 if ((recover && f2fs_readonly(sbi->sb)) || 2331 bdev_read_only(sbi->sb->s_bdev)) { 2332 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); 2333 return -EROFS; 2334 } 2335 2336 /* write back-up superblock first */ 2337 bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1); 2338 if (!bh) 2339 return -EIO; 2340 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); 2341 brelse(bh); 2342 2343 /* if we are in recovery path, skip writing valid superblock */ 2344 if (recover || err) 2345 return err; 2346 2347 /* write current valid superblock */ 2348 bh = sb_getblk(sbi->sb, sbi->valid_super_block); 2349 if (!bh) 2350 return -EIO; 2351 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); 2352 brelse(bh); 2353 return err; 2354 } 2355 2356 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) 2357 { 2358 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 2359 unsigned int max_devices = MAX_DEVICES; 2360 int i; 2361 2362 /* Initialize single device information */ 2363 if (!RDEV(0).path[0]) { 2364 if (!bdev_is_zoned(sbi->sb->s_bdev)) 2365 return 0; 2366 max_devices = 1; 2367 } 2368 2369 /* 2370 * Initialize multiple devices information, or single 2371 * zoned block device information. 
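 * For a multi-device mount, each device is assigned a contiguous range
 * of block addresses (FDEV(i).start_blk .. FDEV(i).end_blk) derived from
 * its segment count, so that the devices form one linear address space.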
2372 */ 2373 sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) * 2374 max_devices, GFP_KERNEL); 2375 if (!sbi->devs) 2376 return -ENOMEM; 2377 2378 for (i = 0; i < max_devices; i++) { 2379 2380 if (i > 0 && !RDEV(i).path[0]) 2381 break; 2382 2383 if (max_devices == 1) { 2384 /* Single zoned block device mount */ 2385 FDEV(0).bdev = 2386 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, 2387 sbi->sb->s_mode, sbi->sb->s_type); 2388 } else { 2389 /* Multi-device mount */ 2390 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN); 2391 FDEV(i).total_segments = 2392 le32_to_cpu(RDEV(i).total_segments); 2393 if (i == 0) { 2394 FDEV(i).start_blk = 0; 2395 FDEV(i).end_blk = FDEV(i).start_blk + 2396 (FDEV(i).total_segments << 2397 sbi->log_blocks_per_seg) - 1 + 2398 le32_to_cpu(raw_super->segment0_blkaddr); 2399 } else { 2400 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1; 2401 FDEV(i).end_blk = FDEV(i).start_blk + 2402 (FDEV(i).total_segments << 2403 sbi->log_blocks_per_seg) - 1; 2404 } 2405 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, 2406 sbi->sb->s_mode, sbi->sb->s_type); 2407 } 2408 if (IS_ERR(FDEV(i).bdev)) 2409 return PTR_ERR(FDEV(i).bdev); 2410 2411 /* to release errored devices */ 2412 sbi->s_ndevs = i + 1; 2413 2414 #ifdef CONFIG_BLK_DEV_ZONED 2415 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && 2416 !f2fs_sb_mounted_blkzoned(sbi->sb)) { 2417 f2fs_msg(sbi->sb, KERN_ERR, 2418 "Zoned block device feature not enabled\n"); 2419 return -EINVAL; 2420 } 2421 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) { 2422 if (init_blkz_info(sbi, i)) { 2423 f2fs_msg(sbi->sb, KERN_ERR, 2424 "Failed to initialize F2FS blkzone information"); 2425 return -EINVAL; 2426 } 2427 if (max_devices == 1) 2428 break; 2429 f2fs_msg(sbi->sb, KERN_INFO, 2430 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", 2431 i, FDEV(i).path, 2432 FDEV(i).total_segments, 2433 FDEV(i).start_blk, FDEV(i).end_blk, 2434 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ? 
2435 "Host-aware" : "Host-managed"); 2436 continue; 2437 } 2438 #endif 2439 f2fs_msg(sbi->sb, KERN_INFO, 2440 "Mount Device [%2d]: %20s, %8u, %8x - %8x", 2441 i, FDEV(i).path, 2442 FDEV(i).total_segments, 2443 FDEV(i).start_blk, FDEV(i).end_blk); 2444 } 2445 f2fs_msg(sbi->sb, KERN_INFO, 2446 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi)); 2447 return 0; 2448 } 2449 2450 static int f2fs_fill_super(struct super_block *sb, void *data, int silent) 2451 { 2452 struct f2fs_sb_info *sbi; 2453 struct f2fs_super_block *raw_super; 2454 struct inode *root; 2455 int err; 2456 bool retry = true, need_fsck = false; 2457 char *options = NULL; 2458 int recovery, i, valid_super_block; 2459 struct curseg_info *seg_i; 2460 2461 try_onemore: 2462 err = -EINVAL; 2463 raw_super = NULL; 2464 valid_super_block = -1; 2465 recovery = 0; 2466 2467 /* allocate memory for f2fs-specific super block info */ 2468 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); 2469 if (!sbi) 2470 return -ENOMEM; 2471 2472 sbi->sb = sb; 2473 2474 /* Load the checksum driver */ 2475 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); 2476 if (IS_ERR(sbi->s_chksum_driver)) { 2477 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver."); 2478 err = PTR_ERR(sbi->s_chksum_driver); 2479 sbi->s_chksum_driver = NULL; 2480 goto free_sbi; 2481 } 2482 2483 /* set a block size */ 2484 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { 2485 f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); 2486 goto free_sbi; 2487 } 2488 2489 err = read_raw_super_block(sbi, &raw_super, &valid_super_block, 2490 &recovery); 2491 if (err) 2492 goto free_sbi; 2493 2494 sb->s_fs_info = sbi; 2495 sbi->raw_super = raw_super; 2496 2497 sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); 2498 sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); 2499 2500 /* precompute checksum seed for metadata */ 2501 if (f2fs_sb_has_inode_chksum(sb)) 2502 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid, 2503 sizeof(raw_super->uuid)); 2504 2505 /* 2506 * The BLKZONED feature indicates that the drive was formatted with 2507 * zone alignment optimization. This is optional for host-aware 2508 * devices, but mandatory for host-managed zoned block devices. 
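 * Kernels built without CONFIG_BLK_DEV_ZONED cannot mount such a
 * volume; the check below rejects it with -EOPNOTSUPP.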
2509 */ 2510 #ifndef CONFIG_BLK_DEV_ZONED 2511 if (f2fs_sb_mounted_blkzoned(sb)) { 2512 f2fs_msg(sb, KERN_ERR, 2513 "Zoned block device support is not enabled\n"); 2514 err = -EOPNOTSUPP; 2515 goto free_sb_buf; 2516 } 2517 #endif 2518 default_options(sbi); 2519 /* parse mount options */ 2520 options = kstrdup((const char *)data, GFP_KERNEL); 2521 if (data && !options) { 2522 err = -ENOMEM; 2523 goto free_sb_buf; 2524 } 2525 2526 err = parse_options(sb, options); 2527 if (err) 2528 goto free_options; 2529 2530 sbi->max_file_blocks = max_file_blocks(); 2531 sb->s_maxbytes = sbi->max_file_blocks << 2532 le32_to_cpu(raw_super->log_blocksize); 2533 sb->s_max_links = F2FS_LINK_MAX; 2534 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 2535 2536 #ifdef CONFIG_QUOTA 2537 sb->dq_op = &f2fs_quota_operations; 2538 if (f2fs_sb_has_quota_ino(sb)) 2539 sb->s_qcop = &dquot_quotactl_sysfile_ops; 2540 else 2541 sb->s_qcop = &f2fs_quotactl_ops; 2542 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 2543 2544 if (f2fs_sb_has_quota_ino(sbi->sb)) { 2545 for (i = 0; i < MAXQUOTAS; i++) { 2546 if (f2fs_qf_ino(sbi->sb, i)) 2547 sbi->nquota_files++; 2548 } 2549 } 2550 #endif 2551 2552 sb->s_op = &f2fs_sops; 2553 #ifdef CONFIG_F2FS_FS_ENCRYPTION 2554 sb->s_cop = &f2fs_cryptops; 2555 #endif 2556 sb->s_xattr = f2fs_xattr_handlers; 2557 sb->s_export_op = &f2fs_export_ops; 2558 sb->s_magic = F2FS_SUPER_MAGIC; 2559 sb->s_time_gran = 1; 2560 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 2561 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); 2562 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); 2563 sb->s_iflags |= SB_I_CGROUPWB; 2564 2565 /* init f2fs-specific super block info */ 2566 sbi->valid_super_block = valid_super_block; 2567 mutex_init(&sbi->gc_mutex); 2568 mutex_init(&sbi->cp_mutex); 2569 init_rwsem(&sbi->node_write); 2570 init_rwsem(&sbi->node_change); 2571 2572 /* disallow all the data/node/meta page writes */ 2573 set_sbi_flag(sbi, SBI_POR_DOING); 2574 spin_lock_init(&sbi->stat_lock); 2575 2576 /* init iostat info */ 2577 spin_lock_init(&sbi->iostat_lock); 2578 sbi->iostat_enable = false; 2579 2580 for (i = 0; i < NR_PAGE_TYPE; i++) { 2581 int n = (i == META) ? 
1: NR_TEMP_TYPE; 2582 int j; 2583 2584 sbi->write_io[i] = f2fs_kmalloc(sbi, 2585 n * sizeof(struct f2fs_bio_info), 2586 GFP_KERNEL); 2587 if (!sbi->write_io[i]) { 2588 err = -ENOMEM; 2589 goto free_options; 2590 } 2591 2592 for (j = HOT; j < n; j++) { 2593 init_rwsem(&sbi->write_io[i][j].io_rwsem); 2594 sbi->write_io[i][j].sbi = sbi; 2595 sbi->write_io[i][j].bio = NULL; 2596 spin_lock_init(&sbi->write_io[i][j].io_lock); 2597 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list); 2598 } 2599 } 2600 2601 init_rwsem(&sbi->cp_rwsem); 2602 init_waitqueue_head(&sbi->cp_wait); 2603 init_sb_info(sbi); 2604 2605 err = init_percpu_info(sbi); 2606 if (err) 2607 goto free_bio_info; 2608 2609 if (F2FS_IO_SIZE(sbi) > 1) { 2610 sbi->write_io_dummy = 2611 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0); 2612 if (!sbi->write_io_dummy) { 2613 err = -ENOMEM; 2614 goto free_percpu; 2615 } 2616 } 2617 2618 /* get an inode for meta space */ 2619 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 2620 if (IS_ERR(sbi->meta_inode)) { 2621 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); 2622 err = PTR_ERR(sbi->meta_inode); 2623 goto free_io_dummy; 2624 } 2625 2626 err = get_valid_checkpoint(sbi); 2627 if (err) { 2628 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); 2629 goto free_meta_inode; 2630 } 2631 2632 /* Initialize device list */ 2633 err = f2fs_scan_devices(sbi); 2634 if (err) { 2635 f2fs_msg(sb, KERN_ERR, "Failed to find devices"); 2636 goto free_devices; 2637 } 2638 2639 sbi->total_valid_node_count = 2640 le32_to_cpu(sbi->ckpt->valid_node_count); 2641 percpu_counter_set(&sbi->total_valid_inode_count, 2642 le32_to_cpu(sbi->ckpt->valid_inode_count)); 2643 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); 2644 sbi->total_valid_block_count = 2645 le64_to_cpu(sbi->ckpt->valid_block_count); 2646 sbi->last_valid_block_count = sbi->total_valid_block_count; 2647 sbi->reserved_blocks = 0; 2648 sbi->current_reserved_blocks = 0; 2649 limit_reserve_root(sbi); 2650 2651 for (i = 0; i < NR_INODE_TYPE; i++) { 2652 INIT_LIST_HEAD(&sbi->inode_list[i]); 2653 spin_lock_init(&sbi->inode_lock[i]); 2654 } 2655 2656 init_extent_cache_info(sbi); 2657 2658 init_ino_entry_info(sbi); 2659 2660 /* setup f2fs internal modules */ 2661 err = build_segment_manager(sbi); 2662 if (err) { 2663 f2fs_msg(sb, KERN_ERR, 2664 "Failed to initialize F2FS segment manager"); 2665 goto free_sm; 2666 } 2667 err = build_node_manager(sbi); 2668 if (err) { 2669 f2fs_msg(sb, KERN_ERR, 2670 "Failed to initialize F2FS node manager"); 2671 goto free_nm; 2672 } 2673 2674 /* For write statistics */ 2675 if (sb->s_bdev->bd_part) 2676 sbi->sectors_written_start = 2677 (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]); 2678 2679 /* Read accumulated write IO statistics if exists */ 2680 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); 2681 if (__exist_node_summaries(sbi)) 2682 sbi->kbytes_written = 2683 le64_to_cpu(seg_i->journal->info.kbytes_written); 2684 2685 build_gc_manager(sbi); 2686 2687 /* get an inode for node space */ 2688 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 2689 if (IS_ERR(sbi->node_inode)) { 2690 f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); 2691 err = PTR_ERR(sbi->node_inode); 2692 goto free_nm; 2693 } 2694 2695 err = f2fs_build_stats(sbi); 2696 if (err) 2697 goto free_node_inode; 2698 2699 /* read root inode and dentry */ 2700 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 2701 if (IS_ERR(root)) { 2702 f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); 2703 err = PTR_ERR(root); 2704 goto 
free_stats; 2705 } 2706 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 2707 iput(root); 2708 err = -EINVAL; 2709 goto free_stats; 2710 } 2711 2712 sb->s_root = d_make_root(root); /* allocate root dentry */ 2713 if (!sb->s_root) { 2714 err = -ENOMEM; 2715 goto free_root_inode; 2716 } 2717 2718 err = f2fs_register_sysfs(sbi); 2719 if (err) 2720 goto free_root_inode; 2721 2722 #ifdef CONFIG_QUOTA 2723 /* 2724 * Turn on quotas which were not enabled for read-only mounts if 2725 * the filesystem has the quota feature, so that they are updated correctly. 2726 */ 2727 if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) { 2728 err = f2fs_enable_quotas(sb); 2729 if (err) { 2730 f2fs_msg(sb, KERN_ERR, 2731 "Cannot turn on quotas: error %d", err); 2732 goto free_sysfs; 2733 } 2734 } 2735 #endif 2736 /* if there are any orphan nodes, free them */ 2737 err = recover_orphan_inodes(sbi); 2738 if (err) 2739 goto free_meta; 2740 2741 /* recover fsynced data */ 2742 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { 2743 /* 2744 * mount should fail when the device is read-only and the 2745 * previous checkpoint was not completed by a clean system shutdown. 2746 */ 2747 if (bdev_read_only(sb->s_bdev) && 2748 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 2749 err = -EROFS; 2750 goto free_meta; 2751 } 2752 2753 if (need_fsck) 2754 set_sbi_flag(sbi, SBI_NEED_FSCK); 2755 2756 if (!retry) 2757 goto skip_recovery; 2758 2759 err = recover_fsync_data(sbi, false); 2760 if (err < 0) { 2761 need_fsck = true; 2762 f2fs_msg(sb, KERN_ERR, 2763 "Cannot recover all fsync data errno=%d", err); 2764 goto free_meta; 2765 } 2766 } else { 2767 err = recover_fsync_data(sbi, true); 2768 2769 if (!f2fs_readonly(sb) && err > 0) { 2770 err = -EINVAL; 2771 f2fs_msg(sb, KERN_ERR, 2772 "Need to recover fsync data"); 2773 goto free_meta; 2774 } 2775 } 2776 skip_recovery: 2777 /* recover_fsync_data() cleared this already */ 2778 clear_sbi_flag(sbi, SBI_POR_DOING); 2779 2780 /* 2781 * If the filesystem is not mounted read-only, 2782 * then start the gc_thread. 2783 */ 2784 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) { 2785 /* After POR, we can run background GC thread. */ 2786 err = start_gc_thread(sbi); 2787 if (err) 2788 goto free_meta; 2789 } 2790 kfree(options); 2791 2792 /* recover broken superblock */ 2793 if (recovery) { 2794 err = f2fs_commit_super(sbi, true); 2795 f2fs_msg(sb, KERN_INFO, 2796 "Try to recover %dth superblock, ret: %d", 2797 sbi->valid_super_block ? 1 : 2, err); 2798 } 2799 2800 f2fs_join_shrinker(sbi); 2801 2802 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx", 2803 cur_cp_version(F2FS_CKPT(sbi))); 2804 f2fs_update_time(sbi, CP_TIME); 2805 f2fs_update_time(sbi, REQ_TIME); 2806 return 0; 2807 2808 free_meta: 2809 #ifdef CONFIG_QUOTA 2810 if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) 2811 f2fs_quota_off_umount(sbi->sb); 2812 #endif 2813 f2fs_sync_inode_meta(sbi); 2814 /* 2815 * Some dirty meta pages can be left behind when recover_orphan_inodes() 2816 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg() 2817 * followed by write_checkpoint() through f2fs_write_node_pages(), which 2818 * falls into an infinite loop in sync_meta_pages().
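 * Truncating the meta mapping below drops those dirty pages before the
 * node and meta inodes are put.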
2819 */ 2820 truncate_inode_pages_final(META_MAPPING(sbi)); 2821 #ifdef CONFIG_QUOTA 2822 free_sysfs: 2823 #endif 2824 f2fs_unregister_sysfs(sbi); 2825 free_root_inode: 2826 dput(sb->s_root); 2827 sb->s_root = NULL; 2828 free_stats: 2829 f2fs_destroy_stats(sbi); 2830 free_node_inode: 2831 release_ino_entry(sbi, true); 2832 truncate_inode_pages_final(NODE_MAPPING(sbi)); 2833 iput(sbi->node_inode); 2834 free_nm: 2835 destroy_node_manager(sbi); 2836 free_sm: 2837 destroy_segment_manager(sbi); 2838 free_devices: 2839 destroy_device_list(sbi); 2840 kfree(sbi->ckpt); 2841 free_meta_inode: 2842 make_bad_inode(sbi->meta_inode); 2843 iput(sbi->meta_inode); 2844 free_io_dummy: 2845 mempool_destroy(sbi->write_io_dummy); 2846 free_percpu: 2847 destroy_percpu_info(sbi); 2848 free_bio_info: 2849 for (i = 0; i < NR_PAGE_TYPE; i++) 2850 kfree(sbi->write_io[i]); 2851 free_options: 2852 #ifdef CONFIG_QUOTA 2853 for (i = 0; i < MAXQUOTAS; i++) 2854 kfree(sbi->s_qf_names[i]); 2855 #endif 2856 kfree(options); 2857 free_sb_buf: 2858 kfree(raw_super); 2859 free_sbi: 2860 if (sbi->s_chksum_driver) 2861 crypto_free_shash(sbi->s_chksum_driver); 2862 kfree(sbi); 2863 2864 /* give only one another chance */ 2865 if (retry) { 2866 retry = false; 2867 shrink_dcache_sb(sb); 2868 goto try_onemore; 2869 } 2870 return err; 2871 } 2872 2873 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags, 2874 const char *dev_name, void *data) 2875 { 2876 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super); 2877 } 2878 2879 static void kill_f2fs_super(struct super_block *sb) 2880 { 2881 if (sb->s_root) { 2882 set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE); 2883 stop_gc_thread(F2FS_SB(sb)); 2884 stop_discard_thread(F2FS_SB(sb)); 2885 } 2886 kill_block_super(sb); 2887 } 2888 2889 static struct file_system_type f2fs_fs_type = { 2890 .owner = THIS_MODULE, 2891 .name = "f2fs", 2892 .mount = f2fs_mount, 2893 .kill_sb = kill_f2fs_super, 2894 .fs_flags = FS_REQUIRES_DEV, 2895 }; 2896 MODULE_ALIAS_FS("f2fs"); 2897 2898 static int __init init_inodecache(void) 2899 { 2900 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache", 2901 sizeof(struct f2fs_inode_info), 0, 2902 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL); 2903 if (!f2fs_inode_cachep) 2904 return -ENOMEM; 2905 return 0; 2906 } 2907 2908 static void destroy_inodecache(void) 2909 { 2910 /* 2911 * Make sure all delayed rcu free inodes are flushed before we 2912 * destroy cache. 
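 * Inodes are freed via call_rcu(), so rcu_barrier() waits for any
 * pending callbacks before kmem_cache_destroy() runs.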
2913 */ 2914 rcu_barrier(); 2915 kmem_cache_destroy(f2fs_inode_cachep); 2916 } 2917 2918 static int __init init_f2fs_fs(void) 2919 { 2920 int err; 2921 2922 f2fs_build_trace_ios(); 2923 2924 err = init_inodecache(); 2925 if (err) 2926 goto fail; 2927 err = create_node_manager_caches(); 2928 if (err) 2929 goto free_inodecache; 2930 err = create_segment_manager_caches(); 2931 if (err) 2932 goto free_node_manager_caches; 2933 err = create_checkpoint_caches(); 2934 if (err) 2935 goto free_segment_manager_caches; 2936 err = create_extent_cache(); 2937 if (err) 2938 goto free_checkpoint_caches; 2939 err = f2fs_init_sysfs(); 2940 if (err) 2941 goto free_extent_cache; 2942 err = register_shrinker(&f2fs_shrinker_info); 2943 if (err) 2944 goto free_sysfs; 2945 err = register_filesystem(&f2fs_fs_type); 2946 if (err) 2947 goto free_shrinker; 2948 err = f2fs_create_root_stats(); 2949 if (err) 2950 goto free_filesystem; 2951 return 0; 2952 2953 free_filesystem: 2954 unregister_filesystem(&f2fs_fs_type); 2955 free_shrinker: 2956 unregister_shrinker(&f2fs_shrinker_info); 2957 free_sysfs: 2958 f2fs_exit_sysfs(); 2959 free_extent_cache: 2960 destroy_extent_cache(); 2961 free_checkpoint_caches: 2962 destroy_checkpoint_caches(); 2963 free_segment_manager_caches: 2964 destroy_segment_manager_caches(); 2965 free_node_manager_caches: 2966 destroy_node_manager_caches(); 2967 free_inodecache: 2968 destroy_inodecache(); 2969 fail: 2970 return err; 2971 } 2972 2973 static void __exit exit_f2fs_fs(void) 2974 { 2975 f2fs_destroy_root_stats(); 2976 unregister_filesystem(&f2fs_fs_type); 2977 unregister_shrinker(&f2fs_shrinker_info); 2978 f2fs_exit_sysfs(); 2979 destroy_extent_cache(); 2980 destroy_checkpoint_caches(); 2981 destroy_segment_manager_caches(); 2982 destroy_node_manager_caches(); 2983 destroy_inodecache(); 2984 f2fs_destroy_trace_ios(); 2985 } 2986 2987 module_init(init_f2fs_fs) 2988 module_exit(exit_f2fs_fs) 2989 2990 MODULE_AUTHOR("Samsung Electronics's Praesto Team"); 2991 MODULE_DESCRIPTION("Flash Friendly File System"); 2992 MODULE_LICENSE("GPL"); 2993 2994