/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
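/*
 * Example: ver_after() compares two 64-bit version numbers through a signed
 * subtraction, so "a is newer than b" stays correct even if the counters
 * ever wrap around; ver_after(10, 7) is true, while ver_after(7, 7) is false.
 */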
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
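/*
 * Example: feature bits live in the raw superblock as a little-endian mask,
 * so a check like F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION) simply
 * tests raw_super->feature against cpu_to_le32(0x2000), and
 * F2FS_SET_FEATURE()/F2FS_CLEAR_FEATURE() flip the same bit in place.
 */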
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
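/*
 * Example: plist_idx() maps a discard length in blocks to one of the
 * MAX_PLIST_NUM pending lists: a 1-block discard goes to list 0, a 16-block
 * discard to list 15, and anything of MAX_PLIST_NUM (512) blocks or more is
 * clamped onto the last list, index 511.
 */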
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
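/*
 * Rough usage sketch: before packing dirty NAT entries into the current
 * summary journal, callers are expected to check the remaining room first,
 * e.g.
 *
 *	if (__has_cursum_space(journal, nr_dirty, NAT_JOURNAL))
 *		i = update_nats_in_cursum(journal, nr_dirty);
 *
 * so that no more than NAT_JOURNAL_ENTRIES entries ever land in one journal.
 */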
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -	\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
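/*
 * As make_dentry_ptr_inline() lays it out, an inline dentry area is packed
 * as: [dentry bitmap][reserved bytes][dir_entry array][filename slots];
 * d->dentry therefore starts bitmap_size + reserved_size bytes into the
 * area, and the filename slots follow the SIZE_OF_DIR_ENTRY * entry_cnt
 * entry array.
 */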
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
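/*
 * Rough usage sketch (f2fs_map_blocks() itself is declared elsewhere in this
 * header): the caller describes a logical range and reads the result back
 * from the same struct, e.g.
 *
 *	struct f2fs_map_blocks map = { .m_lblk = index, .m_len = 1 };
 *	int err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP);
 *
 * On success, F2FS_MAP_MAPPED in map.m_flags means map.m_pblk now holds the
 * physical block backing @index.
 */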
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode)	clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
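/*
 * Example: these wrappers just test/set/clear single bits in i_advise via
 * the is_file()/set_file()/clear_file() helpers defined later in this
 * header.  Marking a file's data as cold for the block allocator is simply
 * file_set_cold(inode), after which file_is_cold(inode) returns true until
 * file_clear_cold(inode) drops FADVISE_COLD_BIT again.
 */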
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data*/
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs*/
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}
static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
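/*
 * Example: two cached extents merge only when they are contiguous in both
 * the file and on disk.  With back = {fofs 0, len 4, blk 100} and
 * front = {fofs 4, len 8, blk 104}, __is_extent_mergeable() returns true
 * and the pair can collapse into {fofs 0, len 12, blk 100}; if front started
 * at blk 200 instead, the two extents would stay separate.
 */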
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
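/*
 * Rough usage sketch: a dnode_of_data is always initialised through
 * set_new_dnode() before being handed to the node-walking helpers declared
 * elsewhere in this header, e.g.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *
 * after which dn.node_page and dn.ofs_in_node locate the block address slot
 * for page_index.
 */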
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
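/*
 * Example: WB_DATA_TYPE() picks the writeback counter a page is charged to;
 * roughly, pages whose writeback the next checkpoint has to wait for
 * (__is_cp_guaranteed() returns true) are accounted as F2FS_WB_CP_DATA,
 * while ordinary data under writeback is accounted as F2FS_WB_DATA.
 */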
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};
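/*
 * Example: per-type bio state only exists for DATA, NODE and META (see
 * sbi->write_io[NR_PAGE_TYPE] below), so PAGE_TYPE_OF_BIO() folds every
 * higher value back onto META; e.g. PAGE_TYPE_OF_BIO(META_FLUSH) == META
 * while PAGE_TYPE_OF_BIO(NODE) stays NODE.
 */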
enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

/*
 * this value is set in page as a private data which indicate that
 * the page is atomically written, and it is in inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == DUMMY_WRITTEN_PAGE)

#ifdef CONFIG_F2FS_IO_TRACE
#define IS_IO_TRACED_PAGE(page)			\
		(page_private(page) > 0 &&		\
			page_private(page) < (unsigned long)PID_MAX_LIMIT)
#else
#define IS_IO_TRACED_PAGE(page) (0)
#endif

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8
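/*
 * Example: with the layout above, COMPRESS_HEADER_SIZE works out to
 * 4 (clen) + 4 (chksum) + 4 * 4 (reserved) = 24 bytes, and the compressed
 * payload in cdata[] starts immediately after that header at the beginning
 * of the cluster's compressed buffer.
 */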
/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
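/*
 * Example: a cluster is always 1 << log_cluster_size pages, so with 4KiB
 * pages the supported range runs from MIN_COMPRESS_LOG_SIZE (4 pages, a
 * 16KiB window) up to MAX_COMPRESS_LOG_SIZE (256 pages, a 1MiB window), as
 * computed by MAX_COMPRESS_WINDOW_SIZE().
 */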
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number*/
	unsigned int node_ino_num;		/* node inode number*/
	unsigned int meta_ino_num;		/* meta inode number*/
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfile */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
1537 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1538 1539 /* for skip statistic */ 1540 unsigned int atomic_files; /* # of opened atomic file */ 1541 unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ 1542 unsigned long long skipped_gc_rwsem; /* FG_GC only */ 1543 1544 /* threshold for gc trials on pinned files */ 1545 u64 gc_pin_file_threshold; 1546 struct rw_semaphore pin_sem; 1547 1548 /* maximum # of trials to find a victim segment for SSR and GC */ 1549 unsigned int max_victim_search; 1550 /* migration granularity of garbage collection, unit: segment */ 1551 unsigned int migration_granularity; 1552 1553 /* 1554 * for stat information. 1555 * one is for the LFS mode, and the other is for the SSR mode. 1556 */ 1557 #ifdef CONFIG_F2FS_STAT_FS 1558 struct f2fs_stat_info *stat_info; /* FS status information */ 1559 atomic_t meta_count[META_MAX]; /* # of meta blocks */ 1560 unsigned int segment_count[2]; /* # of allocated segments */ 1561 unsigned int block_count[2]; /* # of allocated blocks */ 1562 atomic_t inplace_count; /* # of inplace update */ 1563 atomic64_t total_hit_ext; /* # of lookup extent cache */ 1564 atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */ 1565 atomic64_t read_hit_largest; /* # of hit largest extent node */ 1566 atomic64_t read_hit_cached; /* # of hit cached extent node */ 1567 atomic_t inline_xattr; /* # of inline_xattr inodes */ 1568 atomic_t inline_inode; /* # of inline_data inodes */ 1569 atomic_t inline_dir; /* # of inline_dentry inodes */ 1570 atomic_t compr_inode; /* # of compressed inodes */ 1571 atomic64_t compr_blocks; /* # of compressed blocks */ 1572 atomic_t vw_cnt; /* # of volatile writes */ 1573 atomic_t max_aw_cnt; /* max # of atomic writes */ 1574 atomic_t max_vw_cnt; /* max # of volatile writes */ 1575 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ 1576 unsigned int other_skip_bggc; /* skip background gc for other reasons */ 1577 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ 1578 #endif 1579 spinlock_t stat_lock; /* lock for stat operations */ 1580 1581 /* For app/fs IO statistics */ 1582 spinlock_t iostat_lock; 1583 unsigned long long rw_iostat[NR_IO_TYPE]; 1584 unsigned long long prev_rw_iostat[NR_IO_TYPE]; 1585 bool iostat_enable; 1586 unsigned long iostat_next_period; 1587 unsigned int iostat_period_ms; 1588 1589 /* to attach REQ_META|REQ_FUA flags */ 1590 unsigned int data_io_flag; 1591 unsigned int node_io_flag; 1592 1593 /* For sysfs suppport */ 1594 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ 1595 struct completion s_kobj_unregister; 1596 1597 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ 1598 struct completion s_stat_kobj_unregister; 1599 1600 /* For shrinker support */ 1601 struct list_head s_list; 1602 int s_ndevs; /* number of devices */ 1603 struct f2fs_dev_info *devs; /* for device list */ 1604 unsigned int dirty_device; /* for checkpoint data flush */ 1605 spinlock_t dev_lock; /* protect dirty_device */ 1606 struct mutex umount_mutex; 1607 unsigned int shrinker_run_no; 1608 1609 /* For write statistics */ 1610 u64 sectors_written_start; 1611 u64 kbytes_written; 1612 1613 /* Reference to checksum algorithm driver via cryptoapi */ 1614 struct crypto_shash *s_chksum_driver; 1615 1616 /* Precomputed FS UUID checksum for seeding other checksums */ 1617 __u32 s_chksum_seed; 1618 1619 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1620 1621 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 
1622 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ 1623 1624 #ifdef CONFIG_F2FS_FS_COMPRESSION 1625 struct kmem_cache *page_array_slab; /* page array entry */ 1626 unsigned int page_array_slab_size; /* default page array slab size */ 1627 #endif 1628 }; 1629 1630 struct f2fs_private_dio { 1631 struct inode *inode; 1632 void *orig_private; 1633 bio_end_io_t *orig_end_io; 1634 bool write; 1635 }; 1636 1637 #ifdef CONFIG_F2FS_FAULT_INJECTION 1638 #define f2fs_show_injection_info(sbi, type) \ 1639 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \ 1640 KERN_INFO, sbi->sb->s_id, \ 1641 f2fs_fault_name[type], \ 1642 __func__, __builtin_return_address(0)) 1643 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1644 { 1645 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1646 1647 if (!ffi->inject_rate) 1648 return false; 1649 1650 if (!IS_FAULT_SET(ffi, type)) 1651 return false; 1652 1653 atomic_inc(&ffi->inject_ops); 1654 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1655 atomic_set(&ffi->inject_ops, 0); 1656 return true; 1657 } 1658 return false; 1659 } 1660 #else 1661 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1662 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1663 { 1664 return false; 1665 } 1666 #endif 1667 1668 /* 1669 * Test if the mounted volume is a multi-device volume. 1670 * - For a single regular disk volume, sbi->s_ndevs is 0. 1671 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1672 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 1673 */ 1674 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1675 { 1676 return sbi->s_ndevs > 1; 1677 } 1678 1679 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1680 { 1681 unsigned long now = jiffies; 1682 1683 sbi->last_time[type] = now; 1684 1685 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1686 if (type == REQ_TIME) { 1687 sbi->last_time[DISCARD_TIME] = now; 1688 sbi->last_time[GC_TIME] = now; 1689 } 1690 } 1691 1692 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1693 { 1694 unsigned long interval = sbi->interval_time[type] * HZ; 1695 1696 return time_after(jiffies, sbi->last_time[type] + interval); 1697 } 1698 1699 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1700 int type) 1701 { 1702 unsigned long interval = sbi->interval_time[type] * HZ; 1703 unsigned int wait_ms = 0; 1704 long delta; 1705 1706 delta = (sbi->last_time[type] + interval) - jiffies; 1707 if (delta > 0) 1708 wait_ms = jiffies_to_msecs(delta); 1709 1710 return wait_ms; 1711 } 1712 1713 /* 1714 * Inline functions 1715 */ 1716 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1717 const void *address, unsigned int length) 1718 { 1719 struct { 1720 struct shash_desc shash; 1721 char ctx[4]; 1722 } desc; 1723 int err; 1724 1725 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1726 1727 desc.shash.tfm = sbi->s_chksum_driver; 1728 *(u32 *)desc.ctx = crc; 1729 1730 err = crypto_shash_update(&desc.shash, address, length); 1731 BUG_ON(err); 1732 1733 return *(u32 *)desc.ctx; 1734 } 1735 1736 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1737 unsigned int length) 1738 { 1739 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1740 } 1741 1742 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1743 void *buf, size_t buf_size) 1744 { 1745 return 
f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1746 } 1747 1748 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1749 const void *address, unsigned int length) 1750 { 1751 return __f2fs_crc32(sbi, crc, address, length); 1752 } 1753 1754 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1755 { 1756 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1757 } 1758 1759 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1760 { 1761 return sb->s_fs_info; 1762 } 1763 1764 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1765 { 1766 return F2FS_SB(inode->i_sb); 1767 } 1768 1769 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1770 { 1771 return F2FS_I_SB(mapping->host); 1772 } 1773 1774 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1775 { 1776 return F2FS_M_SB(page_file_mapping(page)); 1777 } 1778 1779 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1780 { 1781 return (struct f2fs_super_block *)(sbi->raw_super); 1782 } 1783 1784 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1785 { 1786 return (struct f2fs_checkpoint *)(sbi->ckpt); 1787 } 1788 1789 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1790 { 1791 return (struct f2fs_node *)page_address(page); 1792 } 1793 1794 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1795 { 1796 return &((struct f2fs_node *)page_address(page))->i; 1797 } 1798 1799 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 1800 { 1801 return (struct f2fs_nm_info *)(sbi->nm_info); 1802 } 1803 1804 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1805 { 1806 return (struct f2fs_sm_info *)(sbi->sm_info); 1807 } 1808 1809 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1810 { 1811 return (struct sit_info *)(SM_I(sbi)->sit_info); 1812 } 1813 1814 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1815 { 1816 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1817 } 1818 1819 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1820 { 1821 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 1822 } 1823 1824 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 1825 { 1826 return sbi->meta_inode->i_mapping; 1827 } 1828 1829 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 1830 { 1831 return sbi->node_inode->i_mapping; 1832 } 1833 1834 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 1835 { 1836 return test_bit(type, &sbi->s_flag); 1837 } 1838 1839 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1840 { 1841 set_bit(type, &sbi->s_flag); 1842 } 1843 1844 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1845 { 1846 clear_bit(type, &sbi->s_flag); 1847 } 1848 1849 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 1850 { 1851 return le64_to_cpu(cp->checkpoint_ver); 1852 } 1853 1854 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 1855 { 1856 if (type < F2FS_MAX_QUOTAS) 1857 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 1858 return 0; 1859 } 1860 1861 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 1862 { 1863 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 1864 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 1865 } 1866 1867 static inline bool 
__is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1868 { 1869 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1870 1871 return ckpt_flags & f; 1872 } 1873 1874 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1875 { 1876 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 1877 } 1878 1879 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1880 { 1881 unsigned int ckpt_flags; 1882 1883 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1884 ckpt_flags |= f; 1885 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1886 } 1887 1888 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1889 { 1890 unsigned long flags; 1891 1892 spin_lock_irqsave(&sbi->cp_lock, flags); 1893 __set_ckpt_flags(F2FS_CKPT(sbi), f); 1894 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1895 } 1896 1897 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1898 { 1899 unsigned int ckpt_flags; 1900 1901 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1902 ckpt_flags &= (~f); 1903 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1904 } 1905 1906 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1907 { 1908 unsigned long flags; 1909 1910 spin_lock_irqsave(&sbi->cp_lock, flags); 1911 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 1912 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1913 } 1914 1915 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) 1916 { 1917 unsigned long flags; 1918 unsigned char *nat_bits; 1919 1920 /* 1921 * In order to re-enable nat_bits we need to call fsck.f2fs by 1922 * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost, 1923 * so let's rely on regular fsck or unclean shutdown. 1924 */ 1925 1926 if (lock) 1927 spin_lock_irqsave(&sbi->cp_lock, flags); 1928 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); 1929 nat_bits = NM_I(sbi)->nat_bits; 1930 NM_I(sbi)->nat_bits = NULL; 1931 if (lock) 1932 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1933 1934 kvfree(nat_bits); 1935 } 1936 1937 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, 1938 struct cp_control *cpc) 1939 { 1940 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); 1941 1942 return (cpc) ? 
(cpc->reason & CP_UMOUNT) && set : set; 1943 } 1944 1945 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 1946 { 1947 down_read(&sbi->cp_rwsem); 1948 } 1949 1950 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 1951 { 1952 return down_read_trylock(&sbi->cp_rwsem); 1953 } 1954 1955 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 1956 { 1957 up_read(&sbi->cp_rwsem); 1958 } 1959 1960 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 1961 { 1962 down_write(&sbi->cp_rwsem); 1963 } 1964 1965 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 1966 { 1967 up_write(&sbi->cp_rwsem); 1968 } 1969 1970 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 1971 { 1972 int reason = CP_SYNC; 1973 1974 if (test_opt(sbi, FASTBOOT)) 1975 reason = CP_FASTBOOT; 1976 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 1977 reason = CP_UMOUNT; 1978 return reason; 1979 } 1980 1981 static inline bool __remain_node_summaries(int reason) 1982 { 1983 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 1984 } 1985 1986 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 1987 { 1988 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 1989 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 1990 } 1991 1992 /* 1993 * Check whether the inode has blocks or not 1994 */ 1995 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 1996 { 1997 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 1998 1999 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2000 } 2001 2002 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2003 { 2004 return ofs == XATTR_NODE_OFFSET; 2005 } 2006 2007 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2008 struct inode *inode, bool cap) 2009 { 2010 if (!inode) 2011 return true; 2012 if (!test_opt(sbi, RESERVE_ROOT)) 2013 return false; 2014 if (IS_NOQUOTA(inode)) 2015 return true; 2016 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2017 return true; 2018 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2019 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2020 return true; 2021 if (cap && capable(CAP_SYS_RESOURCE)) 2022 return true; 2023 return false; 2024 } 2025 2026 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2027 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2028 struct inode *inode, blkcnt_t *count) 2029 { 2030 blkcnt_t diff = 0, release = 0; 2031 block_t avail_user_block_count; 2032 int ret; 2033 2034 ret = dquot_reserve_block(inode, *count); 2035 if (ret) 2036 return ret; 2037 2038 if (time_to_inject(sbi, FAULT_BLOCK)) { 2039 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2040 release = *count; 2041 goto release_quota; 2042 } 2043 2044 /* 2045 * let's increase this in prior to actual block count change in order 2046 * for f2fs_sync_file to avoid data races when deciding checkpoint. 
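	 * (Concretely: the percpu alloc_valid_block_count is bumped first,
	 * just below, before stat_lock is taken and total_valid_block_count
	 * is updated.)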
2047 */ 2048 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2049 2050 spin_lock(&sbi->stat_lock); 2051 sbi->total_valid_block_count += (block_t)(*count); 2052 avail_user_block_count = sbi->user_block_count - 2053 sbi->current_reserved_blocks; 2054 2055 if (!__allow_reserved_blocks(sbi, inode, true)) 2056 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2057 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2058 if (avail_user_block_count > sbi->unusable_block_count) 2059 avail_user_block_count -= sbi->unusable_block_count; 2060 else 2061 avail_user_block_count = 0; 2062 } 2063 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2064 diff = sbi->total_valid_block_count - avail_user_block_count; 2065 if (diff > *count) 2066 diff = *count; 2067 *count -= diff; 2068 release = diff; 2069 sbi->total_valid_block_count -= diff; 2070 if (!*count) { 2071 spin_unlock(&sbi->stat_lock); 2072 goto enospc; 2073 } 2074 } 2075 spin_unlock(&sbi->stat_lock); 2076 2077 if (unlikely(release)) { 2078 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2079 dquot_release_reservation_block(inode, release); 2080 } 2081 f2fs_i_blocks_write(inode, *count, true, true); 2082 return 0; 2083 2084 enospc: 2085 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2086 release_quota: 2087 dquot_release_reservation_block(inode, release); 2088 return -ENOSPC; 2089 } 2090 2091 __printf(2, 3) 2092 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2093 2094 #define f2fs_err(sbi, fmt, ...) \ 2095 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2096 #define f2fs_warn(sbi, fmt, ...) \ 2097 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2098 #define f2fs_notice(sbi, fmt, ...) \ 2099 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2100 #define f2fs_info(sbi, fmt, ...) \ 2101 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2102 #define f2fs_debug(sbi, fmt, ...) \ 2103 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2104 2105 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2106 struct inode *inode, 2107 block_t count) 2108 { 2109 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2110 2111 spin_lock(&sbi->stat_lock); 2112 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2113 sbi->total_valid_block_count -= (block_t)count; 2114 if (sbi->reserved_blocks && 2115 sbi->current_reserved_blocks < sbi->reserved_blocks) 2116 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2117 sbi->current_reserved_blocks + count); 2118 spin_unlock(&sbi->stat_lock); 2119 if (unlikely(inode->i_blocks < sectors)) { 2120 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2121 inode->i_ino, 2122 (unsigned long long)inode->i_blocks, 2123 (unsigned long long)sectors); 2124 set_sbi_flag(sbi, SBI_NEED_FSCK); 2125 return; 2126 } 2127 f2fs_i_blocks_write(inode, count, false, true); 2128 } 2129 2130 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2131 { 2132 atomic_inc(&sbi->nr_pages[count_type]); 2133 2134 if (count_type == F2FS_DIRTY_DENTS || 2135 count_type == F2FS_DIRTY_NODES || 2136 count_type == F2FS_DIRTY_META || 2137 count_type == F2FS_DIRTY_QDATA || 2138 count_type == F2FS_DIRTY_IMETA) 2139 set_sbi_flag(sbi, SBI_IS_DIRTY); 2140 } 2141 2142 static inline void inode_inc_dirty_pages(struct inode *inode) 2143 { 2144 atomic_inc(&F2FS_I(inode)->dirty_pages); 2145 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2146 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2147 if (IS_NOQUOTA(inode)) 2148 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2149 } 2150 2151 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2152 { 2153 atomic_dec(&sbi->nr_pages[count_type]); 2154 } 2155 2156 static inline void inode_dec_dirty_pages(struct inode *inode) 2157 { 2158 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2159 !S_ISLNK(inode->i_mode)) 2160 return; 2161 2162 atomic_dec(&F2FS_I(inode)->dirty_pages); 2163 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2164 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2165 if (IS_NOQUOTA(inode)) 2166 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2167 } 2168 2169 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2170 { 2171 return atomic_read(&sbi->nr_pages[count_type]); 2172 } 2173 2174 static inline int get_dirty_pages(struct inode *inode) 2175 { 2176 return atomic_read(&F2FS_I(inode)->dirty_pages); 2177 } 2178 2179 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2180 { 2181 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2182 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2183 sbi->log_blocks_per_seg; 2184 2185 return segs / sbi->segs_per_sec; 2186 } 2187 2188 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2189 { 2190 return sbi->total_valid_block_count; 2191 } 2192 2193 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2194 { 2195 return sbi->discard_blks; 2196 } 2197 2198 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2199 { 2200 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2201 2202 /* return NAT or SIT bitmap */ 2203 if (flag == NAT_BITMAP) 2204 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2205 else if (flag == SIT_BITMAP) 2206 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2207 2208 return 0; 2209 } 2210 2211 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2212 { 2213 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2214 } 2215 2216 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2217 { 2218 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2219 int offset; 2220 2221 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2222 offset = (flag == SIT_BITMAP) ? 2223 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2224 /* 2225 * if large_nat_bitmap feature is enabled, leave checksum 2226 * protection for all nat/sit bitmaps. 2227 */ 2228 return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); 2229 } 2230 2231 if (__cp_payload(sbi) > 0) { 2232 if (flag == NAT_BITMAP) 2233 return &ckpt->sit_nat_version_bitmap; 2234 else 2235 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2236 } else { 2237 offset = (flag == NAT_BITMAP) ? 
2238 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2239 return &ckpt->sit_nat_version_bitmap + offset; 2240 } 2241 } 2242 2243 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2244 { 2245 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2246 2247 if (sbi->cur_cp_pack == 2) 2248 start_addr += sbi->blocks_per_seg; 2249 return start_addr; 2250 } 2251 2252 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2253 { 2254 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2255 2256 if (sbi->cur_cp_pack == 1) 2257 start_addr += sbi->blocks_per_seg; 2258 return start_addr; 2259 } 2260 2261 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2262 { 2263 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; 2264 } 2265 2266 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2267 { 2268 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2269 } 2270 2271 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2272 struct inode *inode, bool is_inode) 2273 { 2274 block_t valid_block_count; 2275 unsigned int valid_node_count, user_block_count; 2276 int err; 2277 2278 if (is_inode) { 2279 if (inode) { 2280 err = dquot_alloc_inode(inode); 2281 if (err) 2282 return err; 2283 } 2284 } else { 2285 err = dquot_reserve_block(inode, 1); 2286 if (err) 2287 return err; 2288 } 2289 2290 if (time_to_inject(sbi, FAULT_BLOCK)) { 2291 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2292 goto enospc; 2293 } 2294 2295 spin_lock(&sbi->stat_lock); 2296 2297 valid_block_count = sbi->total_valid_block_count + 2298 sbi->current_reserved_blocks + 1; 2299 2300 if (!__allow_reserved_blocks(sbi, inode, false)) 2301 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2302 user_block_count = sbi->user_block_count; 2303 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2304 user_block_count -= sbi->unusable_block_count; 2305 2306 if (unlikely(valid_block_count > user_block_count)) { 2307 spin_unlock(&sbi->stat_lock); 2308 goto enospc; 2309 } 2310 2311 valid_node_count = sbi->total_valid_node_count + 1; 2312 if (unlikely(valid_node_count > sbi->total_node_count)) { 2313 spin_unlock(&sbi->stat_lock); 2314 goto enospc; 2315 } 2316 2317 sbi->total_valid_node_count++; 2318 sbi->total_valid_block_count++; 2319 spin_unlock(&sbi->stat_lock); 2320 2321 if (inode) { 2322 if (is_inode) 2323 f2fs_mark_inode_dirty_sync(inode, true); 2324 else 2325 f2fs_i_blocks_write(inode, 1, true, true); 2326 } 2327 2328 percpu_counter_inc(&sbi->alloc_valid_block_count); 2329 return 0; 2330 2331 enospc: 2332 if (is_inode) { 2333 if (inode) 2334 dquot_free_inode(inode); 2335 } else { 2336 dquot_release_reservation_block(inode, 1); 2337 } 2338 return -ENOSPC; 2339 } 2340 2341 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2342 struct inode *inode, bool is_inode) 2343 { 2344 spin_lock(&sbi->stat_lock); 2345 2346 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2347 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2348 2349 sbi->total_valid_node_count--; 2350 sbi->total_valid_block_count--; 2351 if (sbi->reserved_blocks && 2352 sbi->current_reserved_blocks < sbi->reserved_blocks) 2353 sbi->current_reserved_blocks++; 2354 2355 spin_unlock(&sbi->stat_lock); 2356 2357 if (is_inode) { 2358 dquot_free_inode(inode); 2359 } else { 2360 if (unlikely(inode->i_blocks == 0)) { 2361 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2362 inode->i_ino, 2363 (unsigned long long)inode->i_blocks); 2364 set_sbi_flag(sbi, 
SBI_NEED_FSCK); 2365 return; 2366 } 2367 f2fs_i_blocks_write(inode, 1, false, true); 2368 } 2369 } 2370 2371 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2372 { 2373 return sbi->total_valid_node_count; 2374 } 2375 2376 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2377 { 2378 percpu_counter_inc(&sbi->total_valid_inode_count); 2379 } 2380 2381 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2382 { 2383 percpu_counter_dec(&sbi->total_valid_inode_count); 2384 } 2385 2386 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2387 { 2388 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2389 } 2390 2391 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2392 pgoff_t index, bool for_write) 2393 { 2394 struct page *page; 2395 2396 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2397 if (!for_write) 2398 page = find_get_page_flags(mapping, index, 2399 FGP_LOCK | FGP_ACCESSED); 2400 else 2401 page = find_lock_page(mapping, index); 2402 if (page) 2403 return page; 2404 2405 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2406 f2fs_show_injection_info(F2FS_M_SB(mapping), 2407 FAULT_PAGE_ALLOC); 2408 return NULL; 2409 } 2410 } 2411 2412 if (!for_write) 2413 return grab_cache_page(mapping, index); 2414 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2415 } 2416 2417 static inline struct page *f2fs_pagecache_get_page( 2418 struct address_space *mapping, pgoff_t index, 2419 int fgp_flags, gfp_t gfp_mask) 2420 { 2421 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2422 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2423 return NULL; 2424 } 2425 2426 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2427 } 2428 2429 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2430 { 2431 char *src_kaddr = kmap(src); 2432 char *dst_kaddr = kmap(dst); 2433 2434 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2435 kunmap(dst); 2436 kunmap(src); 2437 } 2438 2439 static inline void f2fs_put_page(struct page *page, int unlock) 2440 { 2441 if (!page) 2442 return; 2443 2444 if (unlock) { 2445 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2446 unlock_page(page); 2447 } 2448 put_page(page); 2449 } 2450 2451 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2452 { 2453 if (dn->node_page) 2454 f2fs_put_page(dn->node_page, 1); 2455 if (dn->inode_page && dn->node_page != dn->inode_page) 2456 f2fs_put_page(dn->inode_page, 0); 2457 dn->node_page = NULL; 2458 dn->inode_page = NULL; 2459 } 2460 2461 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2462 size_t size) 2463 { 2464 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2465 } 2466 2467 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2468 gfp_t flags) 2469 { 2470 void *entry; 2471 2472 entry = kmem_cache_alloc(cachep, flags); 2473 if (!entry) 2474 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2475 return entry; 2476 } 2477 2478 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2479 { 2480 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2481 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2482 get_pages(sbi, F2FS_WB_CP_DATA) || 2483 get_pages(sbi, F2FS_DIO_READ) || 2484 get_pages(sbi, F2FS_DIO_WRITE)) 2485 return true; 2486 2487 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2488 
atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2489 return true; 2490 2491 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2492 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2493 return true; 2494 return false; 2495 } 2496 2497 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2498 { 2499 if (sbi->gc_mode == GC_URGENT_HIGH) 2500 return true; 2501 2502 if (is_inflight_io(sbi, type)) 2503 return false; 2504 2505 if (sbi->gc_mode == GC_URGENT_LOW && 2506 (type == DISCARD_TIME || type == GC_TIME)) 2507 return true; 2508 2509 return f2fs_time_over(sbi, type); 2510 } 2511 2512 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2513 unsigned long index, void *item) 2514 { 2515 while (radix_tree_insert(root, index, item)) 2516 cond_resched(); 2517 } 2518 2519 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2520 2521 static inline bool IS_INODE(struct page *page) 2522 { 2523 struct f2fs_node *p = F2FS_NODE(page); 2524 2525 return RAW_IS_INODE(p); 2526 } 2527 2528 static inline int offset_in_addr(struct f2fs_inode *i) 2529 { 2530 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2531 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2532 } 2533 2534 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2535 { 2536 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2537 } 2538 2539 static inline int f2fs_has_extra_attr(struct inode *inode); 2540 static inline block_t data_blkaddr(struct inode *inode, 2541 struct page *node_page, unsigned int offset) 2542 { 2543 struct f2fs_node *raw_node; 2544 __le32 *addr_array; 2545 int base = 0; 2546 bool is_inode = IS_INODE(node_page); 2547 2548 raw_node = F2FS_NODE(node_page); 2549 2550 if (is_inode) { 2551 if (!inode) 2552 /* from GC path only */ 2553 base = offset_in_addr(&raw_node->i); 2554 else if (f2fs_has_extra_attr(inode)) 2555 base = get_extra_isize(inode); 2556 } 2557 2558 addr_array = blkaddr_in_node(raw_node); 2559 return le32_to_cpu(addr_array[base + offset]); 2560 } 2561 2562 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2563 { 2564 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2565 } 2566 2567 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2568 { 2569 int mask; 2570 2571 addr += (nr >> 3); 2572 mask = 1 << (7 - (nr & 0x07)); 2573 return mask & *addr; 2574 } 2575 2576 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2577 { 2578 int mask; 2579 2580 addr += (nr >> 3); 2581 mask = 1 << (7 - (nr & 0x07)); 2582 *addr |= mask; 2583 } 2584 2585 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2586 { 2587 int mask; 2588 2589 addr += (nr >> 3); 2590 mask = 1 << (7 - (nr & 0x07)); 2591 *addr &= ~mask; 2592 } 2593 2594 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2595 { 2596 int mask; 2597 int ret; 2598 2599 addr += (nr >> 3); 2600 mask = 1 << (7 - (nr & 0x07)); 2601 ret = mask & *addr; 2602 *addr |= mask; 2603 return ret; 2604 } 2605 2606 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2607 { 2608 int mask; 2609 int ret; 2610 2611 addr += (nr >> 3); 2612 mask = 1 << (7 - (nr & 0x07)); 2613 ret = mask & *addr; 2614 *addr &= ~mask; 2615 return ret; 2616 } 2617 2618 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2619 { 2620 int mask; 2621 2622 addr += (nr >> 3); 2623 mask = 1 << (7 - (nr & 0x07)); 2624 *addr ^= mask; 2625 } 2626 2627 /* 2628 * On-disk inode flags (f2fs_inode::i_flags) 2629 */ 2630 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2631 #define 
F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2632 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2633 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2634 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2635 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2636 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2637 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2638 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2639 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2640 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2641 2642 /* Flags that should be inherited by new inodes from their parent. */ 2643 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2644 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2645 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2646 2647 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2648 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2649 F2FS_CASEFOLD_FL)) 2650 2651 /* Flags that are appropriate for non-directories/regular files. */ 2652 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2653 2654 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2655 { 2656 if (S_ISDIR(mode)) 2657 return flags; 2658 else if (S_ISREG(mode)) 2659 return flags & F2FS_REG_FLMASK; 2660 else 2661 return flags & F2FS_OTHER_FLMASK; 2662 } 2663 2664 static inline void __mark_inode_dirty_flag(struct inode *inode, 2665 int flag, bool set) 2666 { 2667 switch (flag) { 2668 case FI_INLINE_XATTR: 2669 case FI_INLINE_DATA: 2670 case FI_INLINE_DENTRY: 2671 case FI_NEW_INODE: 2672 if (set) 2673 return; 2674 fallthrough; 2675 case FI_DATA_EXIST: 2676 case FI_INLINE_DOTS: 2677 case FI_PIN_FILE: 2678 f2fs_mark_inode_dirty_sync(inode, true); 2679 } 2680 } 2681 2682 static inline void set_inode_flag(struct inode *inode, int flag) 2683 { 2684 set_bit(flag, F2FS_I(inode)->flags); 2685 __mark_inode_dirty_flag(inode, flag, true); 2686 } 2687 2688 static inline int is_inode_flag_set(struct inode *inode, int flag) 2689 { 2690 return test_bit(flag, F2FS_I(inode)->flags); 2691 } 2692 2693 static inline void clear_inode_flag(struct inode *inode, int flag) 2694 { 2695 clear_bit(flag, F2FS_I(inode)->flags); 2696 __mark_inode_dirty_flag(inode, flag, false); 2697 } 2698 2699 static inline bool f2fs_verity_in_progress(struct inode *inode) 2700 { 2701 return IS_ENABLED(CONFIG_FS_VERITY) && 2702 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2703 } 2704 2705 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2706 { 2707 F2FS_I(inode)->i_acl_mode = mode; 2708 set_inode_flag(inode, FI_ACL_MODE); 2709 f2fs_mark_inode_dirty_sync(inode, false); 2710 } 2711 2712 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2713 { 2714 if (inc) 2715 inc_nlink(inode); 2716 else 2717 drop_nlink(inode); 2718 f2fs_mark_inode_dirty_sync(inode, true); 2719 } 2720 2721 static inline void f2fs_i_blocks_write(struct inode *inode, 2722 block_t diff, bool add, bool claim) 2723 { 2724 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2725 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2726 2727 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2728 if (add) { 2729 if (claim) 2730 dquot_claim_block(inode, diff); 2731 else 2732 dquot_alloc_block_nofail(inode, diff); 2733 } else { 2734 dquot_free_block(inode, diff); 2735 } 2736 2737 
f2fs_mark_inode_dirty_sync(inode, true); 2738 if (clean || recover) 2739 set_inode_flag(inode, FI_AUTO_RECOVER); 2740 } 2741 2742 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2743 { 2744 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2745 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2746 2747 if (i_size_read(inode) == i_size) 2748 return; 2749 2750 i_size_write(inode, i_size); 2751 f2fs_mark_inode_dirty_sync(inode, true); 2752 if (clean || recover) 2753 set_inode_flag(inode, FI_AUTO_RECOVER); 2754 } 2755 2756 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2757 { 2758 F2FS_I(inode)->i_current_depth = depth; 2759 f2fs_mark_inode_dirty_sync(inode, true); 2760 } 2761 2762 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2763 unsigned int count) 2764 { 2765 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2766 f2fs_mark_inode_dirty_sync(inode, true); 2767 } 2768 2769 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2770 { 2771 F2FS_I(inode)->i_xattr_nid = xnid; 2772 f2fs_mark_inode_dirty_sync(inode, true); 2773 } 2774 2775 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2776 { 2777 F2FS_I(inode)->i_pino = pino; 2778 f2fs_mark_inode_dirty_sync(inode, true); 2779 } 2780 2781 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2782 { 2783 struct f2fs_inode_info *fi = F2FS_I(inode); 2784 2785 if (ri->i_inline & F2FS_INLINE_XATTR) 2786 set_bit(FI_INLINE_XATTR, fi->flags); 2787 if (ri->i_inline & F2FS_INLINE_DATA) 2788 set_bit(FI_INLINE_DATA, fi->flags); 2789 if (ri->i_inline & F2FS_INLINE_DENTRY) 2790 set_bit(FI_INLINE_DENTRY, fi->flags); 2791 if (ri->i_inline & F2FS_DATA_EXIST) 2792 set_bit(FI_DATA_EXIST, fi->flags); 2793 if (ri->i_inline & F2FS_INLINE_DOTS) 2794 set_bit(FI_INLINE_DOTS, fi->flags); 2795 if (ri->i_inline & F2FS_EXTRA_ATTR) 2796 set_bit(FI_EXTRA_ATTR, fi->flags); 2797 if (ri->i_inline & F2FS_PIN_FILE) 2798 set_bit(FI_PIN_FILE, fi->flags); 2799 } 2800 2801 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2802 { 2803 ri->i_inline = 0; 2804 2805 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2806 ri->i_inline |= F2FS_INLINE_XATTR; 2807 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2808 ri->i_inline |= F2FS_INLINE_DATA; 2809 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2810 ri->i_inline |= F2FS_INLINE_DENTRY; 2811 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2812 ri->i_inline |= F2FS_DATA_EXIST; 2813 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2814 ri->i_inline |= F2FS_INLINE_DOTS; 2815 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2816 ri->i_inline |= F2FS_EXTRA_ATTR; 2817 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2818 ri->i_inline |= F2FS_PIN_FILE; 2819 } 2820 2821 static inline int f2fs_has_extra_attr(struct inode *inode) 2822 { 2823 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2824 } 2825 2826 static inline int f2fs_has_inline_xattr(struct inode *inode) 2827 { 2828 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2829 } 2830 2831 static inline int f2fs_compressed_file(struct inode *inode) 2832 { 2833 return S_ISREG(inode->i_mode) && 2834 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 2835 } 2836 2837 static inline bool f2fs_need_compress_data(struct inode *inode) 2838 { 2839 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 2840 2841 if (!f2fs_compressed_file(inode)) 2842 return false; 2843 2844 if (compress_mode == COMPR_MODE_FS) 2845 return true; 2846 else if 
(compress_mode == COMPR_MODE_USER && 2847 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 2848 return true; 2849 2850 return false; 2851 } 2852 2853 static inline unsigned int addrs_per_inode(struct inode *inode) 2854 { 2855 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 2856 get_inline_xattr_addrs(inode); 2857 2858 if (!f2fs_compressed_file(inode)) 2859 return addrs; 2860 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 2861 } 2862 2863 static inline unsigned int addrs_per_block(struct inode *inode) 2864 { 2865 if (!f2fs_compressed_file(inode)) 2866 return DEF_ADDRS_PER_BLOCK; 2867 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 2868 } 2869 2870 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 2871 { 2872 struct f2fs_inode *ri = F2FS_INODE(page); 2873 2874 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 2875 get_inline_xattr_addrs(inode)]); 2876 } 2877 2878 static inline int inline_xattr_size(struct inode *inode) 2879 { 2880 if (f2fs_has_inline_xattr(inode)) 2881 return get_inline_xattr_addrs(inode) * sizeof(__le32); 2882 return 0; 2883 } 2884 2885 static inline int f2fs_has_inline_data(struct inode *inode) 2886 { 2887 return is_inode_flag_set(inode, FI_INLINE_DATA); 2888 } 2889 2890 static inline int f2fs_exist_data(struct inode *inode) 2891 { 2892 return is_inode_flag_set(inode, FI_DATA_EXIST); 2893 } 2894 2895 static inline int f2fs_has_inline_dots(struct inode *inode) 2896 { 2897 return is_inode_flag_set(inode, FI_INLINE_DOTS); 2898 } 2899 2900 static inline int f2fs_is_mmap_file(struct inode *inode) 2901 { 2902 return is_inode_flag_set(inode, FI_MMAP_FILE); 2903 } 2904 2905 static inline bool f2fs_is_pinned_file(struct inode *inode) 2906 { 2907 return is_inode_flag_set(inode, FI_PIN_FILE); 2908 } 2909 2910 static inline bool f2fs_is_atomic_file(struct inode *inode) 2911 { 2912 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 2913 } 2914 2915 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 2916 { 2917 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 2918 } 2919 2920 static inline bool f2fs_is_volatile_file(struct inode *inode) 2921 { 2922 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 2923 } 2924 2925 static inline bool f2fs_is_first_block_written(struct inode *inode) 2926 { 2927 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 2928 } 2929 2930 static inline bool f2fs_is_drop_cache(struct inode *inode) 2931 { 2932 return is_inode_flag_set(inode, FI_DROP_CACHE); 2933 } 2934 2935 static inline void *inline_data_addr(struct inode *inode, struct page *page) 2936 { 2937 struct f2fs_inode *ri = F2FS_INODE(page); 2938 int extra_size = get_extra_isize(inode); 2939 2940 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 2941 } 2942 2943 static inline int f2fs_has_inline_dentry(struct inode *inode) 2944 { 2945 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 2946 } 2947 2948 static inline int is_file(struct inode *inode, int type) 2949 { 2950 return F2FS_I(inode)->i_advise & type; 2951 } 2952 2953 static inline void set_file(struct inode *inode, int type) 2954 { 2955 F2FS_I(inode)->i_advise |= type; 2956 f2fs_mark_inode_dirty_sync(inode, true); 2957 } 2958 2959 static inline void clear_file(struct inode *inode, int type) 2960 { 2961 F2FS_I(inode)->i_advise &= ~type; 2962 f2fs_mark_inode_dirty_sync(inode, true); 2963 } 2964 2965 static inline bool f2fs_is_time_consistent(struct inode *inode) 2966 { 2967 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 2968 
return false; 2969 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 2970 return false; 2971 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 2972 return false; 2973 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 2974 &F2FS_I(inode)->i_crtime)) 2975 return false; 2976 return true; 2977 } 2978 2979 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 2980 { 2981 bool ret; 2982 2983 if (dsync) { 2984 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2985 2986 spin_lock(&sbi->inode_lock[DIRTY_META]); 2987 ret = list_empty(&F2FS_I(inode)->gdirty_list); 2988 spin_unlock(&sbi->inode_lock[DIRTY_META]); 2989 return ret; 2990 } 2991 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 2992 file_keep_isize(inode) || 2993 i_size_read(inode) & ~PAGE_MASK) 2994 return false; 2995 2996 if (!f2fs_is_time_consistent(inode)) 2997 return false; 2998 2999 spin_lock(&F2FS_I(inode)->i_size_lock); 3000 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 3001 spin_unlock(&F2FS_I(inode)->i_size_lock); 3002 3003 return ret; 3004 } 3005 3006 static inline bool f2fs_readonly(struct super_block *sb) 3007 { 3008 return sb_rdonly(sb); 3009 } 3010 3011 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 3012 { 3013 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3014 } 3015 3016 static inline bool is_dot_dotdot(const u8 *name, size_t len) 3017 { 3018 if (len == 1 && name[0] == '.') 3019 return true; 3020 3021 if (len == 2 && name[0] == '.' && name[1] == '.') 3022 return true; 3023 3024 return false; 3025 } 3026 3027 static inline bool f2fs_may_extent_tree(struct inode *inode) 3028 { 3029 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3030 3031 if (!test_opt(sbi, EXTENT_CACHE) || 3032 is_inode_flag_set(inode, FI_NO_EXTENT) || 3033 is_inode_flag_set(inode, FI_COMPRESSED_FILE)) 3034 return false; 3035 3036 /* 3037 * for recovered files during mount do not create extents 3038 * if shrinker is not registered. 3039 */ 3040 if (list_empty(&sbi->s_list)) 3041 return false; 3042 3043 return S_ISREG(inode->i_mode); 3044 } 3045 3046 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3047 size_t size, gfp_t flags) 3048 { 3049 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3050 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3051 return NULL; 3052 } 3053 3054 return kmalloc(size, flags); 3055 } 3056 3057 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3058 size_t size, gfp_t flags) 3059 { 3060 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3061 } 3062 3063 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3064 size_t size, gfp_t flags) 3065 { 3066 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3067 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3068 return NULL; 3069 } 3070 3071 return kvmalloc(size, flags); 3072 } 3073 3074 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3075 size_t size, gfp_t flags) 3076 { 3077 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3078 } 3079 3080 static inline int get_extra_isize(struct inode *inode) 3081 { 3082 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3083 } 3084 3085 static inline int get_inline_xattr_addrs(struct inode *inode) 3086 { 3087 return F2FS_I(inode)->i_inline_xattr_size; 3088 } 3089 3090 #define f2fs_get_inode_mode(i) \ 3091 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3092 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3093 3094 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3095 (offsetof(struct f2fs_inode, i_extra_end) - \ 3096 offsetof(struct f2fs_inode, i_extra_isize)) \ 3097 3098 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3099 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3100 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3101 sizeof((f2fs_inode)->field)) \ 3102 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3103 3104 #define DEFAULT_IOSTAT_PERIOD_MS 3000 3105 #define MIN_IOSTAT_PERIOD_MS 100 3106 /* maximum period of iostat tracing is 1 day */ 3107 #define MAX_IOSTAT_PERIOD_MS 8640000 3108 3109 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) 3110 { 3111 int i; 3112 3113 spin_lock(&sbi->iostat_lock); 3114 for (i = 0; i < NR_IO_TYPE; i++) { 3115 sbi->rw_iostat[i] = 0; 3116 sbi->prev_rw_iostat[i] = 0; 3117 } 3118 spin_unlock(&sbi->iostat_lock); 3119 } 3120 3121 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi); 3122 3123 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, 3124 enum iostat_type type, unsigned long long io_bytes) 3125 { 3126 if (!sbi->iostat_enable) 3127 return; 3128 spin_lock(&sbi->iostat_lock); 3129 sbi->rw_iostat[type] += io_bytes; 3130 3131 if (type == APP_WRITE_IO || type == APP_DIRECT_IO) 3132 sbi->rw_iostat[APP_BUFFERED_IO] = 3133 sbi->rw_iostat[APP_WRITE_IO] - 3134 sbi->rw_iostat[APP_DIRECT_IO]; 3135 3136 if (type == APP_READ_IO || type == APP_DIRECT_READ_IO) 3137 sbi->rw_iostat[APP_BUFFERED_READ_IO] = 3138 sbi->rw_iostat[APP_READ_IO] - 3139 sbi->rw_iostat[APP_DIRECT_READ_IO]; 3140 spin_unlock(&sbi->iostat_lock); 3141 3142 f2fs_record_iostat(sbi); 3143 } 3144 3145 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3146 3147 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3148 3149 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3150 block_t blkaddr, int type); 3151 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3152 block_t blkaddr, int type) 3153 { 3154 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3155 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3156 blkaddr, type); 3157 f2fs_bug_on(sbi, 1); 3158 } 3159 } 3160 3161 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3162 { 3163 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3164 blkaddr == COMPRESS_ADDR) 3165 return false; 3166 return true; 3167 } 3168 3169 static inline void f2fs_set_page_private(struct page *page, 3170 unsigned long data) 3171 { 3172 if (PagePrivate(page)) 3173 return; 3174 3175 attach_page_private(page, (void *)data); 3176 } 3177 3178 static inline void f2fs_clear_page_private(struct page *page) 3179 { 3180 detach_page_private(page); 3181 } 3182 3183 /* 3184 * file.c 3185 */ 3186 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3187 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3188 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3189 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3190 int f2fs_truncate(struct inode *inode); 3191 int f2fs_getattr(const struct path *path, struct kstat *stat, 3192 u32 request_mask, unsigned int flags); 3193 int f2fs_setattr(struct dentry *dentry, struct iattr *attr); 3194 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3195 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3196 int f2fs_precache_extents(struct inode *inode); 3197 long 
f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3198 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3199 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3200 int f2fs_pin_file_control(struct inode *inode, bool inc); 3201 3202 /* 3203 * inode.c 3204 */ 3205 void f2fs_set_inode_flags(struct inode *inode); 3206 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3207 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3208 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3209 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3210 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3211 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3212 void f2fs_update_inode_page(struct inode *inode); 3213 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3214 void f2fs_evict_inode(struct inode *inode); 3215 void f2fs_handle_failed_inode(struct inode *inode); 3216 3217 /* 3218 * namei.c 3219 */ 3220 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3221 bool hot, bool set); 3222 struct dentry *f2fs_get_parent(struct dentry *child); 3223 3224 /* 3225 * dir.c 3226 */ 3227 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3228 int f2fs_init_casefolded_name(const struct inode *dir, 3229 struct f2fs_filename *fname); 3230 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3231 int lookup, struct f2fs_filename *fname); 3232 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3233 struct f2fs_filename *fname); 3234 void f2fs_free_filename(struct f2fs_filename *fname); 3235 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3236 const struct f2fs_filename *fname, int *max_slots); 3237 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3238 unsigned int start_pos, struct fscrypt_str *fstr); 3239 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3240 struct f2fs_dentry_ptr *d); 3241 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3242 const struct f2fs_filename *fname, struct page *dpage); 3243 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3244 unsigned int current_depth); 3245 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3246 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3247 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3248 const struct f2fs_filename *fname, 3249 struct page **res_page); 3250 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3251 const struct qstr *child, struct page **res_page); 3252 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3253 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3254 struct page **page); 3255 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3256 struct page *page, struct inode *inode); 3257 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3258 const struct f2fs_filename *fname); 3259 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3260 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3261 unsigned int bit_pos); 3262 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3263 struct inode *inode, nid_t ino, umode_t mode); 3264 int f2fs_add_dentry(struct 
inode *dir, const struct f2fs_filename *fname, 3265 struct inode *inode, nid_t ino, umode_t mode); 3266 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3267 struct inode *inode, nid_t ino, umode_t mode); 3268 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3269 struct inode *dir, struct inode *inode); 3270 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3271 bool f2fs_empty_dir(struct inode *dir); 3272 3273 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3274 { 3275 if (fscrypt_is_nokey_name(dentry)) 3276 return -ENOKEY; 3277 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3278 inode, inode->i_ino, inode->i_mode); 3279 } 3280 3281 /* 3282 * super.c 3283 */ 3284 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3285 void f2fs_inode_synced(struct inode *inode); 3286 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3287 int f2fs_quota_sync(struct super_block *sb, int type); 3288 loff_t max_file_blocks(struct inode *inode); 3289 void f2fs_quota_off_umount(struct super_block *sb); 3290 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3291 int f2fs_sync_fs(struct super_block *sb, int sync); 3292 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3293 3294 /* 3295 * hash.c 3296 */ 3297 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3298 3299 /* 3300 * node.c 3301 */ 3302 struct dnode_of_data; 3303 struct node_info; 3304 3305 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3306 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3307 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3308 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3309 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3310 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3311 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3312 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3313 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3314 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3315 struct node_info *ni); 3316 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3317 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3318 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3319 int f2fs_truncate_xattr_node(struct inode *inode); 3320 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3321 unsigned int seq_id); 3322 int f2fs_remove_inode_page(struct inode *inode); 3323 struct page *f2fs_new_inode_page(struct inode *inode); 3324 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3325 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3326 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3327 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3328 int f2fs_move_node_page(struct page *node_page, int gc_type); 3329 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3330 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3331 struct writeback_control *wbc, bool atomic, 3332 unsigned int *seq_id); 3333 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3334 struct writeback_control *wbc, 3335 bool do_balance, enum iostat_type io_type); 3336 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool 

/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
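
/*
 * Illustrative sketch of the free-nid allocation protocol declared above
 * (a simplified version of what inode creation does; error paths elided):
 * a nid is reserved with f2fs_alloc_nid(), and the caller pairs it with
 * either f2fs_alloc_nid_done() once the new node exists, or
 * f2fs_alloc_nid_failed() to return the nid to the free list.
 *
 *	nid_t ino;
 *
 *	if (!f2fs_alloc_nid(sbi, &ino))
 *		return -ENOSPC;
 *	... create and initialize the node page for ino ...
 *	if (err)
 *		f2fs_alloc_nid_failed(sbi, ino);
 *	else
 *		f2fs_alloc_nid_done(sbi, ino);
 */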

/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
			unsigned int start, unsigned int end);
void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
			enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn, struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
			block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, unsigned int segno);
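
/*
 * Note on f2fs_balance_fs() declared above: callers that may have consumed
 * free segments call it after dirtying data or node blocks; with @need set
 * and free sections running low it may perform foreground garbage
 * collection before returning.  A minimal sketch (assuming @dn is a
 * struct dnode_of_data that was just used for an allocation):
 *
 *	f2fs_put_dnode(&dn);
 *	f2fs_balance_fs(sbi, dn.node_changed);
 */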

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
			unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
			unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
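
/*
 * Illustrative sketch of the orphan-inode bookkeeping above (simplified;
 * it mirrors how the unlink path pairs these calls): a slot is reserved
 * before the victim's link count can drop, and the reservation is either
 * turned into a real orphan entry or released again.
 *
 *	err = f2fs_acquire_orphan_inode(sbi);
 *	if (err)
 *		return err;
 *	... drop the victim's link count ...
 *	if (inode->i_nlink == 0)
 *		f2fs_add_orphan_inode(inode);
 *	else
 *		f2fs_release_orphan_inode(sbi);
 */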

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
			struct inode *inode, struct page *page,
			nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
			struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
			struct bio **bio, sector_t *last_block,
			struct writeback_control *wbc,
			enum iostat_type io_type,
			int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
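
/*
 * Illustrative sketch for the data-page helpers above: pages returned by
 * f2fs_get_lock_data_page() come back locked and are released with
 * f2fs_put_page() (declared earlier in this header), passing 1 to unlock:
 *
 *	struct page *page;
 *
 *	page = f2fs_get_lock_data_page(inode, index, false);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... read or update the page contents ...
 *	f2fs_put_page(page, 1);
 */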

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode) \
	do { \
		if (f2fs_has_inline_xattr(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
	} while (0)
#define stat_dec_inline_xattr(inode) \
	do { \
		if (f2fs_has_inline_xattr(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
	} while (0)
#define stat_inc_inline_inode(inode) \
	do { \
		if (f2fs_has_inline_data(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
	} while (0)
#define stat_dec_inline_inode(inode) \
	do { \
		if (f2fs_has_inline_data(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
	} while (0)
#define stat_inc_inline_dir(inode) \
	do { \
		if (f2fs_has_inline_dentry(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
	} while (0)
#define stat_dec_inline_dir(inode) \
	do { \
		if (f2fs_has_inline_dentry(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
	} while (0)
#define stat_inc_compr_inode(inode) \
	do { \
		if (f2fs_compressed_file(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
	} while (0)
#define stat_dec_compr_inode(inode) \
	do { \
		if (f2fs_compressed_file(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
	} while (0)
#define stat_add_compr_blocks(inode, blocks) \
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks) \
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr) \
	do { \
		if (blkaddr < SIT_I(sbi)->sit_base_addr) \
			atomic_inc(&(sbi)->meta_count[META_CP]); \
		else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_SIT]); \
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_NAT]); \
		else if (blkaddr < SM_I(sbi)->main_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_SSA]); \
	} while (0)
#define stat_inc_seg_type(sbi, curseg) \
	((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg) \
	((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
	(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode) \
	do { \
		int cur = F2FS_I_SB(inode)->atomic_files; \
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
		if (cur > max) \
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
	} while (0)
#define stat_inc_volatile_write(inode) \
	(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode) \
	(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode) \
	do { \
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
		if (cur > max) \
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		si->tot_segs++; \
		if ((type) == SUM_TYPE_DATA) { \
			si->data_segs++; \
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
		} else { \
			si->node_segs++; \
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
		} \
	} while (0)

#define stat_inc_tot_blk_count(si, blks) \
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		stat_inc_tot_blk_count(si, blks); \
		si->data_blks += (blks); \
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		stat_inc_tot_blk_count(si, blks); \
		si->node_blks += (blks); \
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif
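
/*
 * Note: the !CONFIG_F2FS_STAT_FS stubs above discard their arguments, so
 * callers may use the stat hooks unconditionally, e.g. (sketch):
 *
 *	stat_inc_call_count(F2FS_STAT(sbi));
 *
 * With CONFIG_F2FS_STAT_FS=n this compiles away to an empty statement.
 */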

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname, struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo, __u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink, struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs,
			struct rb_entry **prev_entry, struct rb_entry **next_entry,
			struct rb_node ***insert_p, struct rb_node **insert_parent,
			bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
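
/*
 * Illustrative sketch of f2fs_lookup_extent_cache() above (assuming
 * @pgofs is the file block index being mapped): on a hit, the cached
 * extent describes a contiguous range, so the block address follows
 * directly without walking the node tree.
 *
 *	struct extent_info ei;
 *
 *	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 *		block_t blkaddr = ei.blk + pgofs - ei.fofs;
 *
 *		... use blkaddr directly ...
 *	}
 */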

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}
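
/*
 * Note on f2fs_post_read_required(): paths that cannot cope with read-time
 * post-processing use it as a gate; for example, f2fs_force_buffered_io()
 * later in this header contains (sketch):
 *
 *	if (f2fs_post_read_required(inode))
 *		return true;
 */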

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_end_read_compressed_page(struct page *page, bool failed);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
			int *submitted,
			struct writeback_control *wbc,
			enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
			unsigned nr_pages, sector_t *last_block_in_bio,
			bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression is not supported */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
#endif

static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
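
/*
 * For reference, F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) above expands to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * i.e. each f2fs_sb_has_*() helper simply tests one feature bit of the
 * on-disk superblock.
 */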

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) || bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}
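
/*
 * Worked example for block_unaligned_IO() above, assuming a 4KB block size
 * (i_blkbits = 12, blocksize_mask = 0xfff): a direct I/O request at
 * ki_pos 8192 whose iovecs are 4096-byte aligned yields
 * (8192 | 4096) & 0xfff == 0, i.e. aligned, while ki_pos 8704 yields
 * 8704 & 0xfff == 512, i.e. unaligned; in LFS mode such a write is forced
 * to buffered I/O (see f2fs_force_buffered_io() below).
 */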

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
		!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * for a zoned block device, fall back from direct I/O to buffered
	 * I/O so that all I/O can be serialized by the log-structured write
	 * path.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
					!IS_SWAPFILE(inode))
		return true;

	return false;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */