1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * fs/f2fs/f2fs.h 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #ifndef _LINUX_F2FS_H 9 #define _LINUX_F2FS_H 10 11 #include <linux/uio.h> 12 #include <linux/types.h> 13 #include <linux/page-flags.h> 14 #include <linux/buffer_head.h> 15 #include <linux/slab.h> 16 #include <linux/crc32.h> 17 #include <linux/magic.h> 18 #include <linux/kobject.h> 19 #include <linux/sched.h> 20 #include <linux/cred.h> 21 #include <linux/vmalloc.h> 22 #include <linux/bio.h> 23 #include <linux/blkdev.h> 24 #include <linux/quotaops.h> 25 #include <linux/part_stat.h> 26 #include <crypto/hash.h> 27 28 #include <linux/fscrypt.h> 29 #include <linux/fsverity.h> 30 31 #ifdef CONFIG_F2FS_CHECK_FS 32 #define f2fs_bug_on(sbi, condition) BUG_ON(condition) 33 #else 34 #define f2fs_bug_on(sbi, condition) \ 35 do { \ 36 if (WARN_ON(condition)) \ 37 set_sbi_flag(sbi, SBI_NEED_FSCK); \ 38 } while (0) 39 #endif 40 41 enum { 42 FAULT_KMALLOC, 43 FAULT_KVMALLOC, 44 FAULT_PAGE_ALLOC, 45 FAULT_PAGE_GET, 46 FAULT_ALLOC_BIO, 47 FAULT_ALLOC_NID, 48 FAULT_ORPHAN, 49 FAULT_BLOCK, 50 FAULT_DIR_DEPTH, 51 FAULT_EVICT_INODE, 52 FAULT_TRUNCATE, 53 FAULT_READ_IO, 54 FAULT_CHECKPOINT, 55 FAULT_DISCARD, 56 FAULT_WRITE_IO, 57 FAULT_MAX, 58 }; 59 60 #ifdef CONFIG_F2FS_FAULT_INJECTION 61 #define F2FS_ALL_FAULT_TYPE ((1 << FAULT_MAX) - 1) 62 63 struct f2fs_fault_info { 64 atomic_t inject_ops; 65 unsigned int inject_rate; 66 unsigned int inject_type; 67 }; 68 69 extern const char *f2fs_fault_name[FAULT_MAX]; 70 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) 71 #endif 72 73 /* 74 * For mount options 75 */ 76 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002 77 #define F2FS_MOUNT_DISCARD 0x00000004 78 #define F2FS_MOUNT_NOHEAP 0x00000008 79 #define F2FS_MOUNT_XATTR_USER 0x00000010 80 #define F2FS_MOUNT_POSIX_ACL 0x00000020 81 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040 82 #define F2FS_MOUNT_INLINE_XATTR 0x00000080 83 #define F2FS_MOUNT_INLINE_DATA 0x00000100 84 #define F2FS_MOUNT_INLINE_DENTRY 0x00000200 85 #define F2FS_MOUNT_FLUSH_MERGE 0x00000400 86 #define F2FS_MOUNT_NOBARRIER 0x00000800 87 #define F2FS_MOUNT_FASTBOOT 0x00001000 88 #define F2FS_MOUNT_EXTENT_CACHE 0x00002000 89 #define F2FS_MOUNT_DATA_FLUSH 0x00008000 90 #define F2FS_MOUNT_FAULT_INJECTION 0x00010000 91 #define F2FS_MOUNT_USRQUOTA 0x00080000 92 #define F2FS_MOUNT_GRPQUOTA 0x00100000 93 #define F2FS_MOUNT_PRJQUOTA 0x00200000 94 #define F2FS_MOUNT_QUOTA 0x00400000 95 #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000 96 #define F2FS_MOUNT_RESERVE_ROOT 0x01000000 97 #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000 98 #define F2FS_MOUNT_NORECOVERY 0x04000000 99 #define F2FS_MOUNT_ATGC 0x08000000 100 101 #define F2FS_OPTION(sbi) ((sbi)->mount_opt) 102 #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) 103 #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option) 104 #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option) 105 106 #define ver_after(a, b) (typecheck(unsigned long long, a) && \ 107 typecheck(unsigned long long, b) && \ 108 ((long long)((a) - (b)) > 0)) 109 110 typedef u32 block_t; /* 111 * should not change u32, since it is the on-disk block 112 * address format, __le32. 
113 */ 114 typedef u32 nid_t; 115 116 #define COMPRESS_EXT_NUM 16 117 118 struct f2fs_mount_info { 119 unsigned int opt; 120 int write_io_size_bits; /* Write IO size bits */ 121 block_t root_reserved_blocks; /* root reserved blocks */ 122 kuid_t s_resuid; /* reserved blocks for uid */ 123 kgid_t s_resgid; /* reserved blocks for gid */ 124 int active_logs; /* # of active logs */ 125 int inline_xattr_size; /* inline xattr size */ 126 #ifdef CONFIG_F2FS_FAULT_INJECTION 127 struct f2fs_fault_info fault_info; /* For fault injection */ 128 #endif 129 #ifdef CONFIG_QUOTA 130 /* Names of quota files with journalled quota */ 131 char *s_qf_names[MAXQUOTAS]; 132 int s_jquota_fmt; /* Format of quota to use */ 133 #endif 134 /* For which write hints are passed down to block layer */ 135 int whint_mode; 136 int alloc_mode; /* segment allocation policy */ 137 int fsync_mode; /* fsync policy */ 138 int fs_mode; /* fs mode: LFS or ADAPTIVE */ 139 int bggc_mode; /* bggc mode: off, on or sync */ 140 struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */ 141 block_t unusable_cap_perc; /* percentage for cap */ 142 block_t unusable_cap; /* Amount of space allowed to be 143 * unusable when disabling checkpoint 144 */ 145 146 /* For compression */ 147 unsigned char compress_algorithm; /* algorithm type */ 148 unsigned char compress_log_size; /* cluster log size */ 149 unsigned char compress_level; /* compress level */ 150 bool compress_chksum; /* compressed data chksum */ 151 unsigned char compress_ext_cnt; /* extension count */ 152 int compress_mode; /* compression mode */ 153 unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */ 154 }; 155 156 #define F2FS_FEATURE_ENCRYPT 0x0001 157 #define F2FS_FEATURE_BLKZONED 0x0002 158 #define F2FS_FEATURE_ATOMIC_WRITE 0x0004 159 #define F2FS_FEATURE_EXTRA_ATTR 0x0008 160 #define F2FS_FEATURE_PRJQUOTA 0x0010 161 #define F2FS_FEATURE_INODE_CHKSUM 0x0020 162 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040 163 #define F2FS_FEATURE_QUOTA_INO 0x0080 164 #define F2FS_FEATURE_INODE_CRTIME 0x0100 165 #define F2FS_FEATURE_LOST_FOUND 0x0200 166 #define F2FS_FEATURE_VERITY 0x0400 167 #define F2FS_FEATURE_SB_CHKSUM 0x0800 168 #define F2FS_FEATURE_CASEFOLD 0x1000 169 #define F2FS_FEATURE_COMPRESSION 0x2000 170 171 #define __F2FS_HAS_FEATURE(raw_super, mask) \ 172 ((raw_super->feature & cpu_to_le32(mask)) != 0) 173 #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask) 174 #define F2FS_SET_FEATURE(sbi, mask) \ 175 (sbi->raw_super->feature |= cpu_to_le32(mask)) 176 #define F2FS_CLEAR_FEATURE(sbi, mask) \ 177 (sbi->raw_super->feature &= ~cpu_to_le32(mask)) 178 179 /* 180 * Default values for user and/or group using reserved blocks 181 */ 182 #define F2FS_DEF_RESUID 0 183 #define F2FS_DEF_RESGID 0 184 185 /* 186 * For checkpoint manager 187 */ 188 enum { 189 NAT_BITMAP, 190 SIT_BITMAP 191 }; 192 193 #define CP_UMOUNT 0x00000001 194 #define CP_FASTBOOT 0x00000002 195 #define CP_SYNC 0x00000004 196 #define CP_RECOVERY 0x00000008 197 #define CP_DISCARD 0x00000010 198 #define CP_TRIMMED 0x00000020 199 #define CP_PAUSE 0x00000040 200 #define CP_RESIZE 0x00000080 201 202 #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) 203 #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ 204 #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ 205 #define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */ 206 #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ 207 #define 
DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */ 208 #define DEF_CP_INTERVAL 60 /* 60 secs */ 209 #define DEF_IDLE_INTERVAL 5 /* 5 secs */ 210 #define DEF_DISABLE_INTERVAL 5 /* 5 secs */ 211 #define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */ 212 #define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */ 213 214 struct cp_control { 215 int reason; 216 __u64 trim_start; 217 __u64 trim_end; 218 __u64 trim_minlen; 219 }; 220 221 /* 222 * indicate meta/data type 223 */ 224 enum { 225 META_CP, 226 META_NAT, 227 META_SIT, 228 META_SSA, 229 META_MAX, 230 META_POR, 231 DATA_GENERIC, /* check range only */ 232 DATA_GENERIC_ENHANCE, /* strong check on range and segment bitmap */ 233 DATA_GENERIC_ENHANCE_READ, /* 234 * strong check on range and segment 235 * bitmap but no warning due to race 236 * condition of read on truncated area 237 * by extent_cache 238 */ 239 META_GENERIC, 240 }; 241 242 /* for the list of ino */ 243 enum { 244 ORPHAN_INO, /* for orphan ino list */ 245 APPEND_INO, /* for append ino list */ 246 UPDATE_INO, /* for update ino list */ 247 TRANS_DIR_INO, /* for trasactions dir ino list */ 248 FLUSH_INO, /* for multiple device flushing */ 249 MAX_INO_ENTRY, /* max. list */ 250 }; 251 252 struct ino_entry { 253 struct list_head list; /* list head */ 254 nid_t ino; /* inode number */ 255 unsigned int dirty_device; /* dirty device bitmap */ 256 }; 257 258 /* for the list of inodes to be GCed */ 259 struct inode_entry { 260 struct list_head list; /* list head */ 261 struct inode *inode; /* vfs inode pointer */ 262 }; 263 264 struct fsync_node_entry { 265 struct list_head list; /* list head */ 266 struct page *page; /* warm node page pointer */ 267 unsigned int seq_id; /* sequence id */ 268 }; 269 270 /* for the bitmap indicate blocks to be discarded */ 271 struct discard_entry { 272 struct list_head list; /* list head */ 273 block_t start_blkaddr; /* start blockaddr of current segment */ 274 unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */ 275 }; 276 277 /* default discard granularity of inner discard thread, unit: block count */ 278 #define DEFAULT_DISCARD_GRANULARITY 16 279 280 /* max discard pend list number */ 281 #define MAX_PLIST_NUM 512 282 #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? 
\ 283 (MAX_PLIST_NUM - 1) : ((blk_num) - 1)) 284 285 enum { 286 D_PREP, /* initial */ 287 D_PARTIAL, /* partially submitted */ 288 D_SUBMIT, /* all submitted */ 289 D_DONE, /* finished */ 290 }; 291 292 struct discard_info { 293 block_t lstart; /* logical start address */ 294 block_t len; /* length */ 295 block_t start; /* actual start address in dev */ 296 }; 297 298 struct discard_cmd { 299 struct rb_node rb_node; /* rb node located in rb-tree */ 300 union { 301 struct { 302 block_t lstart; /* logical start address */ 303 block_t len; /* length */ 304 block_t start; /* actual start address in dev */ 305 }; 306 struct discard_info di; /* discard info */ 307 308 }; 309 struct list_head list; /* command list */ 310 struct completion wait; /* compleation */ 311 struct block_device *bdev; /* bdev */ 312 unsigned short ref; /* reference count */ 313 unsigned char state; /* state */ 314 unsigned char queued; /* queued discard */ 315 int error; /* bio error */ 316 spinlock_t lock; /* for state/bio_ref updating */ 317 unsigned short bio_ref; /* bio reference count */ 318 }; 319 320 enum { 321 DPOLICY_BG, 322 DPOLICY_FORCE, 323 DPOLICY_FSTRIM, 324 DPOLICY_UMOUNT, 325 MAX_DPOLICY, 326 }; 327 328 struct discard_policy { 329 int type; /* type of discard */ 330 unsigned int min_interval; /* used for candidates exist */ 331 unsigned int mid_interval; /* used for device busy */ 332 unsigned int max_interval; /* used for candidates not exist */ 333 unsigned int max_requests; /* # of discards issued per round */ 334 unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */ 335 bool io_aware; /* issue discard in idle time */ 336 bool sync; /* submit discard with REQ_SYNC flag */ 337 bool ordered; /* issue discard by lba order */ 338 bool timeout; /* discard timeout for put_super */ 339 unsigned int granularity; /* discard granularity */ 340 }; 341 342 struct discard_cmd_control { 343 struct task_struct *f2fs_issue_discard; /* discard thread */ 344 struct list_head entry_list; /* 4KB discard entry list */ 345 struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */ 346 struct list_head wait_list; /* store on-flushing entries */ 347 struct list_head fstrim_list; /* in-flight discard from fstrim */ 348 wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */ 349 unsigned int discard_wake; /* to wake up discard thread */ 350 struct mutex cmd_lock; 351 unsigned int nr_discards; /* # of discards in the list */ 352 unsigned int max_discards; /* max. 
discards to be issued */ 353 unsigned int discard_granularity; /* discard granularity */ 354 unsigned int undiscard_blks; /* # of undiscard blocks */ 355 unsigned int next_pos; /* next discard position */ 356 atomic_t issued_discard; /* # of issued discard */ 357 atomic_t queued_discard; /* # of queued discard */ 358 atomic_t discard_cmd_cnt; /* # of cached cmd count */ 359 struct rb_root_cached root; /* root of discard rb-tree */ 360 bool rbtree_check; /* config for consistence check */ 361 }; 362 363 /* for the list of fsync inodes, used only during recovery */ 364 struct fsync_inode_entry { 365 struct list_head list; /* list head */ 366 struct inode *inode; /* vfs inode pointer */ 367 block_t blkaddr; /* block address locating the last fsync */ 368 block_t last_dentry; /* block address locating the last dentry */ 369 }; 370 371 #define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats)) 372 #define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits)) 373 374 #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne) 375 #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid) 376 #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se) 377 #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno) 378 379 #define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl)) 380 #define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl)) 381 382 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i) 383 { 384 int before = nats_in_cursum(journal); 385 386 journal->n_nats = cpu_to_le16(before + i); 387 return before; 388 } 389 390 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i) 391 { 392 int before = sits_in_cursum(journal); 393 394 journal->n_sits = cpu_to_le16(before + i); 395 return before; 396 } 397 398 static inline bool __has_cursum_space(struct f2fs_journal *journal, 399 int size, int type) 400 { 401 if (type == NAT_JOURNAL) 402 return size <= MAX_NAT_JENTRIES(journal); 403 return size <= MAX_SIT_JENTRIES(journal); 404 } 405 406 /* for inline stuff */ 407 #define DEF_INLINE_RESERVED_SIZE 1 408 static inline int get_extra_isize(struct inode *inode); 409 static inline int get_inline_xattr_addrs(struct inode *inode); 410 #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \ 411 (CUR_ADDRS_PER_INODE(inode) - \ 412 get_inline_xattr_addrs(inode) - \ 413 DEF_INLINE_RESERVED_SIZE)) 414 415 /* for inline dir */ 416 #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ 417 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ 418 BITS_PER_BYTE + 1)) 419 #define INLINE_DENTRY_BITMAP_SIZE(inode) \ 420 DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE) 421 #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \ 422 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ 423 NR_INLINE_DENTRY(inode) + \ 424 INLINE_DENTRY_BITMAP_SIZE(inode))) 425 426 /* 427 * For INODE and NODE manager 428 */ 429 /* for directory operations */ 430 431 struct f2fs_filename { 432 /* 433 * The filename the user specified. This is NULL for some 434 * filesystem-internal operations, e.g. converting an inline directory 435 * to a non-inline one, or roll-forward recovering an encrypted dentry. 436 */ 437 const struct qstr *usr_fname; 438 439 /* 440 * The on-disk filename. For encrypted directories, this is encrypted. 441 * This may be NULL for lookups in an encrypted dir without the key. 
442 */ 443 struct fscrypt_str disk_name; 444 445 /* The dirhash of this filename */ 446 f2fs_hash_t hash; 447 448 #ifdef CONFIG_FS_ENCRYPTION 449 /* 450 * For lookups in encrypted directories: either the buffer backing 451 * disk_name, or a buffer that holds the decoded no-key name. 452 */ 453 struct fscrypt_str crypto_buf; 454 #endif 455 #ifdef CONFIG_UNICODE 456 /* 457 * For casefolded directories: the casefolded name, but it's left NULL 458 * if the original name is not valid Unicode, if the directory is both 459 * casefolded and encrypted and its encryption key is unavailable, or if 460 * the filesystem is doing an internal operation where usr_fname is also 461 * NULL. In all these cases we fall back to treating the name as an 462 * opaque byte sequence. 463 */ 464 struct fscrypt_str cf_name; 465 #endif 466 }; 467 468 struct f2fs_dentry_ptr { 469 struct inode *inode; 470 void *bitmap; 471 struct f2fs_dir_entry *dentry; 472 __u8 (*filename)[F2FS_SLOT_LEN]; 473 int max; 474 int nr_bitmap; 475 }; 476 477 static inline void make_dentry_ptr_block(struct inode *inode, 478 struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t) 479 { 480 d->inode = inode; 481 d->max = NR_DENTRY_IN_BLOCK; 482 d->nr_bitmap = SIZE_OF_DENTRY_BITMAP; 483 d->bitmap = t->dentry_bitmap; 484 d->dentry = t->dentry; 485 d->filename = t->filename; 486 } 487 488 static inline void make_dentry_ptr_inline(struct inode *inode, 489 struct f2fs_dentry_ptr *d, void *t) 490 { 491 int entry_cnt = NR_INLINE_DENTRY(inode); 492 int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode); 493 int reserved_size = INLINE_RESERVED_SIZE(inode); 494 495 d->inode = inode; 496 d->max = entry_cnt; 497 d->nr_bitmap = bitmap_size; 498 d->bitmap = t; 499 d->dentry = t + bitmap_size + reserved_size; 500 d->filename = t + bitmap_size + reserved_size + 501 SIZE_OF_DIR_ENTRY * entry_cnt; 502 } 503 504 /* 505 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1 506 * as its node offset to distinguish from index node blocks. 507 * But some bits are used to mark the node block. 508 */ 509 #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \ 510 >> OFFSET_BIT_SHIFT) 511 enum { 512 ALLOC_NODE, /* allocate a new node page if needed */ 513 LOOKUP_NODE, /* look up a node without readahead */ 514 LOOKUP_NODE_RA, /* 515 * look up a node with readahead called 516 * by get_data_block. 
517 */ 518 }; 519 520 #define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */ 521 522 /* congestion wait timeout value, default: 20ms */ 523 #define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20)) 524 525 /* maximum retry quota flush count */ 526 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8 527 528 #define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */ 529 530 #define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */ 531 532 /* for in-memory extent cache entry */ 533 #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */ 534 535 /* number of extent info in extent cache we try to shrink */ 536 #define EXTENT_CACHE_SHRINK_NUMBER 128 537 538 struct rb_entry { 539 struct rb_node rb_node; /* rb node located in rb-tree */ 540 union { 541 struct { 542 unsigned int ofs; /* start offset of the entry */ 543 unsigned int len; /* length of the entry */ 544 }; 545 unsigned long long key; /* 64-bits key */ 546 } __packed; 547 }; 548 549 struct extent_info { 550 unsigned int fofs; /* start offset in a file */ 551 unsigned int len; /* length of the extent */ 552 u32 blk; /* start block address of the extent */ 553 }; 554 555 struct extent_node { 556 struct rb_node rb_node; /* rb node located in rb-tree */ 557 struct extent_info ei; /* extent info */ 558 struct list_head list; /* node in global extent list of sbi */ 559 struct extent_tree *et; /* extent tree pointer */ 560 }; 561 562 struct extent_tree { 563 nid_t ino; /* inode number */ 564 struct rb_root_cached root; /* root of extent info rb-tree */ 565 struct extent_node *cached_en; /* recently accessed extent node */ 566 struct extent_info largest; /* largested extent info */ 567 struct list_head list; /* to be used by sbi->zombie_list */ 568 rwlock_t lock; /* protect extent info rb-tree */ 569 atomic_t node_cnt; /* # of extent node in rb-tree*/ 570 bool largest_updated; /* largest extent updated */ 571 }; 572 573 /* 574 * This structure is taken from ext4_map_blocks. 575 * 576 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks(). 577 */ 578 #define F2FS_MAP_NEW (1 << BH_New) 579 #define F2FS_MAP_MAPPED (1 << BH_Mapped) 580 #define F2FS_MAP_UNWRITTEN (1 << BH_Unwritten) 581 #define F2FS_MAP_FLAGS (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\ 582 F2FS_MAP_UNWRITTEN) 583 584 struct f2fs_map_blocks { 585 block_t m_pblk; 586 block_t m_lblk; 587 unsigned int m_len; 588 unsigned int m_flags; 589 pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */ 590 pgoff_t *m_next_extent; /* point to next possible extent */ 591 int m_seg_type; 592 bool m_may_create; /* indicate it is from write path */ 593 }; 594 595 /* for flag in get_data_block */ 596 enum { 597 F2FS_GET_BLOCK_DEFAULT, 598 F2FS_GET_BLOCK_FIEMAP, 599 F2FS_GET_BLOCK_BMAP, 600 F2FS_GET_BLOCK_DIO, 601 F2FS_GET_BLOCK_PRE_DIO, 602 F2FS_GET_BLOCK_PRE_AIO, 603 F2FS_GET_BLOCK_PRECACHE, 604 }; 605 606 /* 607 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. 
608 */ 609 #define FADVISE_COLD_BIT 0x01 610 #define FADVISE_LOST_PINO_BIT 0x02 611 #define FADVISE_ENCRYPT_BIT 0x04 612 #define FADVISE_ENC_NAME_BIT 0x08 613 #define FADVISE_KEEP_SIZE_BIT 0x10 614 #define FADVISE_HOT_BIT 0x20 615 #define FADVISE_VERITY_BIT 0x40 616 617 #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT) 618 619 #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) 620 #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) 621 #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) 622 #define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) 623 #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) 624 #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) 625 #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT) 626 #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT) 627 #define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT) 628 #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT) 629 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT) 630 #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT) 631 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT) 632 #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT) 633 #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT) 634 #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT) 635 #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT) 636 #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT) 637 638 #define DEF_DIR_LEVEL 0 639 640 enum { 641 GC_FAILURE_PIN, 642 GC_FAILURE_ATOMIC, 643 MAX_GC_FAILURE 644 }; 645 646 /* used for f2fs_inode_info->flags */ 647 enum { 648 FI_NEW_INODE, /* indicate newly allocated inode */ 649 FI_DIRTY_INODE, /* indicate inode is dirty or not */ 650 FI_AUTO_RECOVER, /* indicate inode is recoverable */ 651 FI_DIRTY_DIR, /* indicate directory has dirty pages */ 652 FI_INC_LINK, /* need to increment i_nlink */ 653 FI_ACL_MODE, /* indicate acl mode */ 654 FI_NO_ALLOC, /* should not allocate any blocks */ 655 FI_FREE_NID, /* free allocated nide */ 656 FI_NO_EXTENT, /* not to use the extent cache */ 657 FI_INLINE_XATTR, /* used for inline xattr */ 658 FI_INLINE_DATA, /* used for inline data*/ 659 FI_INLINE_DENTRY, /* used for inline dentry */ 660 FI_APPEND_WRITE, /* inode has appended data */ 661 FI_UPDATE_WRITE, /* inode has in-place-update data */ 662 FI_NEED_IPU, /* used for ipu per file */ 663 FI_ATOMIC_FILE, /* indicate atomic file */ 664 FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */ 665 FI_VOLATILE_FILE, /* indicate volatile file */ 666 FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ 667 FI_DROP_CACHE, /* drop dirty page cache */ 668 FI_DATA_EXIST, /* indicate data exists */ 669 FI_INLINE_DOTS, /* indicate inline dot dentries */ 670 FI_DO_DEFRAG, /* indicate defragment is running */ 671 FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ 672 FI_NO_PREALLOC, /* indicate skipped preallocated blocks */ 673 FI_HOT_DATA, /* indicate file is hot */ 674 FI_EXTRA_ATTR, /* indicate file has extra attribute */ 675 FI_PROJ_INHERIT, /* indicate file inherits projectid */ 676 FI_PIN_FILE, /* indicate file should not be gced */ 677 FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */ 678 FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ 679 FI_COMPRESSED_FILE, /* indicate file's data can be compressed */ 680 
FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */ 681 FI_MMAP_FILE, /* indicate file was mmapped */ 682 FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */ 683 FI_MAX, /* max flag, never be used */ 684 }; 685 686 struct f2fs_inode_info { 687 struct inode vfs_inode; /* serve a vfs inode */ 688 unsigned long i_flags; /* keep an inode flags for ioctl */ 689 unsigned char i_advise; /* use to give file attribute hints */ 690 unsigned char i_dir_level; /* use for dentry level for large dir */ 691 unsigned int i_current_depth; /* only for directory depth */ 692 /* for gc failure statistic */ 693 unsigned int i_gc_failures[MAX_GC_FAILURE]; 694 unsigned int i_pino; /* parent inode number */ 695 umode_t i_acl_mode; /* keep file acl mode temporarily */ 696 697 /* Use below internally in f2fs*/ 698 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ 699 struct rw_semaphore i_sem; /* protect fi info */ 700 atomic_t dirty_pages; /* # of dirty pages */ 701 f2fs_hash_t chash; /* hash value of given file name */ 702 unsigned int clevel; /* maximum level of given file name */ 703 struct task_struct *task; /* lookup and create consistency */ 704 struct task_struct *cp_task; /* separate cp/wb IO stats*/ 705 nid_t i_xattr_nid; /* node id that contains xattrs */ 706 loff_t last_disk_size; /* lastly written file size */ 707 spinlock_t i_size_lock; /* protect last_disk_size */ 708 709 #ifdef CONFIG_QUOTA 710 struct dquot *i_dquot[MAXQUOTAS]; 711 712 /* quota space reservation, managed internally by quota code */ 713 qsize_t i_reserved_quota; 714 #endif 715 struct list_head dirty_list; /* dirty list for dirs and files */ 716 struct list_head gdirty_list; /* linked in global dirty list */ 717 struct list_head inmem_ilist; /* list for inmem inodes */ 718 struct list_head inmem_pages; /* inmemory pages managed by f2fs */ 719 struct task_struct *inmem_task; /* store inmemory task */ 720 struct mutex inmem_lock; /* lock for inmemory pages */ 721 struct extent_tree *extent_tree; /* cached extent_tree entry */ 722 723 /* avoid racing between foreground op and gc */ 724 struct rw_semaphore i_gc_rwsem[2]; 725 struct rw_semaphore i_mmap_sem; 726 struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */ 727 728 int i_extra_isize; /* size of extra space located in i_addr */ 729 kprojid_t i_projid; /* id for project quota */ 730 int i_inline_xattr_size; /* inline xattr size */ 731 struct timespec64 i_crtime; /* inode creation time */ 732 struct timespec64 i_disk_time[4];/* inode disk times */ 733 734 /* for file compress */ 735 atomic_t i_compr_blocks; /* # of compressed blocks */ 736 unsigned char i_compress_algorithm; /* algorithm type */ 737 unsigned char i_log_cluster_size; /* log of cluster size */ 738 unsigned char i_compress_level; /* compress level (lz4hc,zstd) */ 739 unsigned short i_compress_flag; /* compress flag */ 740 unsigned int i_cluster_size; /* cluster size */ 741 }; 742 743 static inline void get_extent_info(struct extent_info *ext, 744 struct f2fs_extent *i_ext) 745 { 746 ext->fofs = le32_to_cpu(i_ext->fofs); 747 ext->blk = le32_to_cpu(i_ext->blk); 748 ext->len = le32_to_cpu(i_ext->len); 749 } 750 751 static inline void set_raw_extent(struct extent_info *ext, 752 struct f2fs_extent *i_ext) 753 { 754 i_ext->fofs = cpu_to_le32(ext->fofs); 755 i_ext->blk = cpu_to_le32(ext->blk); 756 i_ext->len = cpu_to_le32(ext->len); 757 } 758 759 static inline void set_extent_info(struct extent_info *ei, unsigned int fofs, 760 u32 blk, 
unsigned int len) 761 { 762 ei->fofs = fofs; 763 ei->blk = blk; 764 ei->len = len; 765 } 766 767 static inline bool __is_discard_mergeable(struct discard_info *back, 768 struct discard_info *front, unsigned int max_len) 769 { 770 return (back->lstart + back->len == front->lstart) && 771 (back->len + front->len <= max_len); 772 } 773 774 static inline bool __is_discard_back_mergeable(struct discard_info *cur, 775 struct discard_info *back, unsigned int max_len) 776 { 777 return __is_discard_mergeable(back, cur, max_len); 778 } 779 780 static inline bool __is_discard_front_mergeable(struct discard_info *cur, 781 struct discard_info *front, unsigned int max_len) 782 { 783 return __is_discard_mergeable(cur, front, max_len); 784 } 785 786 static inline bool __is_extent_mergeable(struct extent_info *back, 787 struct extent_info *front) 788 { 789 return (back->fofs + back->len == front->fofs && 790 back->blk + back->len == front->blk); 791 } 792 793 static inline bool __is_back_mergeable(struct extent_info *cur, 794 struct extent_info *back) 795 { 796 return __is_extent_mergeable(back, cur); 797 } 798 799 static inline bool __is_front_mergeable(struct extent_info *cur, 800 struct extent_info *front) 801 { 802 return __is_extent_mergeable(cur, front); 803 } 804 805 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); 806 static inline void __try_update_largest_extent(struct extent_tree *et, 807 struct extent_node *en) 808 { 809 if (en->ei.len > et->largest.len) { 810 et->largest = en->ei; 811 et->largest_updated = true; 812 } 813 } 814 815 /* 816 * For free nid management 817 */ 818 enum nid_state { 819 FREE_NID, /* newly added to free nid list */ 820 PREALLOC_NID, /* it is preallocated */ 821 MAX_NID_STATE, 822 }; 823 824 enum nat_state { 825 TOTAL_NAT, 826 DIRTY_NAT, 827 RECLAIMABLE_NAT, 828 MAX_NAT_STATE, 829 }; 830 831 struct f2fs_nm_info { 832 block_t nat_blkaddr; /* base disk address of NAT */ 833 nid_t max_nid; /* maximum possible node ids */ 834 nid_t available_nids; /* # of available node ids */ 835 nid_t next_scan_nid; /* the next nid to be scanned */ 836 unsigned int ram_thresh; /* control the memory footprint */ 837 unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */ 838 unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */ 839 840 /* NAT cache management */ 841 struct radix_tree_root nat_root;/* root of the nat entry cache */ 842 struct radix_tree_root nat_set_root;/* root of the nat set cache */ 843 struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ 844 struct list_head nat_entries; /* cached nat entry list (clean) */ 845 spinlock_t nat_list_lock; /* protect clean nat entry list */ 846 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ 847 unsigned int nat_blocks; /* # of nat blocks */ 848 849 /* free node ids management */ 850 struct radix_tree_root free_nid_root;/* root of the free_nid cache */ 851 struct list_head free_nid_list; /* list for free nids excluding preallocated nids */ 852 unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */ 853 spinlock_t nid_list_lock; /* protect nid lists ops */ 854 struct mutex build_lock; /* lock for build free nids */ 855 unsigned char **free_nid_bitmap; 856 unsigned char *nat_block_bitmap; 857 unsigned short *free_nid_count; /* free nid count of NAT block */ 858 859 /* for checkpoint */ 860 char *nat_bitmap; /* NAT bitmap pointer */ 861 862 unsigned int nat_bits_blocks; /* # of nat bits blocks */ 863 unsigned char *nat_bits; /* NAT bits blocks */ 
864 unsigned char *full_nat_bits; /* full NAT pages */ 865 unsigned char *empty_nat_bits; /* empty NAT pages */ 866 #ifdef CONFIG_F2FS_CHECK_FS 867 char *nat_bitmap_mir; /* NAT bitmap mirror */ 868 #endif 869 int bitmap_size; /* bitmap size */ 870 }; 871 872 /* 873 * this structure is used as one of function parameters. 874 * all the information are dedicated to a given direct node block determined 875 * by the data offset in a file. 876 */ 877 struct dnode_of_data { 878 struct inode *inode; /* vfs inode pointer */ 879 struct page *inode_page; /* its inode page, NULL is possible */ 880 struct page *node_page; /* cached direct node page */ 881 nid_t nid; /* node id of the direct node block */ 882 unsigned int ofs_in_node; /* data offset in the node page */ 883 bool inode_page_locked; /* inode page is locked or not */ 884 bool node_changed; /* is node block changed */ 885 char cur_level; /* level of hole node page */ 886 char max_level; /* level of current page located */ 887 block_t data_blkaddr; /* block address of the node block */ 888 }; 889 890 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, 891 struct page *ipage, struct page *npage, nid_t nid) 892 { 893 memset(dn, 0, sizeof(*dn)); 894 dn->inode = inode; 895 dn->inode_page = ipage; 896 dn->node_page = npage; 897 dn->nid = nid; 898 } 899 900 /* 901 * For SIT manager 902 * 903 * By default, there are 6 active log areas across the whole main area. 904 * When considering hot and cold data separation to reduce cleaning overhead, 905 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types, 906 * respectively. 907 * In the current design, you should not change the numbers intentionally. 908 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6 909 * logs individually according to the underlying devices. (default: 6) 910 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for 911 * data and 8 for node logs. 
912 */ 913 #define NR_CURSEG_DATA_TYPE (3) 914 #define NR_CURSEG_NODE_TYPE (3) 915 #define NR_CURSEG_INMEM_TYPE (2) 916 #define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) 917 #define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE) 918 919 enum { 920 CURSEG_HOT_DATA = 0, /* directory entry blocks */ 921 CURSEG_WARM_DATA, /* data blocks */ 922 CURSEG_COLD_DATA, /* multimedia or GCed data blocks */ 923 CURSEG_HOT_NODE, /* direct node blocks of directory files */ 924 CURSEG_WARM_NODE, /* direct node blocks of normal files */ 925 CURSEG_COLD_NODE, /* indirect node blocks */ 926 NR_PERSISTENT_LOG, /* number of persistent log */ 927 CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG, 928 /* pinned file that needs consecutive block address */ 929 CURSEG_ALL_DATA_ATGC, /* SSR alloctor in hot/warm/cold data area */ 930 NO_CHECK_TYPE, /* number of persistent & inmem log */ 931 }; 932 933 struct flush_cmd { 934 struct completion wait; 935 struct llist_node llnode; 936 nid_t ino; 937 int ret; 938 }; 939 940 struct flush_cmd_control { 941 struct task_struct *f2fs_issue_flush; /* flush thread */ 942 wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */ 943 atomic_t issued_flush; /* # of issued flushes */ 944 atomic_t queued_flush; /* # of queued flushes */ 945 struct llist_head issue_list; /* list for command issue */ 946 struct llist_node *dispatch_list; /* list for command dispatch */ 947 }; 948 949 struct f2fs_sm_info { 950 struct sit_info *sit_info; /* whole segment information */ 951 struct free_segmap_info *free_info; /* free segment information */ 952 struct dirty_seglist_info *dirty_info; /* dirty segment information */ 953 struct curseg_info *curseg_array; /* active segment information */ 954 955 struct rw_semaphore curseg_lock; /* for preventing curseg change */ 956 957 block_t seg0_blkaddr; /* block address of 0'th segment */ 958 block_t main_blkaddr; /* start block address of main area */ 959 block_t ssa_blkaddr; /* start block address of SSA area */ 960 961 unsigned int segment_count; /* total # of segments */ 962 unsigned int main_segments; /* # of segments in main area */ 963 unsigned int reserved_segments; /* # of reserved segments */ 964 unsigned int ovp_segments; /* # of overprovision segments */ 965 966 /* a threshold to reclaim prefree segments */ 967 unsigned int rec_prefree_segments; 968 969 /* for batched trimming */ 970 unsigned int trim_sections; /* # of sections to trim */ 971 972 struct list_head sit_entry_set; /* sit entry set list */ 973 974 unsigned int ipu_policy; /* in-place-update policy */ 975 unsigned int min_ipu_util; /* in-place-update threshold */ 976 unsigned int min_fsync_blocks; /* threshold for fsync */ 977 unsigned int min_seq_blocks; /* threshold for sequential blocks */ 978 unsigned int min_hot_blocks; /* threshold for hot block allocation */ 979 unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */ 980 981 /* for flush command control */ 982 struct flush_cmd_control *fcc_info; 983 984 /* for discard command control */ 985 struct discard_cmd_control *dcc_info; 986 }; 987 988 /* 989 * For superblock 990 */ 991 /* 992 * COUNT_TYPE for monitoring 993 * 994 * f2fs monitors the number of several block types such as on-writeback, 995 * dirty dentry blocks, dirty node blocks, and dirty meta blocks. 996 */ 997 #define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? 
F2FS_WB_CP_DATA : F2FS_WB_DATA) 998 enum count_type { 999 F2FS_DIRTY_DENTS, 1000 F2FS_DIRTY_DATA, 1001 F2FS_DIRTY_QDATA, 1002 F2FS_DIRTY_NODES, 1003 F2FS_DIRTY_META, 1004 F2FS_INMEM_PAGES, 1005 F2FS_DIRTY_IMETA, 1006 F2FS_WB_CP_DATA, 1007 F2FS_WB_DATA, 1008 F2FS_RD_DATA, 1009 F2FS_RD_NODE, 1010 F2FS_RD_META, 1011 F2FS_DIO_WRITE, 1012 F2FS_DIO_READ, 1013 NR_COUNT_TYPE, 1014 }; 1015 1016 /* 1017 * The below are the page types of bios used in submit_bio(). 1018 * The available types are: 1019 * DATA User data pages. It operates as async mode. 1020 * NODE Node pages. It operates as async mode. 1021 * META FS metadata pages such as SIT, NAT, CP. 1022 * NR_PAGE_TYPE The number of page types. 1023 * META_FLUSH Make sure the previous pages are written 1024 * with waiting the bio's completion 1025 * ... Only can be used with META. 1026 */ 1027 #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type)) 1028 enum page_type { 1029 DATA, 1030 NODE, 1031 META, 1032 NR_PAGE_TYPE, 1033 META_FLUSH, 1034 INMEM, /* the below types are used by tracepoints only. */ 1035 INMEM_DROP, 1036 INMEM_INVALIDATE, 1037 INMEM_REVOKE, 1038 IPU, 1039 OPU, 1040 }; 1041 1042 enum temp_type { 1043 HOT = 0, /* must be zero for meta bio */ 1044 WARM, 1045 COLD, 1046 NR_TEMP_TYPE, 1047 }; 1048 1049 enum need_lock_type { 1050 LOCK_REQ = 0, 1051 LOCK_DONE, 1052 LOCK_RETRY, 1053 }; 1054 1055 enum cp_reason_type { 1056 CP_NO_NEEDED, 1057 CP_NON_REGULAR, 1058 CP_COMPRESSED, 1059 CP_HARDLINK, 1060 CP_SB_NEED_CP, 1061 CP_WRONG_PINO, 1062 CP_NO_SPC_ROLL, 1063 CP_NODE_NEED_CP, 1064 CP_FASTBOOT_MODE, 1065 CP_SPEC_LOG_NUM, 1066 CP_RECOVER_DIR, 1067 }; 1068 1069 enum iostat_type { 1070 /* WRITE IO */ 1071 APP_DIRECT_IO, /* app direct write IOs */ 1072 APP_BUFFERED_IO, /* app buffered write IOs */ 1073 APP_WRITE_IO, /* app write IOs */ 1074 APP_MAPPED_IO, /* app mapped IOs */ 1075 FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */ 1076 FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */ 1077 FS_META_IO, /* meta IOs from kworker/reclaimer */ 1078 FS_GC_DATA_IO, /* data IOs from forground gc */ 1079 FS_GC_NODE_IO, /* node IOs from forground gc */ 1080 FS_CP_DATA_IO, /* data IOs from checkpoint */ 1081 FS_CP_NODE_IO, /* node IOs from checkpoint */ 1082 FS_CP_META_IO, /* meta IOs from checkpoint */ 1083 1084 /* READ IO */ 1085 APP_DIRECT_READ_IO, /* app direct read IOs */ 1086 APP_BUFFERED_READ_IO, /* app buffered read IOs */ 1087 APP_READ_IO, /* app read IOs */ 1088 APP_MAPPED_READ_IO, /* app mapped read IOs */ 1089 FS_DATA_READ_IO, /* data read IOs */ 1090 FS_GDATA_READ_IO, /* data read IOs from background gc */ 1091 FS_CDATA_READ_IO, /* compressed data read IOs */ 1092 FS_NODE_READ_IO, /* node read IOs */ 1093 FS_META_READ_IO, /* meta read IOs */ 1094 1095 /* other */ 1096 FS_DISCARD, /* discard */ 1097 NR_IO_TYPE, 1098 }; 1099 1100 struct f2fs_io_info { 1101 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ 1102 nid_t ino; /* inode number */ 1103 enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ 1104 enum temp_type temp; /* contains HOT/WARM/COLD */ 1105 int op; /* contains REQ_OP_ */ 1106 int op_flags; /* req_flag_bits */ 1107 block_t new_blkaddr; /* new block address to be written */ 1108 block_t old_blkaddr; /* old block address before Cow */ 1109 struct page *page; /* page to be written */ 1110 struct page *encrypted_page; /* encrypted page */ 1111 struct page *compressed_page; /* compressed page */ 1112 struct list_head list; /* serialize IOs */ 1113 bool submitted; /* indicate IO submission */ 1114 int 
need_lock; /* indicate we need to lock cp_rwsem */ 1115 bool in_list; /* indicate fio is in io_list */ 1116 bool is_por; /* indicate IO is from recovery or not */ 1117 bool retry; /* need to reallocate block address */ 1118 int compr_blocks; /* # of compressed block addresses */ 1119 bool encrypted; /* indicate file is encrypted */ 1120 enum iostat_type io_type; /* io type */ 1121 struct writeback_control *io_wbc; /* writeback control */ 1122 struct bio **bio; /* bio for ipu */ 1123 sector_t *last_block; /* last block number in bio */ 1124 unsigned char version; /* version of the node */ 1125 }; 1126 1127 struct bio_entry { 1128 struct bio *bio; 1129 struct list_head list; 1130 }; 1131 1132 #define is_read_io(rw) ((rw) == READ) 1133 struct f2fs_bio_info { 1134 struct f2fs_sb_info *sbi; /* f2fs superblock */ 1135 struct bio *bio; /* bios to merge */ 1136 sector_t last_block_in_bio; /* last block number */ 1137 struct f2fs_io_info fio; /* store buffered io info. */ 1138 struct rw_semaphore io_rwsem; /* blocking op for bio */ 1139 spinlock_t io_lock; /* serialize DATA/NODE IOs */ 1140 struct list_head io_list; /* track fios */ 1141 struct list_head bio_list; /* bio entry list head */ 1142 struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */ 1143 }; 1144 1145 #define FDEV(i) (sbi->devs[i]) 1146 #define RDEV(i) (raw_super->devs[i]) 1147 struct f2fs_dev_info { 1148 struct block_device *bdev; 1149 char path[MAX_PATH_LEN]; 1150 unsigned int total_segments; 1151 block_t start_blk; 1152 block_t end_blk; 1153 #ifdef CONFIG_BLK_DEV_ZONED 1154 unsigned int nr_blkz; /* Total number of zones */ 1155 unsigned long *blkz_seq; /* Bitmap indicating sequential zones */ 1156 block_t *zone_capacity_blocks; /* Array of zone capacity in blks */ 1157 #endif 1158 }; 1159 1160 enum inode_type { 1161 DIR_INODE, /* for dirty dir inode */ 1162 FILE_INODE, /* for dirty regular/symlink inode */ 1163 DIRTY_META, /* for all dirtied inode metadata */ 1164 ATOMIC_FILE, /* for all atomic files */ 1165 NR_INODE_TYPE, 1166 }; 1167 1168 /* for inner inode cache management */ 1169 struct inode_management { 1170 struct radix_tree_root ino_root; /* ino entry array */ 1171 spinlock_t ino_lock; /* for ino entry lock */ 1172 struct list_head ino_list; /* inode list head */ 1173 unsigned long ino_num; /* number of entries */ 1174 }; 1175 1176 /* for GC_AT */ 1177 struct atgc_management { 1178 bool atgc_enabled; /* ATGC is enabled or not */ 1179 struct rb_root_cached root; /* root of victim rb-tree */ 1180 struct list_head victim_list; /* linked with all victim entries */ 1181 unsigned int victim_count; /* victim count in rb-tree */ 1182 unsigned int candidate_ratio; /* candidate ratio */ 1183 unsigned int max_candidate_count; /* max candidate count */ 1184 unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */ 1185 unsigned long long age_threshold; /* age threshold */ 1186 }; 1187 1188 /* For s_flag in struct f2fs_sb_info */ 1189 enum { 1190 SBI_IS_DIRTY, /* dirty flag for checkpoint */ 1191 SBI_IS_CLOSE, /* specify unmounting */ 1192 SBI_NEED_FSCK, /* need fsck.f2fs to fix */ 1193 SBI_POR_DOING, /* recovery is doing or not */ 1194 SBI_NEED_SB_WRITE, /* need to recover superblock */ 1195 SBI_NEED_CP, /* need to checkpoint */ 1196 SBI_IS_SHUTDOWN, /* shutdown by ioctl */ 1197 SBI_IS_RECOVERED, /* recovered orphan/data */ 1198 SBI_CP_DISABLED, /* CP was disabled last mount */ 1199 SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */ 1200 SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */ 
1201 SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */ 1202 SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */ 1203 SBI_IS_RESIZEFS, /* resizefs is in process */ 1204 }; 1205 1206 enum { 1207 CP_TIME, 1208 REQ_TIME, 1209 DISCARD_TIME, 1210 GC_TIME, 1211 DISABLE_TIME, 1212 UMOUNT_DISCARD_TIMEOUT, 1213 MAX_TIME, 1214 }; 1215 1216 enum { 1217 GC_NORMAL, 1218 GC_IDLE_CB, 1219 GC_IDLE_GREEDY, 1220 GC_IDLE_AT, 1221 GC_URGENT_HIGH, 1222 GC_URGENT_LOW, 1223 }; 1224 1225 enum { 1226 BGGC_MODE_ON, /* background gc is on */ 1227 BGGC_MODE_OFF, /* background gc is off */ 1228 BGGC_MODE_SYNC, /* 1229 * background gc is on, migrating blocks 1230 * like foreground gc 1231 */ 1232 }; 1233 1234 enum { 1235 FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */ 1236 FS_MODE_LFS, /* use lfs allocation only */ 1237 }; 1238 1239 enum { 1240 WHINT_MODE_OFF, /* not pass down write hints */ 1241 WHINT_MODE_USER, /* try to pass down hints given by users */ 1242 WHINT_MODE_FS, /* pass down hints with F2FS policy */ 1243 }; 1244 1245 enum { 1246 ALLOC_MODE_DEFAULT, /* stay default */ 1247 ALLOC_MODE_REUSE, /* reuse segments as much as possible */ 1248 }; 1249 1250 enum fsync_mode { 1251 FSYNC_MODE_POSIX, /* fsync follows posix semantics */ 1252 FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */ 1253 FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */ 1254 }; 1255 1256 enum { 1257 COMPR_MODE_FS, /* 1258 * automatically compress compression 1259 * enabled files 1260 */ 1261 COMPR_MODE_USER, /* 1262 * automatical compression is disabled. 1263 * user can control the file compression 1264 * using ioctls 1265 */ 1266 }; 1267 1268 /* 1269 * this value is set in page as a private data which indicate that 1270 * the page is atomically written, and it is in inmem_pages list. 
1271 */ 1272 #define ATOMIC_WRITTEN_PAGE ((unsigned long)-1) 1273 #define DUMMY_WRITTEN_PAGE ((unsigned long)-2) 1274 1275 #define IS_ATOMIC_WRITTEN_PAGE(page) \ 1276 (page_private(page) == ATOMIC_WRITTEN_PAGE) 1277 #define IS_DUMMY_WRITTEN_PAGE(page) \ 1278 (page_private(page) == DUMMY_WRITTEN_PAGE) 1279 1280 #ifdef CONFIG_F2FS_IO_TRACE 1281 #define IS_IO_TRACED_PAGE(page) \ 1282 (page_private(page) > 0 && \ 1283 page_private(page) < (unsigned long)PID_MAX_LIMIT) 1284 #else 1285 #define IS_IO_TRACED_PAGE(page) (0) 1286 #endif 1287 1288 /* For compression */ 1289 enum compress_algorithm_type { 1290 COMPRESS_LZO, 1291 COMPRESS_LZ4, 1292 COMPRESS_ZSTD, 1293 COMPRESS_LZORLE, 1294 COMPRESS_MAX, 1295 }; 1296 1297 enum compress_flag { 1298 COMPRESS_CHKSUM, 1299 COMPRESS_MAX_FLAG, 1300 }; 1301 1302 #define COMPRESS_DATA_RESERVED_SIZE 4 1303 struct compress_data { 1304 __le32 clen; /* compressed data size */ 1305 __le32 chksum; /* compressed data chksum */ 1306 __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */ 1307 u8 cdata[]; /* compressed data */ 1308 }; 1309 1310 #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data)) 1311 1312 #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000 1313 1314 #define COMPRESS_LEVEL_OFFSET 8 1315 1316 /* compress context */ 1317 struct compress_ctx { 1318 struct inode *inode; /* inode the context belong to */ 1319 pgoff_t cluster_idx; /* cluster index number */ 1320 unsigned int cluster_size; /* page count in cluster */ 1321 unsigned int log_cluster_size; /* log of cluster size */ 1322 struct page **rpages; /* pages store raw data in cluster */ 1323 unsigned int nr_rpages; /* total page number in rpages */ 1324 struct page **cpages; /* pages store compressed data in cluster */ 1325 unsigned int nr_cpages; /* total page number in cpages */ 1326 void *rbuf; /* virtual mapped address on rpages */ 1327 struct compress_data *cbuf; /* virtual mapped address on cpages */ 1328 size_t rlen; /* valid data length in rbuf */ 1329 size_t clen; /* valid data length in cbuf */ 1330 void *private; /* payload buffer for specified compression algorithm */ 1331 void *private2; /* extra payload buffer */ 1332 }; 1333 1334 /* compress context for write IO path */ 1335 struct compress_io_ctx { 1336 u32 magic; /* magic number to indicate page is compressed */ 1337 struct inode *inode; /* inode the context belong to */ 1338 struct page **rpages; /* pages store raw data in cluster */ 1339 unsigned int nr_rpages; /* total page number in rpages */ 1340 atomic_t pending_pages; /* in-flight compressed page count */ 1341 }; 1342 1343 /* Context for decompressing one cluster on the read IO path */ 1344 struct decompress_io_ctx { 1345 u32 magic; /* magic number to indicate page is compressed */ 1346 struct inode *inode; /* inode the context belong to */ 1347 pgoff_t cluster_idx; /* cluster index number */ 1348 unsigned int cluster_size; /* page count in cluster */ 1349 unsigned int log_cluster_size; /* log of cluster size */ 1350 struct page **rpages; /* pages store raw data in cluster */ 1351 unsigned int nr_rpages; /* total page number in rpages */ 1352 struct page **cpages; /* pages store compressed data in cluster */ 1353 unsigned int nr_cpages; /* total page number in cpages */ 1354 struct page **tpages; /* temp pages to pad holes in cluster */ 1355 void *rbuf; /* virtual mapped address on rpages */ 1356 struct compress_data *cbuf; /* virtual mapped address on cpages */ 1357 size_t rlen; /* valid data length in rbuf */ 1358 size_t clen; /* valid data length in cbuf */ 1359 1360 /* 
1361 * The number of compressed pages remaining to be read in this cluster. 1362 * This is initially nr_cpages. It is decremented by 1 each time a page 1363 * has been read (or failed to be read). When it reaches 0, the cluster 1364 * is decompressed (or an error is reported). 1365 * 1366 * If an error occurs before all the pages have been submitted for I/O, 1367 * then this will never reach 0. In this case the I/O submitter is 1368 * responsible for calling f2fs_decompress_end_io() instead. 1369 */ 1370 atomic_t remaining_pages; 1371 1372 /* 1373 * Number of references to this decompress_io_ctx. 1374 * 1375 * One reference is held for I/O completion. This reference is dropped 1376 * after the pagecache pages are updated and unlocked -- either after 1377 * decompression (and verity if enabled), or after an error. 1378 * 1379 * In addition, each compressed page holds a reference while it is in a 1380 * bio. These references are necessary prevent compressed pages from 1381 * being freed while they are still in a bio. 1382 */ 1383 refcount_t refcnt; 1384 1385 bool failed; /* IO error occurred before decompression? */ 1386 bool need_verity; /* need fs-verity verification after decompression? */ 1387 void *private; /* payload buffer for specified decompression algorithm */ 1388 void *private2; /* extra payload buffer */ 1389 struct work_struct verity_work; /* work to verify the decompressed pages */ 1390 }; 1391 1392 #define NULL_CLUSTER ((unsigned int)(~0)) 1393 #define MIN_COMPRESS_LOG_SIZE 2 1394 #define MAX_COMPRESS_LOG_SIZE 8 1395 #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size)) 1396 1397 struct f2fs_sb_info { 1398 struct super_block *sb; /* pointer to VFS super block */ 1399 struct proc_dir_entry *s_proc; /* proc entry */ 1400 struct f2fs_super_block *raw_super; /* raw super block pointer */ 1401 struct rw_semaphore sb_lock; /* lock for raw super block */ 1402 int valid_super_block; /* valid super block no */ 1403 unsigned long s_flag; /* flags for sbi */ 1404 struct mutex writepages; /* mutex for writepages() */ 1405 1406 #ifdef CONFIG_BLK_DEV_ZONED 1407 unsigned int blocks_per_blkz; /* F2FS blocks per zone */ 1408 unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */ 1409 #endif 1410 1411 /* for node-related operations */ 1412 struct f2fs_nm_info *nm_info; /* node manager */ 1413 struct inode *node_inode; /* cache node blocks */ 1414 1415 /* for segment-related operations */ 1416 struct f2fs_sm_info *sm_info; /* segment manager */ 1417 1418 /* for bio operations */ 1419 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ 1420 /* keep migration IO order for LFS mode */ 1421 struct rw_semaphore io_order_lock; 1422 mempool_t *write_io_dummy; /* Dummy pages */ 1423 1424 /* for checkpoint */ 1425 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ 1426 int cur_cp_pack; /* remain current cp pack */ 1427 spinlock_t cp_lock; /* for flag in ckpt */ 1428 struct inode *meta_inode; /* cache meta blocks */ 1429 struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */ 1430 struct rw_semaphore cp_rwsem; /* blocking FS operations */ 1431 struct rw_semaphore node_write; /* locking node writes */ 1432 struct rw_semaphore node_change; /* locking node change */ 1433 wait_queue_head_t cp_wait; 1434 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ 1435 long interval_time[MAX_TIME]; /* to store thresholds */ 1436 1437 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ 1438 1439 spinlock_t fsync_node_lock; /* 
for node entry lock */ 1440 struct list_head fsync_node_list; /* node list head */ 1441 unsigned int fsync_seg_id; /* sequence id */ 1442 unsigned int fsync_node_num; /* number of node entries */ 1443 1444 /* for orphan inode, use 0'th array */ 1445 unsigned int max_orphans; /* max orphan inodes */ 1446 1447 /* for inode management */ 1448 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */ 1449 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ 1450 struct mutex flush_lock; /* for flush exclusion */ 1451 1452 /* for extent tree cache */ 1453 struct radix_tree_root extent_tree_root;/* cache extent cache entries */ 1454 struct mutex extent_tree_lock; /* locking extent radix tree */ 1455 struct list_head extent_list; /* lru list for shrinker */ 1456 spinlock_t extent_lock; /* locking extent lru list */ 1457 atomic_t total_ext_tree; /* extent tree count */ 1458 struct list_head zombie_list; /* extent zombie tree list */ 1459 atomic_t total_zombie_tree; /* extent zombie tree count */ 1460 atomic_t total_ext_node; /* extent info count */ 1461 1462 /* basic filesystem units */ 1463 unsigned int log_sectors_per_block; /* log2 sectors per block */ 1464 unsigned int log_blocksize; /* log2 block size */ 1465 unsigned int blocksize; /* block size */ 1466 unsigned int root_ino_num; /* root inode number*/ 1467 unsigned int node_ino_num; /* node inode number*/ 1468 unsigned int meta_ino_num; /* meta inode number*/ 1469 unsigned int log_blocks_per_seg; /* log2 blocks per segment */ 1470 unsigned int blocks_per_seg; /* blocks per segment */ 1471 unsigned int segs_per_sec; /* segments per section */ 1472 unsigned int secs_per_zone; /* sections per zone */ 1473 unsigned int total_sections; /* total section count */ 1474 unsigned int total_node_count; /* total node block count */ 1475 unsigned int total_valid_node_count; /* valid node block count */ 1476 int dir_level; /* directory level */ 1477 int readdir_ra; /* readahead inode in readdir */ 1478 u64 max_io_bytes; /* max io bytes to merge IOs */ 1479 1480 block_t user_block_count; /* # of user blocks */ 1481 block_t total_valid_block_count; /* # of valid blocks */ 1482 block_t discard_blks; /* discard command candidats */ 1483 block_t last_valid_block_count; /* for recovery */ 1484 block_t reserved_blocks; /* configurable reserved blocks */ 1485 block_t current_reserved_blocks; /* current reserved blocks */ 1486 1487 /* Additional tracking for no checkpoint mode */ 1488 block_t unusable_block_count; /* # of blocks saved by last cp */ 1489 1490 unsigned int nquota_files; /* # of quota sysfile */ 1491 struct rw_semaphore quota_sem; /* blocking cp for flags */ 1492 1493 /* # of pages, see count_type */ 1494 atomic_t nr_pages[NR_COUNT_TYPE]; 1495 /* # of allocated blocks */ 1496 struct percpu_counter alloc_valid_block_count; 1497 1498 /* writeback control */ 1499 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ 1500 1501 /* valid inode count */ 1502 struct percpu_counter total_valid_inode_count; 1503 1504 struct f2fs_mount_info mount_opt; /* mount options */ 1505 1506 /* for cleaning operations */ 1507 struct rw_semaphore gc_lock; /* 1508 * semaphore for GC, avoid 1509 * race between GC and GC or CP 1510 */ 1511 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 1512 struct atgc_management am; /* atgc management */ 1513 unsigned int cur_victim_sec; /* current victim section num */ 1514 unsigned int gc_mode; /* current GC state */ 1515 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1516 1517 
/* for skip statistic */ 1518 unsigned int atomic_files; /* # of opened atomic file */ 1519 unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ 1520 unsigned long long skipped_gc_rwsem; /* FG_GC only */ 1521 1522 /* threshold for gc trials on pinned files */ 1523 u64 gc_pin_file_threshold; 1524 struct rw_semaphore pin_sem; 1525 1526 /* maximum # of trials to find a victim segment for SSR and GC */ 1527 unsigned int max_victim_search; 1528 /* migration granularity of garbage collection, unit: segment */ 1529 unsigned int migration_granularity; 1530 1531 /* 1532 * for stat information. 1533 * one is for the LFS mode, and the other is for the SSR mode. 1534 */ 1535 #ifdef CONFIG_F2FS_STAT_FS 1536 struct f2fs_stat_info *stat_info; /* FS status information */ 1537 atomic_t meta_count[META_MAX]; /* # of meta blocks */ 1538 unsigned int segment_count[2]; /* # of allocated segments */ 1539 unsigned int block_count[2]; /* # of allocated blocks */ 1540 atomic_t inplace_count; /* # of inplace update */ 1541 atomic64_t total_hit_ext; /* # of lookup extent cache */ 1542 atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */ 1543 atomic64_t read_hit_largest; /* # of hit largest extent node */ 1544 atomic64_t read_hit_cached; /* # of hit cached extent node */ 1545 atomic_t inline_xattr; /* # of inline_xattr inodes */ 1546 atomic_t inline_inode; /* # of inline_data inodes */ 1547 atomic_t inline_dir; /* # of inline_dentry inodes */ 1548 atomic_t compr_inode; /* # of compressed inodes */ 1549 atomic64_t compr_blocks; /* # of compressed blocks */ 1550 atomic_t vw_cnt; /* # of volatile writes */ 1551 atomic_t max_aw_cnt; /* max # of atomic writes */ 1552 atomic_t max_vw_cnt; /* max # of volatile writes */ 1553 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ 1554 unsigned int other_skip_bggc; /* skip background gc for other reasons */ 1555 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ 1556 #endif 1557 spinlock_t stat_lock; /* lock for stat operations */ 1558 1559 /* For app/fs IO statistics */ 1560 spinlock_t iostat_lock; 1561 unsigned long long rw_iostat[NR_IO_TYPE]; 1562 unsigned long long prev_rw_iostat[NR_IO_TYPE]; 1563 bool iostat_enable; 1564 unsigned long iostat_next_period; 1565 unsigned int iostat_period_ms; 1566 1567 /* to attach REQ_META|REQ_FUA flags */ 1568 unsigned int data_io_flag; 1569 unsigned int node_io_flag; 1570 1571 /* For sysfs suppport */ 1572 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ 1573 struct completion s_kobj_unregister; 1574 1575 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ 1576 struct completion s_stat_kobj_unregister; 1577 1578 /* For shrinker support */ 1579 struct list_head s_list; 1580 int s_ndevs; /* number of devices */ 1581 struct f2fs_dev_info *devs; /* for device list */ 1582 unsigned int dirty_device; /* for checkpoint data flush */ 1583 spinlock_t dev_lock; /* protect dirty_device */ 1584 struct mutex umount_mutex; 1585 unsigned int shrinker_run_no; 1586 1587 /* For write statistics */ 1588 u64 sectors_written_start; 1589 u64 kbytes_written; 1590 1591 /* Reference to checksum algorithm driver via cryptoapi */ 1592 struct crypto_shash *s_chksum_driver; 1593 1594 /* Precomputed FS UUID checksum for seeding other checksums */ 1595 __u32 s_chksum_seed; 1596 1597 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1598 1599 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 1600 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ 1601 1602 
#ifdef CONFIG_F2FS_FS_COMPRESSION 1603 struct kmem_cache *page_array_slab; /* page array entry */ 1604 unsigned int page_array_slab_size; /* default page array slab size */ 1605 #endif 1606 }; 1607 1608 struct f2fs_private_dio { 1609 struct inode *inode; 1610 void *orig_private; 1611 bio_end_io_t *orig_end_io; 1612 bool write; 1613 }; 1614 1615 #ifdef CONFIG_F2FS_FAULT_INJECTION 1616 #define f2fs_show_injection_info(sbi, type) \ 1617 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \ 1618 KERN_INFO, sbi->sb->s_id, \ 1619 f2fs_fault_name[type], \ 1620 __func__, __builtin_return_address(0)) 1621 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1622 { 1623 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1624 1625 if (!ffi->inject_rate) 1626 return false; 1627 1628 if (!IS_FAULT_SET(ffi, type)) 1629 return false; 1630 1631 atomic_inc(&ffi->inject_ops); 1632 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1633 atomic_set(&ffi->inject_ops, 0); 1634 return true; 1635 } 1636 return false; 1637 } 1638 #else 1639 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1640 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1641 { 1642 return false; 1643 } 1644 #endif 1645 1646 /* 1647 * Test if the mounted volume is a multi-device volume. 1648 * - For a single regular disk volume, sbi->s_ndevs is 0. 1649 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1650 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 1651 */ 1652 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1653 { 1654 return sbi->s_ndevs > 1; 1655 } 1656 1657 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1658 { 1659 unsigned long now = jiffies; 1660 1661 sbi->last_time[type] = now; 1662 1663 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1664 if (type == REQ_TIME) { 1665 sbi->last_time[DISCARD_TIME] = now; 1666 sbi->last_time[GC_TIME] = now; 1667 } 1668 } 1669 1670 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1671 { 1672 unsigned long interval = sbi->interval_time[type] * HZ; 1673 1674 return time_after(jiffies, sbi->last_time[type] + interval); 1675 } 1676 1677 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1678 int type) 1679 { 1680 unsigned long interval = sbi->interval_time[type] * HZ; 1681 unsigned int wait_ms = 0; 1682 long delta; 1683 1684 delta = (sbi->last_time[type] + interval) - jiffies; 1685 if (delta > 0) 1686 wait_ms = jiffies_to_msecs(delta); 1687 1688 return wait_ms; 1689 } 1690 1691 /* 1692 * Inline functions 1693 */ 1694 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1695 const void *address, unsigned int length) 1696 { 1697 struct { 1698 struct shash_desc shash; 1699 char ctx[4]; 1700 } desc; 1701 int err; 1702 1703 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1704 1705 desc.shash.tfm = sbi->s_chksum_driver; 1706 *(u32 *)desc.ctx = crc; 1707 1708 err = crypto_shash_update(&desc.shash, address, length); 1709 BUG_ON(err); 1710 1711 return *(u32 *)desc.ctx; 1712 } 1713 1714 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1715 unsigned int length) 1716 { 1717 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1718 } 1719 1720 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1721 void *buf, size_t buf_size) 1722 { 1723 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1724 } 1725 1726 static inline u32 
f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1727 const void *address, unsigned int length) 1728 { 1729 return __f2fs_crc32(sbi, crc, address, length); 1730 } 1731 1732 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1733 { 1734 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1735 } 1736 1737 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1738 { 1739 return sb->s_fs_info; 1740 } 1741 1742 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1743 { 1744 return F2FS_SB(inode->i_sb); 1745 } 1746 1747 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1748 { 1749 return F2FS_I_SB(mapping->host); 1750 } 1751 1752 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1753 { 1754 return F2FS_M_SB(page_file_mapping(page)); 1755 } 1756 1757 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1758 { 1759 return (struct f2fs_super_block *)(sbi->raw_super); 1760 } 1761 1762 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1763 { 1764 return (struct f2fs_checkpoint *)(sbi->ckpt); 1765 } 1766 1767 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1768 { 1769 return (struct f2fs_node *)page_address(page); 1770 } 1771 1772 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1773 { 1774 return &((struct f2fs_node *)page_address(page))->i; 1775 } 1776 1777 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 1778 { 1779 return (struct f2fs_nm_info *)(sbi->nm_info); 1780 } 1781 1782 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1783 { 1784 return (struct f2fs_sm_info *)(sbi->sm_info); 1785 } 1786 1787 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1788 { 1789 return (struct sit_info *)(SM_I(sbi)->sit_info); 1790 } 1791 1792 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1793 { 1794 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1795 } 1796 1797 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1798 { 1799 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 1800 } 1801 1802 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 1803 { 1804 return sbi->meta_inode->i_mapping; 1805 } 1806 1807 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 1808 { 1809 return sbi->node_inode->i_mapping; 1810 } 1811 1812 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 1813 { 1814 return test_bit(type, &sbi->s_flag); 1815 } 1816 1817 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1818 { 1819 set_bit(type, &sbi->s_flag); 1820 } 1821 1822 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1823 { 1824 clear_bit(type, &sbi->s_flag); 1825 } 1826 1827 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 1828 { 1829 return le64_to_cpu(cp->checkpoint_ver); 1830 } 1831 1832 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 1833 { 1834 if (type < F2FS_MAX_QUOTAS) 1835 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 1836 return 0; 1837 } 1838 1839 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 1840 { 1841 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 1842 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 1843 } 1844 1845 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1846 { 1847 
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1848 1849 return ckpt_flags & f; 1850 } 1851 1852 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1853 { 1854 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 1855 } 1856 1857 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1858 { 1859 unsigned int ckpt_flags; 1860 1861 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1862 ckpt_flags |= f; 1863 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1864 } 1865 1866 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1867 { 1868 unsigned long flags; 1869 1870 spin_lock_irqsave(&sbi->cp_lock, flags); 1871 __set_ckpt_flags(F2FS_CKPT(sbi), f); 1872 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1873 } 1874 1875 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1876 { 1877 unsigned int ckpt_flags; 1878 1879 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1880 ckpt_flags &= (~f); 1881 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1882 } 1883 1884 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1885 { 1886 unsigned long flags; 1887 1888 spin_lock_irqsave(&sbi->cp_lock, flags); 1889 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 1890 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1891 } 1892 1893 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) 1894 { 1895 unsigned long flags; 1896 unsigned char *nat_bits; 1897 1898 /* 1899 * Re-enabling nat_bits would require forcing fsck.f2fs to run via 1900 * set_sbi_flag(sbi, SBI_NEED_FSCK), but that may incur a huge cost, 1901 * so let's rely on a regular fsck or an unclean shutdown instead. 1902 */ 1903 1904 if (lock) 1905 spin_lock_irqsave(&sbi->cp_lock, flags); 1906 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); 1907 nat_bits = NM_I(sbi)->nat_bits; 1908 NM_I(sbi)->nat_bits = NULL; 1909 if (lock) 1910 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1911 1912 kvfree(nat_bits); 1913 } 1914 1915 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, 1916 struct cp_control *cpc) 1917 { 1918 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); 1919 1920 return (cpc) ?
(cpc->reason & CP_UMOUNT) && set : set; 1921 } 1922 1923 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 1924 { 1925 down_read(&sbi->cp_rwsem); 1926 } 1927 1928 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 1929 { 1930 return down_read_trylock(&sbi->cp_rwsem); 1931 } 1932 1933 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 1934 { 1935 up_read(&sbi->cp_rwsem); 1936 } 1937 1938 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 1939 { 1940 down_write(&sbi->cp_rwsem); 1941 } 1942 1943 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 1944 { 1945 up_write(&sbi->cp_rwsem); 1946 } 1947 1948 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 1949 { 1950 int reason = CP_SYNC; 1951 1952 if (test_opt(sbi, FASTBOOT)) 1953 reason = CP_FASTBOOT; 1954 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 1955 reason = CP_UMOUNT; 1956 return reason; 1957 } 1958 1959 static inline bool __remain_node_summaries(int reason) 1960 { 1961 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 1962 } 1963 1964 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 1965 { 1966 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 1967 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 1968 } 1969 1970 /* 1971 * Check whether the inode has blocks or not 1972 */ 1973 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 1974 { 1975 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 1976 1977 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 1978 } 1979 1980 static inline bool f2fs_has_xattr_block(unsigned int ofs) 1981 { 1982 return ofs == XATTR_NODE_OFFSET; 1983 } 1984 1985 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 1986 struct inode *inode, bool cap) 1987 { 1988 if (!inode) 1989 return true; 1990 if (!test_opt(sbi, RESERVE_ROOT)) 1991 return false; 1992 if (IS_NOQUOTA(inode)) 1993 return true; 1994 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 1995 return true; 1996 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 1997 in_group_p(F2FS_OPTION(sbi).s_resgid)) 1998 return true; 1999 if (cap && capable(CAP_SYS_RESOURCE)) 2000 return true; 2001 return false; 2002 } 2003 2004 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2005 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2006 struct inode *inode, blkcnt_t *count) 2007 { 2008 blkcnt_t diff = 0, release = 0; 2009 block_t avail_user_block_count; 2010 int ret; 2011 2012 ret = dquot_reserve_block(inode, *count); 2013 if (ret) 2014 return ret; 2015 2016 if (time_to_inject(sbi, FAULT_BLOCK)) { 2017 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2018 release = *count; 2019 goto release_quota; 2020 } 2021 2022 /* 2023 * let's increase this prior to the actual block count change in order 2024 * for f2fs_sync_file to avoid data races when deciding checkpoint.
2025 */ 2026 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2027 2028 spin_lock(&sbi->stat_lock); 2029 sbi->total_valid_block_count += (block_t)(*count); 2030 avail_user_block_count = sbi->user_block_count - 2031 sbi->current_reserved_blocks; 2032 2033 if (!__allow_reserved_blocks(sbi, inode, true)) 2034 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2035 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2036 if (avail_user_block_count > sbi->unusable_block_count) 2037 avail_user_block_count -= sbi->unusable_block_count; 2038 else 2039 avail_user_block_count = 0; 2040 } 2041 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2042 diff = sbi->total_valid_block_count - avail_user_block_count; 2043 if (diff > *count) 2044 diff = *count; 2045 *count -= diff; 2046 release = diff; 2047 sbi->total_valid_block_count -= diff; 2048 if (!*count) { 2049 spin_unlock(&sbi->stat_lock); 2050 goto enospc; 2051 } 2052 } 2053 spin_unlock(&sbi->stat_lock); 2054 2055 if (unlikely(release)) { 2056 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2057 dquot_release_reservation_block(inode, release); 2058 } 2059 f2fs_i_blocks_write(inode, *count, true, true); 2060 return 0; 2061 2062 enospc: 2063 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2064 release_quota: 2065 dquot_release_reservation_block(inode, release); 2066 return -ENOSPC; 2067 } 2068 2069 __printf(2, 3) 2070 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2071 2072 #define f2fs_err(sbi, fmt, ...) \ 2073 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2074 #define f2fs_warn(sbi, fmt, ...) \ 2075 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2076 #define f2fs_notice(sbi, fmt, ...) \ 2077 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2078 #define f2fs_info(sbi, fmt, ...) \ 2079 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2080 #define f2fs_debug(sbi, fmt, ...) \ 2081 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2082 2083 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2084 struct inode *inode, 2085 block_t count) 2086 { 2087 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2088 2089 spin_lock(&sbi->stat_lock); 2090 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2091 sbi->total_valid_block_count -= (block_t)count; 2092 if (sbi->reserved_blocks && 2093 sbi->current_reserved_blocks < sbi->reserved_blocks) 2094 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2095 sbi->current_reserved_blocks + count); 2096 spin_unlock(&sbi->stat_lock); 2097 if (unlikely(inode->i_blocks < sectors)) { 2098 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2099 inode->i_ino, 2100 (unsigned long long)inode->i_blocks, 2101 (unsigned long long)sectors); 2102 set_sbi_flag(sbi, SBI_NEED_FSCK); 2103 return; 2104 } 2105 f2fs_i_blocks_write(inode, count, false, true); 2106 } 2107 2108 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2109 { 2110 atomic_inc(&sbi->nr_pages[count_type]); 2111 2112 if (count_type == F2FS_DIRTY_DENTS || 2113 count_type == F2FS_DIRTY_NODES || 2114 count_type == F2FS_DIRTY_META || 2115 count_type == F2FS_DIRTY_QDATA || 2116 count_type == F2FS_DIRTY_IMETA) 2117 set_sbi_flag(sbi, SBI_IS_DIRTY); 2118 } 2119 2120 static inline void inode_inc_dirty_pages(struct inode *inode) 2121 { 2122 atomic_inc(&F2FS_I(inode)->dirty_pages); 2123 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2124 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2125 if (IS_NOQUOTA(inode)) 2126 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2127 } 2128 2129 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2130 { 2131 atomic_dec(&sbi->nr_pages[count_type]); 2132 } 2133 2134 static inline void inode_dec_dirty_pages(struct inode *inode) 2135 { 2136 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2137 !S_ISLNK(inode->i_mode)) 2138 return; 2139 2140 atomic_dec(&F2FS_I(inode)->dirty_pages); 2141 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2142 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2143 if (IS_NOQUOTA(inode)) 2144 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2145 } 2146 2147 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2148 { 2149 return atomic_read(&sbi->nr_pages[count_type]); 2150 } 2151 2152 static inline int get_dirty_pages(struct inode *inode) 2153 { 2154 return atomic_read(&F2FS_I(inode)->dirty_pages); 2155 } 2156 2157 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2158 { 2159 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2160 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2161 sbi->log_blocks_per_seg; 2162 2163 return segs / sbi->segs_per_sec; 2164 } 2165 2166 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2167 { 2168 return sbi->total_valid_block_count; 2169 } 2170 2171 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2172 { 2173 return sbi->discard_blks; 2174 } 2175 2176 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2177 { 2178 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2179 2180 /* return NAT or SIT bitmap */ 2181 if (flag == NAT_BITMAP) 2182 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2183 else if (flag == SIT_BITMAP) 2184 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2185 2186 return 0; 2187 } 2188 2189 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2190 { 2191 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2192 } 2193 2194 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2195 { 2196 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2197 int offset; 2198 2199 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2200 offset = (flag == SIT_BITMAP) ? 2201 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2202 /* 2203 * if large_nat_bitmap feature is enabled, leave checksum 2204 * protection for all nat/sit bitmaps. 2205 */ 2206 return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); 2207 } 2208 2209 if (__cp_payload(sbi) > 0) { 2210 if (flag == NAT_BITMAP) 2211 return &ckpt->sit_nat_version_bitmap; 2212 else 2213 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2214 } else { 2215 offset = (flag == NAT_BITMAP) ? 
2216 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2217 return &ckpt->sit_nat_version_bitmap + offset; 2218 } 2219 } 2220 2221 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2222 { 2223 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2224 2225 if (sbi->cur_cp_pack == 2) 2226 start_addr += sbi->blocks_per_seg; 2227 return start_addr; 2228 } 2229 2230 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2231 { 2232 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2233 2234 if (sbi->cur_cp_pack == 1) 2235 start_addr += sbi->blocks_per_seg; 2236 return start_addr; 2237 } 2238 2239 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2240 { 2241 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; 2242 } 2243 2244 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2245 { 2246 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2247 } 2248 2249 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2250 struct inode *inode, bool is_inode) 2251 { 2252 block_t valid_block_count; 2253 unsigned int valid_node_count, user_block_count; 2254 int err; 2255 2256 if (is_inode) { 2257 if (inode) { 2258 err = dquot_alloc_inode(inode); 2259 if (err) 2260 return err; 2261 } 2262 } else { 2263 err = dquot_reserve_block(inode, 1); 2264 if (err) 2265 return err; 2266 } 2267 2268 if (time_to_inject(sbi, FAULT_BLOCK)) { 2269 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2270 goto enospc; 2271 } 2272 2273 spin_lock(&sbi->stat_lock); 2274 2275 valid_block_count = sbi->total_valid_block_count + 2276 sbi->current_reserved_blocks + 1; 2277 2278 if (!__allow_reserved_blocks(sbi, inode, false)) 2279 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2280 user_block_count = sbi->user_block_count; 2281 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2282 user_block_count -= sbi->unusable_block_count; 2283 2284 if (unlikely(valid_block_count > user_block_count)) { 2285 spin_unlock(&sbi->stat_lock); 2286 goto enospc; 2287 } 2288 2289 valid_node_count = sbi->total_valid_node_count + 1; 2290 if (unlikely(valid_node_count > sbi->total_node_count)) { 2291 spin_unlock(&sbi->stat_lock); 2292 goto enospc; 2293 } 2294 2295 sbi->total_valid_node_count++; 2296 sbi->total_valid_block_count++; 2297 spin_unlock(&sbi->stat_lock); 2298 2299 if (inode) { 2300 if (is_inode) 2301 f2fs_mark_inode_dirty_sync(inode, true); 2302 else 2303 f2fs_i_blocks_write(inode, 1, true, true); 2304 } 2305 2306 percpu_counter_inc(&sbi->alloc_valid_block_count); 2307 return 0; 2308 2309 enospc: 2310 if (is_inode) { 2311 if (inode) 2312 dquot_free_inode(inode); 2313 } else { 2314 dquot_release_reservation_block(inode, 1); 2315 } 2316 return -ENOSPC; 2317 } 2318 2319 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2320 struct inode *inode, bool is_inode) 2321 { 2322 spin_lock(&sbi->stat_lock); 2323 2324 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2325 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2326 2327 sbi->total_valid_node_count--; 2328 sbi->total_valid_block_count--; 2329 if (sbi->reserved_blocks && 2330 sbi->current_reserved_blocks < sbi->reserved_blocks) 2331 sbi->current_reserved_blocks++; 2332 2333 spin_unlock(&sbi->stat_lock); 2334 2335 if (is_inode) { 2336 dquot_free_inode(inode); 2337 } else { 2338 if (unlikely(inode->i_blocks == 0)) { 2339 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2340 inode->i_ino, 2341 (unsigned long long)inode->i_blocks); 2342 set_sbi_flag(sbi, 
SBI_NEED_FSCK); 2343 return; 2344 } 2345 f2fs_i_blocks_write(inode, 1, false, true); 2346 } 2347 } 2348 2349 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2350 { 2351 return sbi->total_valid_node_count; 2352 } 2353 2354 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2355 { 2356 percpu_counter_inc(&sbi->total_valid_inode_count); 2357 } 2358 2359 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2360 { 2361 percpu_counter_dec(&sbi->total_valid_inode_count); 2362 } 2363 2364 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2365 { 2366 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2367 } 2368 2369 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2370 pgoff_t index, bool for_write) 2371 { 2372 struct page *page; 2373 2374 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2375 if (!for_write) 2376 page = find_get_page_flags(mapping, index, 2377 FGP_LOCK | FGP_ACCESSED); 2378 else 2379 page = find_lock_page(mapping, index); 2380 if (page) 2381 return page; 2382 2383 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2384 f2fs_show_injection_info(F2FS_M_SB(mapping), 2385 FAULT_PAGE_ALLOC); 2386 return NULL; 2387 } 2388 } 2389 2390 if (!for_write) 2391 return grab_cache_page(mapping, index); 2392 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2393 } 2394 2395 static inline struct page *f2fs_pagecache_get_page( 2396 struct address_space *mapping, pgoff_t index, 2397 int fgp_flags, gfp_t gfp_mask) 2398 { 2399 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2400 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2401 return NULL; 2402 } 2403 2404 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2405 } 2406 2407 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2408 { 2409 char *src_kaddr = kmap(src); 2410 char *dst_kaddr = kmap(dst); 2411 2412 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2413 kunmap(dst); 2414 kunmap(src); 2415 } 2416 2417 static inline void f2fs_put_page(struct page *page, int unlock) 2418 { 2419 if (!page) 2420 return; 2421 2422 if (unlock) { 2423 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2424 unlock_page(page); 2425 } 2426 put_page(page); 2427 } 2428 2429 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2430 { 2431 if (dn->node_page) 2432 f2fs_put_page(dn->node_page, 1); 2433 if (dn->inode_page && dn->node_page != dn->inode_page) 2434 f2fs_put_page(dn->inode_page, 0); 2435 dn->node_page = NULL; 2436 dn->inode_page = NULL; 2437 } 2438 2439 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2440 size_t size) 2441 { 2442 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2443 } 2444 2445 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2446 gfp_t flags) 2447 { 2448 void *entry; 2449 2450 entry = kmem_cache_alloc(cachep, flags); 2451 if (!entry) 2452 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2453 return entry; 2454 } 2455 2456 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2457 { 2458 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2459 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2460 get_pages(sbi, F2FS_WB_CP_DATA) || 2461 get_pages(sbi, F2FS_DIO_READ) || 2462 get_pages(sbi, F2FS_DIO_WRITE)) 2463 return true; 2464 2465 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2466 
atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2467 return true; 2468 2469 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2470 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2471 return true; 2472 return false; 2473 } 2474 2475 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2476 { 2477 if (sbi->gc_mode == GC_URGENT_HIGH) 2478 return true; 2479 2480 if (is_inflight_io(sbi, type)) 2481 return false; 2482 2483 if (sbi->gc_mode == GC_URGENT_LOW && 2484 (type == DISCARD_TIME || type == GC_TIME)) 2485 return true; 2486 2487 return f2fs_time_over(sbi, type); 2488 } 2489 2490 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2491 unsigned long index, void *item) 2492 { 2493 while (radix_tree_insert(root, index, item)) 2494 cond_resched(); 2495 } 2496 2497 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2498 2499 static inline bool IS_INODE(struct page *page) 2500 { 2501 struct f2fs_node *p = F2FS_NODE(page); 2502 2503 return RAW_IS_INODE(p); 2504 } 2505 2506 static inline int offset_in_addr(struct f2fs_inode *i) 2507 { 2508 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2509 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2510 } 2511 2512 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2513 { 2514 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2515 } 2516 2517 static inline int f2fs_has_extra_attr(struct inode *inode); 2518 static inline block_t data_blkaddr(struct inode *inode, 2519 struct page *node_page, unsigned int offset) 2520 { 2521 struct f2fs_node *raw_node; 2522 __le32 *addr_array; 2523 int base = 0; 2524 bool is_inode = IS_INODE(node_page); 2525 2526 raw_node = F2FS_NODE(node_page); 2527 2528 if (is_inode) { 2529 if (!inode) 2530 /* from GC path only */ 2531 base = offset_in_addr(&raw_node->i); 2532 else if (f2fs_has_extra_attr(inode)) 2533 base = get_extra_isize(inode); 2534 } 2535 2536 addr_array = blkaddr_in_node(raw_node); 2537 return le32_to_cpu(addr_array[base + offset]); 2538 } 2539 2540 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2541 { 2542 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2543 } 2544 2545 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2546 { 2547 int mask; 2548 2549 addr += (nr >> 3); 2550 mask = 1 << (7 - (nr & 0x07)); 2551 return mask & *addr; 2552 } 2553 2554 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2555 { 2556 int mask; 2557 2558 addr += (nr >> 3); 2559 mask = 1 << (7 - (nr & 0x07)); 2560 *addr |= mask; 2561 } 2562 2563 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2564 { 2565 int mask; 2566 2567 addr += (nr >> 3); 2568 mask = 1 << (7 - (nr & 0x07)); 2569 *addr &= ~mask; 2570 } 2571 2572 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2573 { 2574 int mask; 2575 int ret; 2576 2577 addr += (nr >> 3); 2578 mask = 1 << (7 - (nr & 0x07)); 2579 ret = mask & *addr; 2580 *addr |= mask; 2581 return ret; 2582 } 2583 2584 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2585 { 2586 int mask; 2587 int ret; 2588 2589 addr += (nr >> 3); 2590 mask = 1 << (7 - (nr & 0x07)); 2591 ret = mask & *addr; 2592 *addr &= ~mask; 2593 return ret; 2594 } 2595 2596 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2597 { 2598 int mask; 2599 2600 addr += (nr >> 3); 2601 mask = 1 << (7 - (nr & 0x07)); 2602 *addr ^= mask; 2603 } 2604 2605 /* 2606 * On-disk inode flags (f2fs_inode::i_flags) 2607 */ 2608 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2609 #define 
F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2610 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2611 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2612 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2613 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2614 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2615 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2616 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2617 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2618 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2619 2620 /* Flags that should be inherited by new inodes from their parent. */ 2621 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2622 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2623 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2624 2625 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2626 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2627 F2FS_CASEFOLD_FL)) 2628 2629 /* Flags that are appropriate for non-directories/regular files. */ 2630 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2631 2632 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2633 { 2634 if (S_ISDIR(mode)) 2635 return flags; 2636 else if (S_ISREG(mode)) 2637 return flags & F2FS_REG_FLMASK; 2638 else 2639 return flags & F2FS_OTHER_FLMASK; 2640 } 2641 2642 static inline void __mark_inode_dirty_flag(struct inode *inode, 2643 int flag, bool set) 2644 { 2645 switch (flag) { 2646 case FI_INLINE_XATTR: 2647 case FI_INLINE_DATA: 2648 case FI_INLINE_DENTRY: 2649 case FI_NEW_INODE: 2650 if (set) 2651 return; 2652 fallthrough; 2653 case FI_DATA_EXIST: 2654 case FI_INLINE_DOTS: 2655 case FI_PIN_FILE: 2656 f2fs_mark_inode_dirty_sync(inode, true); 2657 } 2658 } 2659 2660 static inline void set_inode_flag(struct inode *inode, int flag) 2661 { 2662 set_bit(flag, F2FS_I(inode)->flags); 2663 __mark_inode_dirty_flag(inode, flag, true); 2664 } 2665 2666 static inline int is_inode_flag_set(struct inode *inode, int flag) 2667 { 2668 return test_bit(flag, F2FS_I(inode)->flags); 2669 } 2670 2671 static inline void clear_inode_flag(struct inode *inode, int flag) 2672 { 2673 clear_bit(flag, F2FS_I(inode)->flags); 2674 __mark_inode_dirty_flag(inode, flag, false); 2675 } 2676 2677 static inline bool f2fs_verity_in_progress(struct inode *inode) 2678 { 2679 return IS_ENABLED(CONFIG_FS_VERITY) && 2680 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2681 } 2682 2683 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2684 { 2685 F2FS_I(inode)->i_acl_mode = mode; 2686 set_inode_flag(inode, FI_ACL_MODE); 2687 f2fs_mark_inode_dirty_sync(inode, false); 2688 } 2689 2690 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2691 { 2692 if (inc) 2693 inc_nlink(inode); 2694 else 2695 drop_nlink(inode); 2696 f2fs_mark_inode_dirty_sync(inode, true); 2697 } 2698 2699 static inline void f2fs_i_blocks_write(struct inode *inode, 2700 block_t diff, bool add, bool claim) 2701 { 2702 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2703 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2704 2705 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2706 if (add) { 2707 if (claim) 2708 dquot_claim_block(inode, diff); 2709 else 2710 dquot_alloc_block_nofail(inode, diff); 2711 } else { 2712 dquot_free_block(inode, diff); 2713 } 2714 2715 
f2fs_mark_inode_dirty_sync(inode, true); 2716 if (clean || recover) 2717 set_inode_flag(inode, FI_AUTO_RECOVER); 2718 } 2719 2720 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2721 { 2722 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2723 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2724 2725 if (i_size_read(inode) == i_size) 2726 return; 2727 2728 i_size_write(inode, i_size); 2729 f2fs_mark_inode_dirty_sync(inode, true); 2730 if (clean || recover) 2731 set_inode_flag(inode, FI_AUTO_RECOVER); 2732 } 2733 2734 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2735 { 2736 F2FS_I(inode)->i_current_depth = depth; 2737 f2fs_mark_inode_dirty_sync(inode, true); 2738 } 2739 2740 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2741 unsigned int count) 2742 { 2743 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2744 f2fs_mark_inode_dirty_sync(inode, true); 2745 } 2746 2747 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2748 { 2749 F2FS_I(inode)->i_xattr_nid = xnid; 2750 f2fs_mark_inode_dirty_sync(inode, true); 2751 } 2752 2753 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2754 { 2755 F2FS_I(inode)->i_pino = pino; 2756 f2fs_mark_inode_dirty_sync(inode, true); 2757 } 2758 2759 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2760 { 2761 struct f2fs_inode_info *fi = F2FS_I(inode); 2762 2763 if (ri->i_inline & F2FS_INLINE_XATTR) 2764 set_bit(FI_INLINE_XATTR, fi->flags); 2765 if (ri->i_inline & F2FS_INLINE_DATA) 2766 set_bit(FI_INLINE_DATA, fi->flags); 2767 if (ri->i_inline & F2FS_INLINE_DENTRY) 2768 set_bit(FI_INLINE_DENTRY, fi->flags); 2769 if (ri->i_inline & F2FS_DATA_EXIST) 2770 set_bit(FI_DATA_EXIST, fi->flags); 2771 if (ri->i_inline & F2FS_INLINE_DOTS) 2772 set_bit(FI_INLINE_DOTS, fi->flags); 2773 if (ri->i_inline & F2FS_EXTRA_ATTR) 2774 set_bit(FI_EXTRA_ATTR, fi->flags); 2775 if (ri->i_inline & F2FS_PIN_FILE) 2776 set_bit(FI_PIN_FILE, fi->flags); 2777 } 2778 2779 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2780 { 2781 ri->i_inline = 0; 2782 2783 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2784 ri->i_inline |= F2FS_INLINE_XATTR; 2785 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2786 ri->i_inline |= F2FS_INLINE_DATA; 2787 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2788 ri->i_inline |= F2FS_INLINE_DENTRY; 2789 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2790 ri->i_inline |= F2FS_DATA_EXIST; 2791 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2792 ri->i_inline |= F2FS_INLINE_DOTS; 2793 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2794 ri->i_inline |= F2FS_EXTRA_ATTR; 2795 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2796 ri->i_inline |= F2FS_PIN_FILE; 2797 } 2798 2799 static inline int f2fs_has_extra_attr(struct inode *inode) 2800 { 2801 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2802 } 2803 2804 static inline int f2fs_has_inline_xattr(struct inode *inode) 2805 { 2806 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2807 } 2808 2809 static inline int f2fs_compressed_file(struct inode *inode) 2810 { 2811 return S_ISREG(inode->i_mode) && 2812 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 2813 } 2814 2815 static inline bool f2fs_need_compress_data(struct inode *inode) 2816 { 2817 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 2818 2819 if (!f2fs_compressed_file(inode)) 2820 return false; 2821 2822 if (compress_mode == COMPR_MODE_FS) 2823 return true; 2824 else if 
(compress_mode == COMPR_MODE_USER && 2825 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 2826 return true; 2827 2828 return false; 2829 } 2830 2831 static inline unsigned int addrs_per_inode(struct inode *inode) 2832 { 2833 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 2834 get_inline_xattr_addrs(inode); 2835 2836 if (!f2fs_compressed_file(inode)) 2837 return addrs; 2838 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 2839 } 2840 2841 static inline unsigned int addrs_per_block(struct inode *inode) 2842 { 2843 if (!f2fs_compressed_file(inode)) 2844 return DEF_ADDRS_PER_BLOCK; 2845 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 2846 } 2847 2848 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 2849 { 2850 struct f2fs_inode *ri = F2FS_INODE(page); 2851 2852 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 2853 get_inline_xattr_addrs(inode)]); 2854 } 2855 2856 static inline int inline_xattr_size(struct inode *inode) 2857 { 2858 if (f2fs_has_inline_xattr(inode)) 2859 return get_inline_xattr_addrs(inode) * sizeof(__le32); 2860 return 0; 2861 } 2862 2863 static inline int f2fs_has_inline_data(struct inode *inode) 2864 { 2865 return is_inode_flag_set(inode, FI_INLINE_DATA); 2866 } 2867 2868 static inline int f2fs_exist_data(struct inode *inode) 2869 { 2870 return is_inode_flag_set(inode, FI_DATA_EXIST); 2871 } 2872 2873 static inline int f2fs_has_inline_dots(struct inode *inode) 2874 { 2875 return is_inode_flag_set(inode, FI_INLINE_DOTS); 2876 } 2877 2878 static inline int f2fs_is_mmap_file(struct inode *inode) 2879 { 2880 return is_inode_flag_set(inode, FI_MMAP_FILE); 2881 } 2882 2883 static inline bool f2fs_is_pinned_file(struct inode *inode) 2884 { 2885 return is_inode_flag_set(inode, FI_PIN_FILE); 2886 } 2887 2888 static inline bool f2fs_is_atomic_file(struct inode *inode) 2889 { 2890 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 2891 } 2892 2893 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 2894 { 2895 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 2896 } 2897 2898 static inline bool f2fs_is_volatile_file(struct inode *inode) 2899 { 2900 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 2901 } 2902 2903 static inline bool f2fs_is_first_block_written(struct inode *inode) 2904 { 2905 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 2906 } 2907 2908 static inline bool f2fs_is_drop_cache(struct inode *inode) 2909 { 2910 return is_inode_flag_set(inode, FI_DROP_CACHE); 2911 } 2912 2913 static inline void *inline_data_addr(struct inode *inode, struct page *page) 2914 { 2915 struct f2fs_inode *ri = F2FS_INODE(page); 2916 int extra_size = get_extra_isize(inode); 2917 2918 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 2919 } 2920 2921 static inline int f2fs_has_inline_dentry(struct inode *inode) 2922 { 2923 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 2924 } 2925 2926 static inline int is_file(struct inode *inode, int type) 2927 { 2928 return F2FS_I(inode)->i_advise & type; 2929 } 2930 2931 static inline void set_file(struct inode *inode, int type) 2932 { 2933 F2FS_I(inode)->i_advise |= type; 2934 f2fs_mark_inode_dirty_sync(inode, true); 2935 } 2936 2937 static inline void clear_file(struct inode *inode, int type) 2938 { 2939 F2FS_I(inode)->i_advise &= ~type; 2940 f2fs_mark_inode_dirty_sync(inode, true); 2941 } 2942 2943 static inline bool f2fs_is_time_consistent(struct inode *inode) 2944 { 2945 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 2946 
return false; 2947 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 2948 return false; 2949 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 2950 return false; 2951 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 2952 &F2FS_I(inode)->i_crtime)) 2953 return false; 2954 return true; 2955 } 2956 2957 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 2958 { 2959 bool ret; 2960 2961 if (dsync) { 2962 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2963 2964 spin_lock(&sbi->inode_lock[DIRTY_META]); 2965 ret = list_empty(&F2FS_I(inode)->gdirty_list); 2966 spin_unlock(&sbi->inode_lock[DIRTY_META]); 2967 return ret; 2968 } 2969 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 2970 file_keep_isize(inode) || 2971 i_size_read(inode) & ~PAGE_MASK) 2972 return false; 2973 2974 if (!f2fs_is_time_consistent(inode)) 2975 return false; 2976 2977 spin_lock(&F2FS_I(inode)->i_size_lock); 2978 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 2979 spin_unlock(&F2FS_I(inode)->i_size_lock); 2980 2981 return ret; 2982 } 2983 2984 static inline bool f2fs_readonly(struct super_block *sb) 2985 { 2986 return sb_rdonly(sb); 2987 } 2988 2989 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 2990 { 2991 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 2992 } 2993 2994 static inline bool is_dot_dotdot(const u8 *name, size_t len) 2995 { 2996 if (len == 1 && name[0] == '.') 2997 return true; 2998 2999 if (len == 2 && name[0] == '.' && name[1] == '.') 3000 return true; 3001 3002 return false; 3003 } 3004 3005 static inline bool f2fs_may_extent_tree(struct inode *inode) 3006 { 3007 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3008 3009 if (!test_opt(sbi, EXTENT_CACHE) || 3010 is_inode_flag_set(inode, FI_NO_EXTENT) || 3011 is_inode_flag_set(inode, FI_COMPRESSED_FILE)) 3012 return false; 3013 3014 /* 3015 * for recovered files during mount do not create extents 3016 * if shrinker is not registered. 3017 */ 3018 if (list_empty(&sbi->s_list)) 3019 return false; 3020 3021 return S_ISREG(inode->i_mode); 3022 } 3023 3024 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3025 size_t size, gfp_t flags) 3026 { 3027 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3028 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3029 return NULL; 3030 } 3031 3032 return kmalloc(size, flags); 3033 } 3034 3035 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3036 size_t size, gfp_t flags) 3037 { 3038 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3039 } 3040 3041 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3042 size_t size, gfp_t flags) 3043 { 3044 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3045 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3046 return NULL; 3047 } 3048 3049 return kvmalloc(size, flags); 3050 } 3051 3052 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3053 size_t size, gfp_t flags) 3054 { 3055 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3056 } 3057 3058 static inline int get_extra_isize(struct inode *inode) 3059 { 3060 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3061 } 3062 3063 static inline int get_inline_xattr_addrs(struct inode *inode) 3064 { 3065 return F2FS_I(inode)->i_inline_xattr_size; 3066 } 3067 3068 #define f2fs_get_inode_mode(i) \ 3069 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3070 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3071 3072 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3073 (offsetof(struct f2fs_inode, i_extra_end) - \ 3074 offsetof(struct f2fs_inode, i_extra_isize)) \ 3075 3076 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3077 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3078 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3079 sizeof((f2fs_inode)->field)) \ 3080 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3081 3082 #define DEFAULT_IOSTAT_PERIOD_MS 3000 3083 #define MIN_IOSTAT_PERIOD_MS 100 3084 /* maximum period of iostat tracing */ 3085 #define MAX_IOSTAT_PERIOD_MS 8640000 3086 3087 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) 3088 { 3089 int i; 3090 3091 spin_lock(&sbi->iostat_lock); 3092 for (i = 0; i < NR_IO_TYPE; i++) { 3093 sbi->rw_iostat[i] = 0; 3094 sbi->prev_rw_iostat[i] = 0; 3095 } 3096 spin_unlock(&sbi->iostat_lock); 3097 } 3098 3099 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi); 3100 3101 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, 3102 enum iostat_type type, unsigned long long io_bytes) 3103 { 3104 if (!sbi->iostat_enable) 3105 return; 3106 spin_lock(&sbi->iostat_lock); 3107 sbi->rw_iostat[type] += io_bytes; 3108 3109 if (type == APP_WRITE_IO || type == APP_DIRECT_IO) 3110 sbi->rw_iostat[APP_BUFFERED_IO] = 3111 sbi->rw_iostat[APP_WRITE_IO] - 3112 sbi->rw_iostat[APP_DIRECT_IO]; 3113 3114 if (type == APP_READ_IO || type == APP_DIRECT_READ_IO) 3115 sbi->rw_iostat[APP_BUFFERED_READ_IO] = 3116 sbi->rw_iostat[APP_READ_IO] - 3117 sbi->rw_iostat[APP_DIRECT_READ_IO]; 3118 spin_unlock(&sbi->iostat_lock); 3119 3120 f2fs_record_iostat(sbi); 3121 } 3122 3123 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3124 3125 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3126 3127 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3128 block_t blkaddr, int type); 3129 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3130 block_t blkaddr, int type) 3131 { 3132 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3133 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3134 blkaddr, type); 3135 f2fs_bug_on(sbi, 1); 3136 } 3137 } 3138 3139 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3140 { 3141 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3142 blkaddr == COMPRESS_ADDR) 3143 return false; 3144 return true; 3145 } 3146 3147 static inline void f2fs_set_page_private(struct page *page, 3148 unsigned long data) 3149 { 3150 if (PagePrivate(page)) 3151 return; 3152 3153 attach_page_private(page, (void *)data); 3154 } 3155 3156 static inline void f2fs_clear_page_private(struct page *page) 3157 { 3158 detach_page_private(page); 3159 } 3160 3161 /* 3162 * file.c 3163 */ 3164 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3165 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3166 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3167 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3168 int f2fs_truncate(struct inode *inode); 3169 int f2fs_getattr(const struct path *path, struct kstat *stat, 3170 u32 request_mask, unsigned int flags); 3171 int f2fs_setattr(struct dentry *dentry, struct iattr *attr); 3172 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3173 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3174 int f2fs_precache_extents(struct inode *inode); 3175 long
f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3176 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3177 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3178 int f2fs_pin_file_control(struct inode *inode, bool inc); 3179 3180 /* 3181 * inode.c 3182 */ 3183 void f2fs_set_inode_flags(struct inode *inode); 3184 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3185 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3186 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3187 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3188 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3189 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3190 void f2fs_update_inode_page(struct inode *inode); 3191 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3192 void f2fs_evict_inode(struct inode *inode); 3193 void f2fs_handle_failed_inode(struct inode *inode); 3194 3195 /* 3196 * namei.c 3197 */ 3198 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3199 bool hot, bool set); 3200 struct dentry *f2fs_get_parent(struct dentry *child); 3201 3202 /* 3203 * dir.c 3204 */ 3205 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3206 int f2fs_init_casefolded_name(const struct inode *dir, 3207 struct f2fs_filename *fname); 3208 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3209 int lookup, struct f2fs_filename *fname); 3210 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3211 struct f2fs_filename *fname); 3212 void f2fs_free_filename(struct f2fs_filename *fname); 3213 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3214 const struct f2fs_filename *fname, int *max_slots); 3215 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3216 unsigned int start_pos, struct fscrypt_str *fstr); 3217 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3218 struct f2fs_dentry_ptr *d); 3219 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3220 const struct f2fs_filename *fname, struct page *dpage); 3221 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3222 unsigned int current_depth); 3223 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3224 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3225 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3226 const struct f2fs_filename *fname, 3227 struct page **res_page); 3228 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3229 const struct qstr *child, struct page **res_page); 3230 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3231 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3232 struct page **page); 3233 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3234 struct page *page, struct inode *inode); 3235 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3236 const struct f2fs_filename *fname); 3237 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3238 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3239 unsigned int bit_pos); 3240 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3241 struct inode *inode, nid_t ino, umode_t mode); 3242 int f2fs_add_dentry(struct 
inode *dir, const struct f2fs_filename *fname, 3243 struct inode *inode, nid_t ino, umode_t mode); 3244 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3245 struct inode *inode, nid_t ino, umode_t mode); 3246 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3247 struct inode *dir, struct inode *inode); 3248 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3249 bool f2fs_empty_dir(struct inode *dir); 3250 3251 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3252 { 3253 if (fscrypt_is_nokey_name(dentry)) 3254 return -ENOKEY; 3255 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3256 inode, inode->i_ino, inode->i_mode); 3257 } 3258 3259 /* 3260 * super.c 3261 */ 3262 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3263 void f2fs_inode_synced(struct inode *inode); 3264 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3265 int f2fs_quota_sync(struct super_block *sb, int type); 3266 loff_t max_file_blocks(struct inode *inode); 3267 void f2fs_quota_off_umount(struct super_block *sb); 3268 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3269 int f2fs_sync_fs(struct super_block *sb, int sync); 3270 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3271 3272 /* 3273 * hash.c 3274 */ 3275 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3276 3277 /* 3278 * node.c 3279 */ 3280 struct dnode_of_data; 3281 struct node_info; 3282 3283 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3284 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3285 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3286 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3287 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3288 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3289 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3290 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3291 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3292 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3293 struct node_info *ni); 3294 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3295 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3296 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3297 int f2fs_truncate_xattr_node(struct inode *inode); 3298 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3299 unsigned int seq_id); 3300 int f2fs_remove_inode_page(struct inode *inode); 3301 struct page *f2fs_new_inode_page(struct inode *inode); 3302 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3303 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3304 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3305 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3306 int f2fs_move_node_page(struct page *node_page, int gc_type); 3307 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3308 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3309 struct writeback_control *wbc, bool atomic, 3310 unsigned int *seq_id); 3311 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3312 struct writeback_control *wbc, 3313 bool do_balance, enum iostat_type io_type); 3314 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool 
mount); 3315 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3316 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 3317 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3318 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3319 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3320 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3321 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3322 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3323 unsigned int segno, struct f2fs_summary_block *sum); 3324 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3325 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3326 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3327 int __init f2fs_create_node_manager_caches(void); 3328 void f2fs_destroy_node_manager_caches(void); 3329 3330 /* 3331 * segment.c 3332 */ 3333 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3334 void f2fs_register_inmem_page(struct inode *inode, struct page *page); 3335 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure); 3336 void f2fs_drop_inmem_pages(struct inode *inode); 3337 void f2fs_drop_inmem_page(struct inode *inode, struct page *page); 3338 int f2fs_commit_inmem_pages(struct inode *inode); 3339 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3340 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3341 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3342 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3343 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3344 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3345 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3346 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3347 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3348 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3349 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3350 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3351 struct cp_control *cpc); 3352 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3353 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3354 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3355 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3356 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3357 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3358 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3359 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3360 void f2fs_get_new_segment(struct f2fs_sb_info *sbi, 3361 unsigned int *newseg, bool new_sec, int dir); 3362 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3363 unsigned int start, unsigned int end); 3364 void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type); 3365 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3366 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3367 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3368 struct cp_control *cpc); 3369 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3370 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3371 block_t blk_addr); 3372 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3373 enum iostat_type io_type); 
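/* node/data write-out and block allocation/replacement helpers */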
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
			block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
			struct inode *inode, struct page *page,
			nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
			struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
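/*
 * Write policy helpers: decide whether a dirty data page should be
 * rewritten in place (IPU) or written out of place (OPU).
 */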
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

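/*
 * Entry points for the per-filesystem statistics exposed via debugfs;
 * no-op stubs are provided below when CONFIG_F2FS_STAT_FS is disabled.
 */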
int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
				struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
			struct page *page, struct inode *dir,
			struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_end_read_compressed_page(struct page *page, bool failed);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
			int *submitted,
			struct writeback_control *wbc,
			enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
			unsigned nr_pages, sector_t *last_block_in_bio,
			bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* not support compression */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
#endif

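/*
 * Copy the compression parameters chosen at mount time (algorithm, log of
 * cluster size, optional level and checksum flag) into the inode and mark
 * it as a compressed file.
 */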
static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

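/*
 * Returns true if at least one of the filesystem's block devices can accept
 * discard requests (or is zoned).
 */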
static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) ||
		f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * for blkzoned device, fallback direct IO to buffered IO, so
	 * all IOs can be serialized by log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
					!IS_SWAPFILE(inode))
		return true;

	return false;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */