/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;
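
/*
 * Worked example (illustrative note, not part of the original header): the
 * signed-difference trick above makes ver_after() wraparound-safe. With
 * a == 1 and b == 0 it is true, and so is a == 0 vs b == ULLONG_MAX,
 * because (long long)(0 - ULLONG_MAX) == 1 is positive even though the
 * first version number has wrapped past zero.
 */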

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* Controls which write hints are passed down to the block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0
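
/*
 * Illustrative sketch (hypothetical helper, not used by f2fs itself): the
 * feature mask lives in the raw superblock in little-endian form, so a
 * feature test is a single masked compare on the __le32 field:
 */
static inline bool f2fs_example_sb_has_compression(
					struct f2fs_super_block *raw_super)
{
	return __F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_COMPRESSION);
}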

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if candidates exist */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device is busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for the checkpoint time stats below */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
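
/*
 * Worked example (illustrative note): pending discard commands are binned
 * by length, so a command covering N blocks lands in pend_list[plist_idx(N)]:
 * plist_idx(1) == 0, plist_idx(16) == 15, and anything of MAX_PLIST_NUM
 * (512) blocks or more is capped into the last slot, e.g.
 * plist_idx(4096) == 511.
 */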

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used when candidates exist */
	unsigned int mid_interval;	/* used when device is busy */
	unsigned int max_interval;	/* used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum discard granularity not to be I/O aware */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
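
/*
 * Illustrative sketch (hypothetical helper, not part of f2fs): callers pair
 * __has_cursum_space() with update_nats_in_cursum() to reserve journal
 * slots before filling them, e.g.:
 */
static inline bool f2fs_example_reserve_nat_slots(struct f2fs_journal *journal,
							int count)
{
	if (!__has_cursum_space(journal, count, NAT_JOURNAL))
		return false;	/* caller must flush the journal first */
	update_nats_in_cursum(journal, count);
	return true;
}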

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified. This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename. For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
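
/*
 * Illustrative note: make_dentry_ptr_inline() carves the inline data area
 * (MAX_INLINE_DATA(inode) bytes) into four consecutive regions, with
 * N = NR_INLINE_DENTRY(inode):
 *
 *	+---------------+----------+--------------------+-----------------+
 *	| dentry bitmap | reserved | f2fs_dir_entry x N | name slots x N  |
 *	+---------------+----------+--------------------+-----------------+
 *
 * INLINE_RESERVED_SIZE() is whatever is left over once the bitmap, the
 * entries, and the name slots have been accounted for.
 */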

/*
 * XATTR_NODE_OFFSET stores xattrs in one node block per file, which keeps -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
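
/*
 * Usage sketch (illustrative, kept in comment form since f2fs_map_blocks()
 * and NO_CHECK_TYPE are declared later in this header): to map an 8-block
 * range starting at file block 100 without allocating anything:
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = 100,
 *		.m_len = 8,
 *		.m_may_create = false,
 *		.m_seg_type = NO_CHECK_TYPE,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *
 * On success, m_pblk/m_len describe the mapped extent and m_flags carries
 * the F2FS_MAP_* bits above.
 */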

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* used to give file attribute hints */
	unsigned char i_dir_level;	/* used for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Used below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* used to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* last written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
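
/*
 * Illustrative sketch (hypothetical helper): two extents merge only when both
 * the file range and the block range are contiguous, e.g. {fofs=0, blk=100,
 * len=4} and {fofs=4, blk=104, len=2} combine into {fofs=0, blk=100, len=6}:
 */
static inline bool f2fs_example_merge_extents(struct extent_info *back,
						struct extent_info *front)
{
	if (!__is_extent_mergeable(back, front))
		return false;
	back->len += front->len;	/* absorb the front extent */
	return true;
}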

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be read ahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
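
/*
 * Usage sketch (illustrative, kept in comment form since the lookup helpers
 * are declared later in this header): a typical caller initializes a dnode
 * and then resolves the direct node block covering a given page index:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = f2fs_data_blkaddr(&dn);
 *		f2fs_put_dnode(&dn);
 *	}
 */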

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			by waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};
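
/*
 * Worked example (illustrative note): only DATA, NODE and META map to real
 * write bio queues, so PAGE_TYPE_OF_BIO() folds every later type back onto
 * META, e.g. PAGE_TYPE_OF_BIO(META_FLUSH) == META while
 * PAGE_TYPE_OF_BIO(NODE) == NODE.
 */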

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,		/* for dirty dir inode */
	FILE_INODE,		/* for dirty regular/symlink inode */
	DIRTY_META,		/* for all dirtied inode metadata */
	ATOMIC_FILE,		/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is doing or not */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		} \
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
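
/*
 * Worked example (illustrative note): with layout A, flag bits and payload
 * share page.private. After
 *
 *	set_page_private_gcing(page);
 *	set_page_private_data(page, 5);
 *
 * page.private holds (5 << PAGE_PRIVATE_MAX) |
 * (1 << PAGE_PRIVATE_ONGOING_MIGRATION) | (1 << PAGE_PRIVATE_NOT_POINTER),
 * and get_page_private_data() returns 5. Clearing both the flag and the
 * data leaves only the NOT_POINTER bit, which drops the page reference
 * taken by the first setter.
 */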

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
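
/*
 * Worked example (illustrative note): the header occupies
 * COMPRESS_HEADER_SIZE = 4 (clen) + 4 (chksum) + 4 * 4 (reserved) = 24
 * bytes, so compressing a cluster pays off only when these 24 bytes plus
 * the payload still fit in fewer pages than the raw cluster.
 */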

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define	COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages. It is decremented by 1 each time a page
	 * has been read (or failed to be read). When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0. In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion. This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio. These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
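
/*
 * Worked example (illustrative note): with 4KB pages, the allowed cluster
 * log sizes 2..8 give clusters of 4 to 256 pages, i.e. compression windows
 * from MAX_COMPRESS_WINDOW_SIZE(2) = 16KB up to
 * MAX_COMPRESS_WINDOW_SIZE(8) = 1MB.
 */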

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */
#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
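/*
 * For reference: with 4KiB pages, the supported cluster log sizes 2..8 give
 * compression windows from PAGE_SIZE << 2 = 16KiB up to PAGE_SIZE << 8 =
 * 1MiB per cluster (4 to 256 pages).
 */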
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* current cp pack number (1 or 2) */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;		/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfile */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];		/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC; avoids races
						 * between concurrent GCs and
						 * between GC and CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic files */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;	/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * in the two-element arrays below, one slot is for the LFS mode and
	 * the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace updates */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	struct kobject s_feature_list_kobj;	/* /sys/fs/f2fs/<devname>/feature_list */
	struct completion s_feature_list_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct kmem_cache *page_array_slab;	/* page array entry */
	unsigned int page_array_slab_size;	/* default page array slab size */

	/* For runtime compression statistics */
	u64 compr_written_block;
	u64 compr_saved_block;
	u32 compr_new_inode;

	/* For compressed block cache */
	struct inode *compress_inode;		/* cache compressed blocks */
	unsigned int compress_percent;		/* cache page percentage */
	unsigned int compress_watermark;	/* cache page watermark */
	atomic_t compress_page_hit;		/* cache hit count */
#endif
};

struct f2fs_private_dio {
	struct inode *inode;
	void *orig_private;
	bio_end_io_t *orig_end_io;
	bool write;
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif
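/*
 * Typical use of the fault-injection pair above, as seen later in this file
 * (e.g. in f2fs_kmalloc()): check time_to_inject() for the relevant FAULT_*
 * type, log the injection point, and fail the operation as if the real error
 * had occurred:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */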
/*
 * Test if the mounted volume is a multi-device volume.
 * - For a single regular disk volume, sbi->s_ndevs is 0.
 * - For a single zoned disk volume, sbi->s_ndevs is 1.
 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}

/*
 * Inline functions
 */
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}
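/*
 * Usage sketch for the checksum helpers above: f2fs_crc32() seeds the CRC
 * with F2FS_SUPER_MAGIC for standalone block checksums, while f2fs_chksum()
 * lets callers chain a running crc (typically starting from s_chksum_seed).
 * Verifying a buffer against a stored checksum then looks like:
 *
 *	if (!f2fs_crc_valid(sbi, blk_crc, buf, buf_size))
 *		return -EFSBADCRC;	// illustrative error choice
 */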
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}
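/*
 * These sbi flag helpers back the common "mark the fs for fsck" pattern used
 * throughout f2fs when an on-disk inconsistency is detected, e.g.:
 *
 *	set_sbi_flag(sbi, SBI_NEED_FSCK);
 *
 * as dec_valid_block_count() does below when i_blocks goes inconsistent.
 */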
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
{
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
	unsigned long flags;
	unsigned char *nat_bits;

	/*
	 * Re-enabling nat_bits would require running fsck.f2fs, which we
	 * could request via set_sbi_flag(sbi, SBI_NEED_FSCK); since that is
	 * costly, rely on a regular fsck or an unclean shutdown instead.
	 */

	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	nat_bits = NM_I(sbi)->nat_bits;
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);

	kvfree(nat_bits);
}

static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
					struct cp_control *cpc)
{
	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);

	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	return down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}
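/*
 * cp_rwsem usage in a nutshell: filesystem operations that must not race
 * with a checkpoint take the semaphore as readers, while the checkpoint path
 * takes it as the sole writer. An illustrative caller:
 *
 *	f2fs_lock_op(sbi);	// block checkpoint during this operation
 *	... allocate or modify metadata ...
 *	f2fs_unlock_op(sbi);
 *
 * f2fs_lock_all()/f2fs_unlock_all() are the writer side used by checkpoint.
 */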
static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}

	/*
	 * let's increase this prior to the actual block count change so that
	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count -
					sbi->current_reserved_blocks;

	if (!__allow_reserved_blocks(sbi, inode, true))
		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (avail_user_block_count > sbi->unusable_block_count)
			avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}

__printf(2, 3)
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
#define f2fs_debug(sbi, fmt, ...)
\ 2221 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2222 2223 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2224 struct inode *inode, 2225 block_t count) 2226 { 2227 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2228 2229 spin_lock(&sbi->stat_lock); 2230 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2231 sbi->total_valid_block_count -= (block_t)count; 2232 if (sbi->reserved_blocks && 2233 sbi->current_reserved_blocks < sbi->reserved_blocks) 2234 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2235 sbi->current_reserved_blocks + count); 2236 spin_unlock(&sbi->stat_lock); 2237 if (unlikely(inode->i_blocks < sectors)) { 2238 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2239 inode->i_ino, 2240 (unsigned long long)inode->i_blocks, 2241 (unsigned long long)sectors); 2242 set_sbi_flag(sbi, SBI_NEED_FSCK); 2243 return; 2244 } 2245 f2fs_i_blocks_write(inode, count, false, true); 2246 } 2247 2248 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2249 { 2250 atomic_inc(&sbi->nr_pages[count_type]); 2251 2252 if (count_type == F2FS_DIRTY_DENTS || 2253 count_type == F2FS_DIRTY_NODES || 2254 count_type == F2FS_DIRTY_META || 2255 count_type == F2FS_DIRTY_QDATA || 2256 count_type == F2FS_DIRTY_IMETA) 2257 set_sbi_flag(sbi, SBI_IS_DIRTY); 2258 } 2259 2260 static inline void inode_inc_dirty_pages(struct inode *inode) 2261 { 2262 atomic_inc(&F2FS_I(inode)->dirty_pages); 2263 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2264 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2265 if (IS_NOQUOTA(inode)) 2266 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2267 } 2268 2269 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2270 { 2271 atomic_dec(&sbi->nr_pages[count_type]); 2272 } 2273 2274 static inline void inode_dec_dirty_pages(struct inode *inode) 2275 { 2276 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2277 !S_ISLNK(inode->i_mode)) 2278 return; 2279 2280 atomic_dec(&F2FS_I(inode)->dirty_pages); 2281 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2282 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2283 if (IS_NOQUOTA(inode)) 2284 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2285 } 2286 2287 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2288 { 2289 return atomic_read(&sbi->nr_pages[count_type]); 2290 } 2291 2292 static inline int get_dirty_pages(struct inode *inode) 2293 { 2294 return atomic_read(&F2FS_I(inode)->dirty_pages); 2295 } 2296 2297 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2298 { 2299 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2300 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2301 sbi->log_blocks_per_seg; 2302 2303 return segs / sbi->segs_per_sec; 2304 } 2305 2306 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2307 { 2308 return sbi->total_valid_block_count; 2309 } 2310 2311 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2312 { 2313 return sbi->discard_blks; 2314 } 2315 2316 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2317 { 2318 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2319 2320 /* return NAT or SIT bitmap */ 2321 if (flag == NAT_BITMAP) 2322 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2323 else if (flag == SIT_BITMAP) 2324 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2325 2326 return 0; 2327 } 2328 2329 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2330 { 2331 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2332 } 2333 2334 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2335 { 2336 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2337 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; 2338 int offset; 2339 2340 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2341 offset = (flag == SIT_BITMAP) ? 2342 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2343 /* 2344 * if large_nat_bitmap feature is enabled, leave checksum 2345 * protection for all nat/sit bitmaps. 2346 */ 2347 return tmp_ptr + offset + sizeof(__le32); 2348 } 2349 2350 if (__cp_payload(sbi) > 0) { 2351 if (flag == NAT_BITMAP) 2352 return &ckpt->sit_nat_version_bitmap; 2353 else 2354 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2355 } else { 2356 offset = (flag == NAT_BITMAP) ? 2357 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2358 return tmp_ptr + offset; 2359 } 2360 } 2361 2362 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2363 { 2364 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2365 2366 if (sbi->cur_cp_pack == 2) 2367 start_addr += sbi->blocks_per_seg; 2368 return start_addr; 2369 } 2370 2371 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2372 { 2373 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2374 2375 if (sbi->cur_cp_pack == 1) 2376 start_addr += sbi->blocks_per_seg; 2377 return start_addr; 2378 } 2379 2380 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2381 { 2382 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 
2 : 1; 2383 } 2384 2385 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2386 { 2387 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2388 } 2389 2390 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2391 struct inode *inode, bool is_inode) 2392 { 2393 block_t valid_block_count; 2394 unsigned int valid_node_count, user_block_count; 2395 int err; 2396 2397 if (is_inode) { 2398 if (inode) { 2399 err = dquot_alloc_inode(inode); 2400 if (err) 2401 return err; 2402 } 2403 } else { 2404 err = dquot_reserve_block(inode, 1); 2405 if (err) 2406 return err; 2407 } 2408 2409 if (time_to_inject(sbi, FAULT_BLOCK)) { 2410 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2411 goto enospc; 2412 } 2413 2414 spin_lock(&sbi->stat_lock); 2415 2416 valid_block_count = sbi->total_valid_block_count + 2417 sbi->current_reserved_blocks + 1; 2418 2419 if (!__allow_reserved_blocks(sbi, inode, false)) 2420 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2421 user_block_count = sbi->user_block_count; 2422 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2423 user_block_count -= sbi->unusable_block_count; 2424 2425 if (unlikely(valid_block_count > user_block_count)) { 2426 spin_unlock(&sbi->stat_lock); 2427 goto enospc; 2428 } 2429 2430 valid_node_count = sbi->total_valid_node_count + 1; 2431 if (unlikely(valid_node_count > sbi->total_node_count)) { 2432 spin_unlock(&sbi->stat_lock); 2433 goto enospc; 2434 } 2435 2436 sbi->total_valid_node_count++; 2437 sbi->total_valid_block_count++; 2438 spin_unlock(&sbi->stat_lock); 2439 2440 if (inode) { 2441 if (is_inode) 2442 f2fs_mark_inode_dirty_sync(inode, true); 2443 else 2444 f2fs_i_blocks_write(inode, 1, true, true); 2445 } 2446 2447 percpu_counter_inc(&sbi->alloc_valid_block_count); 2448 return 0; 2449 2450 enospc: 2451 if (is_inode) { 2452 if (inode) 2453 dquot_free_inode(inode); 2454 } else { 2455 dquot_release_reservation_block(inode, 1); 2456 } 2457 return -ENOSPC; 2458 } 2459 2460 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2461 struct inode *inode, bool is_inode) 2462 { 2463 spin_lock(&sbi->stat_lock); 2464 2465 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2466 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2467 2468 sbi->total_valid_node_count--; 2469 sbi->total_valid_block_count--; 2470 if (sbi->reserved_blocks && 2471 sbi->current_reserved_blocks < sbi->reserved_blocks) 2472 sbi->current_reserved_blocks++; 2473 2474 spin_unlock(&sbi->stat_lock); 2475 2476 if (is_inode) { 2477 dquot_free_inode(inode); 2478 } else { 2479 if (unlikely(inode->i_blocks == 0)) { 2480 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2481 inode->i_ino, 2482 (unsigned long long)inode->i_blocks); 2483 set_sbi_flag(sbi, SBI_NEED_FSCK); 2484 return; 2485 } 2486 f2fs_i_blocks_write(inode, 1, false, true); 2487 } 2488 } 2489 2490 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2491 { 2492 return sbi->total_valid_node_count; 2493 } 2494 2495 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2496 { 2497 percpu_counter_inc(&sbi->total_valid_inode_count); 2498 } 2499 2500 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2501 { 2502 percpu_counter_dec(&sbi->total_valid_inode_count); 2503 } 2504 2505 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2506 { 2507 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2508 } 2509 2510 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 
2511 pgoff_t index, bool for_write) 2512 { 2513 struct page *page; 2514 2515 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2516 if (!for_write) 2517 page = find_get_page_flags(mapping, index, 2518 FGP_LOCK | FGP_ACCESSED); 2519 else 2520 page = find_lock_page(mapping, index); 2521 if (page) 2522 return page; 2523 2524 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2525 f2fs_show_injection_info(F2FS_M_SB(mapping), 2526 FAULT_PAGE_ALLOC); 2527 return NULL; 2528 } 2529 } 2530 2531 if (!for_write) 2532 return grab_cache_page(mapping, index); 2533 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2534 } 2535 2536 static inline struct page *f2fs_pagecache_get_page( 2537 struct address_space *mapping, pgoff_t index, 2538 int fgp_flags, gfp_t gfp_mask) 2539 { 2540 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2541 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2542 return NULL; 2543 } 2544 2545 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2546 } 2547 2548 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2549 { 2550 char *src_kaddr = kmap(src); 2551 char *dst_kaddr = kmap(dst); 2552 2553 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2554 kunmap(dst); 2555 kunmap(src); 2556 } 2557 2558 static inline void f2fs_put_page(struct page *page, int unlock) 2559 { 2560 if (!page) 2561 return; 2562 2563 if (unlock) { 2564 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2565 unlock_page(page); 2566 } 2567 put_page(page); 2568 } 2569 2570 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2571 { 2572 if (dn->node_page) 2573 f2fs_put_page(dn->node_page, 1); 2574 if (dn->inode_page && dn->node_page != dn->inode_page) 2575 f2fs_put_page(dn->inode_page, 0); 2576 dn->node_page = NULL; 2577 dn->inode_page = NULL; 2578 } 2579 2580 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2581 size_t size) 2582 { 2583 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2584 } 2585 2586 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2587 gfp_t flags) 2588 { 2589 void *entry; 2590 2591 entry = kmem_cache_alloc(cachep, flags); 2592 if (!entry) 2593 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2594 return entry; 2595 } 2596 2597 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2598 { 2599 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2600 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2601 get_pages(sbi, F2FS_WB_CP_DATA) || 2602 get_pages(sbi, F2FS_DIO_READ) || 2603 get_pages(sbi, F2FS_DIO_WRITE)) 2604 return true; 2605 2606 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2607 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2608 return true; 2609 2610 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2611 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2612 return true; 2613 return false; 2614 } 2615 2616 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2617 { 2618 if (sbi->gc_mode == GC_URGENT_HIGH) 2619 return true; 2620 2621 if (is_inflight_io(sbi, type)) 2622 return false; 2623 2624 if (sbi->gc_mode == GC_URGENT_LOW && 2625 (type == DISCARD_TIME || type == GC_TIME)) 2626 return true; 2627 2628 return f2fs_time_over(sbi, type); 2629 } 2630 2631 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2632 unsigned long index, void *item) 2633 { 2634 while (radix_tree_insert(root, index, item)) 2635 cond_resched(); 2636 } 2637 2638 #define RAW_IS_INODE(p) 
((p)->footer.nid == (p)->footer.ino) 2639 2640 static inline bool IS_INODE(struct page *page) 2641 { 2642 struct f2fs_node *p = F2FS_NODE(page); 2643 2644 return RAW_IS_INODE(p); 2645 } 2646 2647 static inline int offset_in_addr(struct f2fs_inode *i) 2648 { 2649 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2650 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2651 } 2652 2653 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2654 { 2655 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2656 } 2657 2658 static inline int f2fs_has_extra_attr(struct inode *inode); 2659 static inline block_t data_blkaddr(struct inode *inode, 2660 struct page *node_page, unsigned int offset) 2661 { 2662 struct f2fs_node *raw_node; 2663 __le32 *addr_array; 2664 int base = 0; 2665 bool is_inode = IS_INODE(node_page); 2666 2667 raw_node = F2FS_NODE(node_page); 2668 2669 if (is_inode) { 2670 if (!inode) 2671 /* from GC path only */ 2672 base = offset_in_addr(&raw_node->i); 2673 else if (f2fs_has_extra_attr(inode)) 2674 base = get_extra_isize(inode); 2675 } 2676 2677 addr_array = blkaddr_in_node(raw_node); 2678 return le32_to_cpu(addr_array[base + offset]); 2679 } 2680 2681 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2682 { 2683 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2684 } 2685 2686 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2687 { 2688 int mask; 2689 2690 addr += (nr >> 3); 2691 mask = 1 << (7 - (nr & 0x07)); 2692 return mask & *addr; 2693 } 2694 2695 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2696 { 2697 int mask; 2698 2699 addr += (nr >> 3); 2700 mask = 1 << (7 - (nr & 0x07)); 2701 *addr |= mask; 2702 } 2703 2704 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2705 { 2706 int mask; 2707 2708 addr += (nr >> 3); 2709 mask = 1 << (7 - (nr & 0x07)); 2710 *addr &= ~mask; 2711 } 2712 2713 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2714 { 2715 int mask; 2716 int ret; 2717 2718 addr += (nr >> 3); 2719 mask = 1 << (7 - (nr & 0x07)); 2720 ret = mask & *addr; 2721 *addr |= mask; 2722 return ret; 2723 } 2724 2725 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2726 { 2727 int mask; 2728 int ret; 2729 2730 addr += (nr >> 3); 2731 mask = 1 << (7 - (nr & 0x07)); 2732 ret = mask & *addr; 2733 *addr &= ~mask; 2734 return ret; 2735 } 2736 2737 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2738 { 2739 int mask; 2740 2741 addr += (nr >> 3); 2742 mask = 1 << (7 - (nr & 0x07)); 2743 *addr ^= mask; 2744 } 2745 2746 /* 2747 * On-disk inode flags (f2fs_inode::i_flags) 2748 */ 2749 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2750 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2751 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2752 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2753 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2754 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2755 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2756 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2757 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2758 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2759 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2760 2761 /* Flags that should be inherited by new inodes from their parent. 
 */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add == true with claim == true must pair with a prior dquot_reserve_block() */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
f2fs_mark_inode_dirty_sync(inode, true); 2880 } 2881 2882 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2883 unsigned int count) 2884 { 2885 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2886 f2fs_mark_inode_dirty_sync(inode, true); 2887 } 2888 2889 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2890 { 2891 F2FS_I(inode)->i_xattr_nid = xnid; 2892 f2fs_mark_inode_dirty_sync(inode, true); 2893 } 2894 2895 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2896 { 2897 F2FS_I(inode)->i_pino = pino; 2898 f2fs_mark_inode_dirty_sync(inode, true); 2899 } 2900 2901 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2902 { 2903 struct f2fs_inode_info *fi = F2FS_I(inode); 2904 2905 if (ri->i_inline & F2FS_INLINE_XATTR) 2906 set_bit(FI_INLINE_XATTR, fi->flags); 2907 if (ri->i_inline & F2FS_INLINE_DATA) 2908 set_bit(FI_INLINE_DATA, fi->flags); 2909 if (ri->i_inline & F2FS_INLINE_DENTRY) 2910 set_bit(FI_INLINE_DENTRY, fi->flags); 2911 if (ri->i_inline & F2FS_DATA_EXIST) 2912 set_bit(FI_DATA_EXIST, fi->flags); 2913 if (ri->i_inline & F2FS_INLINE_DOTS) 2914 set_bit(FI_INLINE_DOTS, fi->flags); 2915 if (ri->i_inline & F2FS_EXTRA_ATTR) 2916 set_bit(FI_EXTRA_ATTR, fi->flags); 2917 if (ri->i_inline & F2FS_PIN_FILE) 2918 set_bit(FI_PIN_FILE, fi->flags); 2919 if (ri->i_inline & F2FS_COMPRESS_RELEASED) 2920 set_bit(FI_COMPRESS_RELEASED, fi->flags); 2921 } 2922 2923 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2924 { 2925 ri->i_inline = 0; 2926 2927 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2928 ri->i_inline |= F2FS_INLINE_XATTR; 2929 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2930 ri->i_inline |= F2FS_INLINE_DATA; 2931 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2932 ri->i_inline |= F2FS_INLINE_DENTRY; 2933 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2934 ri->i_inline |= F2FS_DATA_EXIST; 2935 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2936 ri->i_inline |= F2FS_INLINE_DOTS; 2937 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2938 ri->i_inline |= F2FS_EXTRA_ATTR; 2939 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2940 ri->i_inline |= F2FS_PIN_FILE; 2941 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) 2942 ri->i_inline |= F2FS_COMPRESS_RELEASED; 2943 } 2944 2945 static inline int f2fs_has_extra_attr(struct inode *inode) 2946 { 2947 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2948 } 2949 2950 static inline int f2fs_has_inline_xattr(struct inode *inode) 2951 { 2952 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2953 } 2954 2955 static inline int f2fs_compressed_file(struct inode *inode) 2956 { 2957 return S_ISREG(inode->i_mode) && 2958 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 2959 } 2960 2961 static inline bool f2fs_need_compress_data(struct inode *inode) 2962 { 2963 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 2964 2965 if (!f2fs_compressed_file(inode)) 2966 return false; 2967 2968 if (compress_mode == COMPR_MODE_FS) 2969 return true; 2970 else if (compress_mode == COMPR_MODE_USER && 2971 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 2972 return true; 2973 2974 return false; 2975 } 2976 2977 static inline unsigned int addrs_per_inode(struct inode *inode) 2978 { 2979 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 2980 get_inline_xattr_addrs(inode); 2981 2982 if (!f2fs_compressed_file(inode)) 2983 return addrs; 2984 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 2985 } 2986 2987 static inline unsigned int addrs_per_block(struct 
inode *inode) 2988 { 2989 if (!f2fs_compressed_file(inode)) 2990 return DEF_ADDRS_PER_BLOCK; 2991 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 2992 } 2993 2994 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 2995 { 2996 struct f2fs_inode *ri = F2FS_INODE(page); 2997 2998 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 2999 get_inline_xattr_addrs(inode)]); 3000 } 3001 3002 static inline int inline_xattr_size(struct inode *inode) 3003 { 3004 if (f2fs_has_inline_xattr(inode)) 3005 return get_inline_xattr_addrs(inode) * sizeof(__le32); 3006 return 0; 3007 } 3008 3009 static inline int f2fs_has_inline_data(struct inode *inode) 3010 { 3011 return is_inode_flag_set(inode, FI_INLINE_DATA); 3012 } 3013 3014 static inline int f2fs_exist_data(struct inode *inode) 3015 { 3016 return is_inode_flag_set(inode, FI_DATA_EXIST); 3017 } 3018 3019 static inline int f2fs_has_inline_dots(struct inode *inode) 3020 { 3021 return is_inode_flag_set(inode, FI_INLINE_DOTS); 3022 } 3023 3024 static inline int f2fs_is_mmap_file(struct inode *inode) 3025 { 3026 return is_inode_flag_set(inode, FI_MMAP_FILE); 3027 } 3028 3029 static inline bool f2fs_is_pinned_file(struct inode *inode) 3030 { 3031 return is_inode_flag_set(inode, FI_PIN_FILE); 3032 } 3033 3034 static inline bool f2fs_is_atomic_file(struct inode *inode) 3035 { 3036 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 3037 } 3038 3039 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 3040 { 3041 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 3042 } 3043 3044 static inline bool f2fs_is_volatile_file(struct inode *inode) 3045 { 3046 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 3047 } 3048 3049 static inline bool f2fs_is_first_block_written(struct inode *inode) 3050 { 3051 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 3052 } 3053 3054 static inline bool f2fs_is_drop_cache(struct inode *inode) 3055 { 3056 return is_inode_flag_set(inode, FI_DROP_CACHE); 3057 } 3058 3059 static inline void *inline_data_addr(struct inode *inode, struct page *page) 3060 { 3061 struct f2fs_inode *ri = F2FS_INODE(page); 3062 int extra_size = get_extra_isize(inode); 3063 3064 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 3065 } 3066 3067 static inline int f2fs_has_inline_dentry(struct inode *inode) 3068 { 3069 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 3070 } 3071 3072 static inline int is_file(struct inode *inode, int type) 3073 { 3074 return F2FS_I(inode)->i_advise & type; 3075 } 3076 3077 static inline void set_file(struct inode *inode, int type) 3078 { 3079 F2FS_I(inode)->i_advise |= type; 3080 f2fs_mark_inode_dirty_sync(inode, true); 3081 } 3082 3083 static inline void clear_file(struct inode *inode, int type) 3084 { 3085 F2FS_I(inode)->i_advise &= ~type; 3086 f2fs_mark_inode_dirty_sync(inode, true); 3087 } 3088 3089 static inline bool f2fs_is_time_consistent(struct inode *inode) 3090 { 3091 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 3092 return false; 3093 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 3094 return false; 3095 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 3096 return false; 3097 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 3098 &F2FS_I(inode)->i_crtime)) 3099 return false; 3100 return true; 3101 } 3102 3103 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 3104 { 3105 bool ret; 3106 3107 if (dsync) { 3108 struct f2fs_sb_info *sbi = 
F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT) ||
			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
		return false;

	/*
	 * For recovered files during mount, do not create extents if the
	 * shrinker is not registered.
	 */
	if (list_empty(&sbi->s_list))
		return false;

	return S_ISREG(inode->i_mode);
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	return kmalloc(size, flags);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))	\

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)	\
		((offsetof(typeof(*(f2fs_inode)), field) +	\
		sizeof((f2fs_inode)->field))			\
		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
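/*
 * Illustrative use of F2FS_FITS_IN_INODE(): given an on-disk inode and its
 * i_extra_isize, check whether an extra-attribute field is actually present
 * before trusting it (a hypothetical snippet; ri/fi names are for
 * illustration only):
 *
 *	if (f2fs_has_extra_attr(inode) &&
 *	    F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 *		... the i_projid field lies within the extra area ...
 */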
#define DEFAULT_IOSTAT_PERIOD_MS	3000
#define MIN_IOSTAT_PERIOD_MS		100
/* maximum period of iostat tracing: 8640000 ms (2.4 hours) */
#define MAX_IOSTAT_PERIOD_MS		8640000

static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	int i;

	spin_lock(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock(&sbi->iostat_lock);
}

extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);

static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	if (!sbi->iostat_enable)
		return;
	spin_lock(&sbi->iostat_lock);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_BUFFERED_IO] =
			sbi->rw_iostat[APP_WRITE_IO] -
			sbi->rw_iostat[APP_DIRECT_IO];

	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
			sbi->rw_iostat[APP_READ_IO] -
			sbi->rw_iostat[APP_DIRECT_READ_IO];
	spin_unlock(&sbi->iostat_lock);

	f2fs_record_iostat(sbi);
}

#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3312 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3313 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3314 int f2fs_pin_file_control(struct inode *inode, bool inc); 3315 3316 /* 3317 * inode.c 3318 */ 3319 void f2fs_set_inode_flags(struct inode *inode); 3320 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3321 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3322 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3323 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3324 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3325 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3326 void f2fs_update_inode_page(struct inode *inode); 3327 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3328 void f2fs_evict_inode(struct inode *inode); 3329 void f2fs_handle_failed_inode(struct inode *inode); 3330 3331 /* 3332 * namei.c 3333 */ 3334 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3335 bool hot, bool set); 3336 struct dentry *f2fs_get_parent(struct dentry *child); 3337 3338 /* 3339 * dir.c 3340 */ 3341 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3342 int f2fs_init_casefolded_name(const struct inode *dir, 3343 struct f2fs_filename *fname); 3344 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3345 int lookup, struct f2fs_filename *fname); 3346 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3347 struct f2fs_filename *fname); 3348 void f2fs_free_filename(struct f2fs_filename *fname); 3349 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3350 const struct f2fs_filename *fname, int *max_slots); 3351 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3352 unsigned int start_pos, struct fscrypt_str *fstr); 3353 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3354 struct f2fs_dentry_ptr *d); 3355 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3356 const struct f2fs_filename *fname, struct page *dpage); 3357 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3358 unsigned int current_depth); 3359 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3360 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3361 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3362 const struct f2fs_filename *fname, 3363 struct page **res_page); 3364 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3365 const struct qstr *child, struct page **res_page); 3366 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3367 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3368 struct page **page); 3369 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3370 struct page *page, struct inode *inode); 3371 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3372 const struct f2fs_filename *fname); 3373 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3374 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3375 unsigned int bit_pos); 3376 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3377 struct inode *inode, nid_t ino, umode_t mode); 3378 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, 3379 struct inode 
#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
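/*
 * Example (illustrative): block addresses read from disk are typically
 * screened for the reserved markers first, then range-checked.
 * DATA_GENERIC_ENHANCE is one of the blkaddr classes this header
 * defines for f2fs_is_valid_blkaddr():
 *
 *	if (__is_valid_data_blkaddr(blkaddr))
 *		verify_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE);
 *
 * NEW_ADDR, NULL_ADDR and COMPRESS_ADDR never name a real on-disk
 * block, so they must not reach the range check.
 */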
/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
			struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
			struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
					bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
				struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
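/*
 * Example (illustrative): f2fs_add_link() is what the namei operations
 * call to link a freshly created inode into its parent:
 *
 *	err = f2fs_add_link(dentry, inode);
 *	if (err)
 *		goto fail;
 *
 * The fscrypt_is_nokey_name() check makes this fail with -ENOKEY rather
 * than create an entry under an encrypted name whose key is absent.
 */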
/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
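/*
 * Example (illustrative): the common pattern for resolving a file offset
 * to its on-disk block address is a dnode lookup; set_new_dnode() and
 * f2fs_put_dnode() are declared earlier in this header:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 *
 * LOOKUP_NODE only walks existing nodes; ALLOC_NODE would allocate
 * missing ones on the way down.
 */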
/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);
/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
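/*
 * Example (illustrative): f2fs_map_blocks() is the central logical-to-
 * physical mapper.  A lookup-only mapping of one block could read:
 *
 *	struct f2fs_map_blocks map = {};
 *	int err;
 *
 *	map.m_lblk = index;
 *	map.m_len = 1;
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;
 *
 * create=0 asks for lookup without allocation; the F2FS_GET_BLOCK_* and
 * F2FS_MAP_* constants are defined earlier in this header.
 */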
/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
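/*
 * Note (illustrative): the cascade above mirrors the fixed on-disk order
 * of the metadata areas, so a meta block can be classified by address
 * alone:
 *
 *	| SB | CP | SIT | NAT | SSA | Main |
 *	          ^     ^     ^     ^
 *	          |     |     |     SM_I(sbi)->main_blkaddr
 *	          |     |     SM_I(sbi)->ssa_blkaddr
 *	          |     NM_I(sbi)->nat_blkaddr
 *	          SIT_I(sbi)->sit_base_addr
 *
 * Anything below sit_base_addr is checkpoint area; each later boundary
 * starts the next area.
 */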
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)
int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
						struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
				struct page *page, struct inode *dir,
				struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
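/*
 * Example (illustrative): the extent cache lets a reader skip the dnode
 * walk entirely when a cached extent covers the offset:
 *
 *	struct extent_info ei;
 *
 *	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 *		blkaddr = ei.blk + index - ei.fofs;
 *	} else {
 *		...fall back to a dnode lookup...
 *	}
 *
 * An extent maps the file range [ei.fofs, ei.fofs + ei.len) to the
 * contiguous block range starting at ei.blk.
 */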
/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
							block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
						int *submitted,
						struct writeback_control *wbc,
						enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode)					\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		sbi->compr_new_inode++;					\
	} while (0)
#define add_compr_block_stat(inode, blocks)				\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
		sbi->compr_written_block += blocks;			\
		sbi->compr_saved_block += diff;				\
	} while (0)
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression is not supported */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
						bool failed, block_t blkaddr)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
				block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
				struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
				struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
							nid_t ino) { }
#define inc_compr_inode_stat(inode)		do { } while (0)
#endif
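/*
 * Example (illustrative): add_compr_block_stat() tracks how much space
 * compression saves.  With the default 16-block cluster, a cluster that
 * compresses down to 4 blocks yields:
 *
 *	add_compr_block_stat(inode, 4);
 *		compr_written_block += 4;	(blocks actually written)
 *		compr_saved_block   += 12;	(16 - 4 blocks saved)
 */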
static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}
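/*
 * Note (illustrative): i_compress_flag packs two things - bit
 * COMPRESS_CHKSUM requests a per-cluster checksum, and the compression
 * level (used by LZ4 when a non-zero level is configured) lives in the
 * bits starting at COMPRESS_LEVEL_OFFSET.  For example, checksum on
 * with level 9:
 *
 *	flag = (1 << COMPRESS_CHKSUM) | (9 << COMPRESS_LEVEL_OFFSET);
 *
 * Both constants are defined with the compression code, not here.
 */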
static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
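/*
 * Example (illustrative): each F2FS_FEATURE_FUNCS() line above generates
 * one predicate.  F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * so feature checks read as: if (f2fs_sb_has_encrypt(sbi)) ...
 */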
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
				f2fs_is_atomic_file(inode) ||
				f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}
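/*
 * Note (illustrative): block_unaligned_IO() folds the file offset and
 * every iovec's address/length into one value and masks it with the
 * block size.  With 4KB blocks (i_blkbits = 12, mask = 0xfff):
 *
 *	offset 8192, buffer 4KB-aligned  ->  8192 & 0xfff == 0  (aligned)
 *	offset 4100, same buffer         ->  4100 & 0xfff == 4  (unaligned)
 *
 * Any nonzero result forces the slow path, e.g. buffered I/O for writes
 * in LFS mode.
 */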
static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * for a zoned block device, fall back from direct IO to buffered
	 * IO, so that all IO can be serialized by the log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
		return true;

	return false;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */