/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete, since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
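/*
 * Illustration (example only, not part of the original header): ver_after()
 * compares checkpoint versions with wraparound-safe signed arithmetic, in
 * the same spirit as the kernel's time_after(). A minimal sketch:
 */
#if 0	/* example only */
	unsigned long long old_ver = ~0ULL;	/* about to wrap */
	unsigned long long new_ver = old_ver + 2;

	/* true: new_ver is logically newer although numerically smaller */
	WARN_ON(!ver_after(new_ver, old_ver));
#endif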
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read
 * lock while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* nocompress extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
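/*
 * Illustration (example only): the feature word lives in the on-disk
 * superblock in little-endian form, so the helpers above compare against
 * cpu_to_le32(mask) rather than converting the whole word.
 */
#if 0	/* example only, assuming a populated sbi */
	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
		pr_info("f2fs: compression feature enabled\n");
#endif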
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};
struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pending list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
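/*
 * Illustration (example only): plist_idx() maps a discard length in blocks
 * to a pending-list slot, with every length >= MAX_PLIST_NUM sharing the
 * last slot: plist_idx(1) == 0, plist_idx(16) == 15, plist_idx(511) == 510,
 * and plist_idx(512) == plist_idx(4096) == 511.
 */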
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};
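/*
 * Illustration (example only): the anonymous struct inside the union above
 * mirrors struct discard_info field for field, so a discard_cmd can be
 * handled either through its named fields (cmd->lstart) or as a whole
 * discard_info (cmd->di), e.g. when passing it to the merge helpers
 * defined later in this header.
 */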
enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity of discards that are not I/O aware */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
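/*
 * Illustration (example only): a caller reserves journal space before
 * adding entries, then bumps the count, assuming a valid journal pointer:
 */
#if 0	/* example only */
	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
		update_nats_in_cursum(journal, 1);	/* returns the old count */
#endif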
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified. This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename. For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
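/*
 * Illustration (example only): make_dentry_ptr_inline() carves the inline
 * data area into
 *
 *	[dentry bitmap][reserved][dir entries][filename slots]
 *
 * so the bitmap starts at t, the entries follow the reserved gap, and the
 * filename slots come after all SIZE_OF_DIR_ENTRY-sized entries.
 */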
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file, keeping -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed meta page */
#define MAX_RETRY_META_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
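/*
 * Illustration (example only, assuming the f2fs_map_blocks() helper as it
 * exists elsewhere in f2fs at this point in time, and NO_CHECK_TYPE as
 * defined later in this header): a read-side lookup fills m_pblk/m_len and
 * sets F2FS_MAP_MAPPED when blocks exist.
 */
#if 0	/* example only */
	struct f2fs_map_blocks map = {
		.m_lblk = index,	/* first logical block to map */
		.m_len = 1,
		.m_may_create = false,
		.m_seg_type = NO_CHECK_TYPE,
	};

	if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT) &&
	    (map.m_flags & F2FS_MAP_MAPPED))
		blkaddr = map.m_pblk;
#endif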
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)
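/*
 * Illustration (example only): the file_*() wrappers test or flip single
 * i_advise bits, e.g. tagging a file hot so the allocator can prefer the
 * hot data log for it.
 */
#if 0	/* example only */
	file_set_hot(inode);
	WARN_ON(!file_is_hot(inode));
	file_clear_hot(inode);
#endif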
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistics */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Used below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* last written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}
static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
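/*
 * Illustration (example only): two extents merge only when both the file
 * offsets and the block addresses are contiguous.
 */
#if 0	/* example only */
	struct extent_info back, front;

	set_extent_info(&back, 0, 100, 4);	/* file blocks 0..3 at 100..103 */
	set_extent_info(&front, 4, 104, 8);	/* file blocks 4..11 at 104..111 */
	WARN_ON(!__is_extent_mergeable(&back, &front));
#endif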
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be read ahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
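/*
 * Illustration (example only): the usual pattern elsewhere in f2fs is to
 * zero-initialize a dnode_of_data and then resolve it for a file offset,
 * assuming the f2fs_get_dnode_of_data()/f2fs_put_dnode() helpers.
 */
#if 0	/* example only */
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (!err)
		f2fs_put_dnode(&dn);
#endif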
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};
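/*
 * Illustration (example only): with the definitions above,
 * NR_PERSISTENT_LOG == NR_CURSEG_PERSIST_TYPE == 6, the two in-memory
 * logs occupy slots 6 (CURSEG_COLD_DATA_PINNED) and 7
 * (CURSEG_ALL_DATA_ATGC), and NO_CHECK_TYPE == NR_CURSEG_TYPE == 8.
 */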
struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates in async mode.
 * NODE			Node pages. It operates in async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};
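/*
 * Illustration (example only): PAGE_TYPE_OF_BIO() folds META_FLUSH and the
 * tracepoint-only types back onto META, so PAGE_TYPE_OF_BIO(META_FLUSH)
 * == META while DATA, NODE and META map to themselves.
 */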
enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is doing or not */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in process */
	SBI_IS_FREEZING,		/* freezefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,	/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,	/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,	/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,	/* data page which is being migrated */
	PAGE_PRIVATE_INLINE_INODE,	/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,	/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		} \
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
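/*
 * Illustration (example only): Layout A keeps the flag bits below
 * PAGE_PRIVATE_MAX and shifts the payload above them, so a stored value
 * round-trips through the helpers.
 */
#if 0	/* example only */
	set_page_private_data(page, 42);
	WARN_ON(get_page_private_data(page) != 42);
	clear_page_private_data(page);
#endif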
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define	COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8
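/*
 * Illustration (example only): an on-disk compressed cluster starts with
 * COMPRESS_HEADER_SIZE bytes of header (4-byte clen + 4-byte chksum +
 * four reserved words, i.e. 24 bytes) followed by the cdata[] payload.
 */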

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct f2fs_rwsem io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */
	pgoff_t metapage_eio_ofs;		/* EIO page offset */
	int metapage_eio_cnt;			/* EIO count */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* currently used cp pack (1 or 2) */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
	struct f2fs_rwsem node_write;		/* locking node writes */
	struct f2fs_rwsem node_change;		/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;		/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfiles */
	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;
	/* # of node block writes as roll forward recovery */
	struct percpu_counter rf_node_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];		/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct f2fs_rwsem gc_lock;		/*
						 * semaphore for GC; avoids
						 * races between GC and GC or CP
						 */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */
	spinlock_t gc_urgent_high_lock;
	bool gc_urgent_high_limited;		/* indicates having limited trial count */
	unsigned int gc_urgent_high_remaining;	/* remaining trial count for GC_URGENT_HIGH */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic files */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;	/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct f2fs_rwsem pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace updates */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	struct kobject s_feature_list_kobj;	/* /sys/fs/f2fs/<devname>/feature_list */
	struct completion s_feature_list_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For multi devices */
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	bool aligned_blksize;			/* all devices have the same logical blksize */

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

	/* For reclaimed segs statistics per GC mode */
	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */

	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */

	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
	int max_fragment_hole;			/* max hole size for block fragmentation mode */

#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct kmem_cache *page_array_slab;	/* page array entry */
	unsigned int page_array_slab_size;	/* default page array slab size */

	/* For runtime compression statistics */
	u64 compr_written_block;
	u64 compr_saved_block;
	u32 compr_new_inode;

	/* For compressed block cache */
	struct inode *compress_inode;		/* cache compressed blocks */
	unsigned int compress_percent;		/* cache page percentage */
	unsigned int compress_watermark;	/* cache page watermark */
	atomic_t compress_page_hit;		/* cache hit count */
#endif

#ifdef CONFIG_F2FS_IOSTAT
	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* For io latency related statistics info in one iostat period */
	spinlock_t iostat_lat_lock;
	struct iostat_lat_info *iostat_io_lat;
#endif
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type)	do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif
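
/*
 * Illustrative sketch (editor's note): callers pair time_to_inject() with
 * f2fs_show_injection_info() and then fail the operation exactly as if the
 * real allocation or IO had failed, e.g. in an allocation wrapper (this
 * mirrors f2fs_kmalloc() further below):
 */
#if 0	/* example only */
static void *f2fs_example_alloc(struct f2fs_sb_info *sbi, size_t size)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;		/* simulate allocation failure */
	}
	return kmalloc(size, GFP_NOFS);
}
#endif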

/*
 * Test if the mounted volume is a multi-device volume.
 * - For a single regular disk volume, sbi->s_ndevs is 0.
 * - For a single zoned disk volume, sbi->s_ndevs is 1.
 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}

/*
 * Inline functions
 */
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}
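
/*
 * Illustrative sketch (editor's note): f2fs_chksum() chains, so a checksum
 * over several buffers can be built incrementally by feeding the previous
 * result back in as the seed; per-fs checksums are typically seeded with
 * sbi->s_chksum_seed (derived from the FS UUID, see above):
 */
#if 0	/* example only */
static inline u32 f2fs_example_chksum2(struct f2fs_sb_info *sbi,
				       const void *a, unsigned int alen,
				       const void *b, unsigned int blen)
{
	u32 crc = f2fs_chksum(sbi, sbi->s_chksum_seed, a, alen);

	return f2fs_chksum(sbi, crc, b, blen);	/* continue from crc */
}
#endif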

static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
{
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);

	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
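
/*
 * Illustrative sketch (editor's note): the on-disk checkpoint flag word is
 * little-endian, so __set_ckpt_flags()/__clear_ckpt_flags() are
 * read-modify-write sequences; the locked wrappers serialize concurrent
 * updates with cp_lock.  A hypothetical caller persisting an "fsck needed"
 * hint could do:
 */
#if 0	/* example only */
	set_sbi_flag(sbi, SBI_NEED_FSCK);	/* in-memory hint */
	set_ckpt_flags(sbi, CP_FSCK_FLAG);	/* persisted with next checkpoint */
#endif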

#define init_f2fs_rwsem(sem)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_f2fs_rwsem((sem), #sem, &__key);			\
} while (0)

static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
		const char *sem_name, struct lock_class_key *key)
{
	__init_rwsem(&sem->internal_rwsem, sem_name, key);
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	init_waitqueue_head(&sem->read_waiters);
#endif
}

static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
{
	return rwsem_is_locked(&sem->internal_rwsem);
}

static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
{
	return rwsem_is_contended(&sem->internal_rwsem);
}

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
#else
	down_read(&sem->internal_rwsem);
#endif
}

static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
{
	return down_read_trylock(&sem->internal_rwsem);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
{
	down_read_nested(&sem->internal_rwsem, subclass);
}
#else
#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
#endif

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
	return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wake_up_all(&sem->read_waiters);
#endif
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	f2fs_down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	if (time_to_inject(sbi, FAULT_LOCK_OP)) {
		f2fs_show_injection_info(sbi, FAULT_LOCK_OP);
		return 0;
	}
	return f2fs_down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	f2fs_up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	f2fs_down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	f2fs_up_write(&sbi->cp_rwsem);
}

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}
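
/*
 * Illustrative sketch (editor's note): f2fs_lock_op() takes cp_rwsem as a
 * reader, so many FS operations can proceed concurrently while the
 * checkpoint path (f2fs_lock_all()) excludes them all as the single writer.
 * A typical metadata-changing call site therefore looks like:
 */
#if 0	/* example only */
static int f2fs_example_metadata_op(struct f2fs_sb_info *sbi)
{
	f2fs_lock_op(sbi);	/* block a concurrent checkpoint */
	/* ... allocate blocks / update node pages here ... */
	f2fs_unlock_op(sbi);
	return 0;
}
#endif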

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}

	/*
	 * let's increase this before the actual block count changes, so that
	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count -
					sbi->current_reserved_blocks;

	if (!__allow_reserved_blocks(sbi, inode, true))
		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		avail_user_block_count -= sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (avail_user_block_count > sbi->unusable_block_count)
			avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}
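
/*
 * Illustrative sketch (editor's note): inc_valid_block_count() may trim the
 * request instead of failing it outright -- *count is an in/out parameter.
 * A hypothetical caller therefore re-reads it after a successful return:
 */
#if 0	/* example only */
static int f2fs_example_reserve(struct f2fs_sb_info *sbi, struct inode *inode)
{
	blkcnt_t count = 16;
	int err = inc_valid_block_count(sbi, inode, &count);

	if (err)
		return err;	/* -ENOSPC: nothing was reserved */
	/* count now holds how many blocks were actually granted (<= 16) */
	return 0;
}
#endif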

__printf(2, 3)
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
#define f2fs_debug(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	sbi->total_valid_block_count -= (block_t)count;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
					sbi->current_reserved_blocks + count);
	spin_unlock(&sbi->stat_lock);
	if (unlikely(inode->i_blocks < sectors)) {
		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
			  inode->i_ino,
			  (unsigned long long)inode->i_blocks,
			  (unsigned long long)sectors);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return;
	}
	f2fs_i_blocks_write(inode, count, false, true);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DENTS ||
			count_type == F2FS_DIRTY_NODES ||
			count_type == F2FS_DIRTY_META ||
			count_type == F2FS_DIRTY_QDATA ||
			count_type == F2FS_DIRTY_IMETA)
		set_sbi_flag(sbi, SBI_IS_DIRTY);
}

static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
						sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
	int offset;

	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
		offset = (flag == SIT_BITMAP) ?
			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
		/*
		 * if large_nat_bitmap feature is enabled, leave checksum
		 * protection for all nat/sit bitmaps.
		 */
		return tmp_ptr + offset + sizeof(__le32);
	}

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return tmp_ptr + offset;
	}
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}
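
/*
 * Illustrative worked example (editor's note): f2fs keeps two checkpoint
 * packs and ping-pongs between them; pack #1 starts at cp_blkaddr and
 * pack #2 one segment later.  With cp_blkaddr == 512 and
 * blocks_per_seg == 512:
 *
 *	cur_cp_pack == 1:  __start_cp_addr() == 512,
 *			   __start_cp_next_addr() == 1024
 *	cur_cp_pack == 2:  __start_cp_addr() == 1024,
 *			   __start_cp_next_addr() == 512
 *
 * __set_cp_next_pack() below flips between the two.
 */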

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		if (inode) {
			err = dquot_alloc_inode(inode);
			if (err)
				return err;
		}
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		goto enospc;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count +
					sbi->current_reserved_blocks + 1;

	if (!__allow_reserved_blocks(sbi, inode, false))
		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		valid_block_count += sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	user_block_count = sbi->user_block_count;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		user_block_count -= sbi->unusable_block_count;

	if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode) {
		if (inode)
			dquot_free_inode(inode);
	} else {
		dquot_release_reservation_block(inode, 1);
	}
	return -ENOSPC;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
	f2fs_bug_on(sbi, !sbi->total_valid_node_count);

	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks++;

	spin_unlock(&sbi->stat_lock);

	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;
	unsigned int flags;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
			f2fs_show_injection_info(F2FS_M_SB(mapping),
							FAULT_PAGE_ALLOC);
			return NULL;
		}
	}

	if (!for_write)
		return grab_cache_page(mapping, index);

	flags = memalloc_nofs_save();
	page = grab_cache_page_write_begin(mapping, index);
	memalloc_nofs_restore(flags);

	return page;
}

static inline struct page *f2fs_pagecache_get_page(
				struct address_space *mapping, pgoff_t index,
				int fgp_flags, gfp_t gfp_mask)
{
	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
		return NULL;
	}

	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}

static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
{
	if (nofail)
		return f2fs_kmem_cache_alloc_nofail(cachep, flags);

	if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
		return NULL;
	}

	return kmem_cache_alloc(cachep, flags);
}
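
/*
 * Illustrative note (editor's): f2fs_kmem_cache_alloc_nofail() keeps
 * __GFP_NOFAIL off the fast path -- it first tries a plain allocation and
 * only retries with __GFP_NOFAIL (which may block indefinitely) when that
 * fails.  A hypothetical caller that must not fail passes nofail == true
 * (the cache name here is illustrative):
 */
#if 0	/* example only */
	struct example_entry *e;

	e = f2fs_kmem_cache_alloc(example_entry_slab, GFP_NOFS, true, sbi);
	/* e is guaranteed non-NULL here, so no error path is needed */
#endif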

static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return true;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return true;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return true;
	return false;
}

static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;

	if (is_inflight_io(sbi, type))
		return false;

	if (sbi->gc_mode == GC_URGENT_MID)
		return true;

	if (sbi->gc_mode == GC_URGENT_LOW &&
			(type == DISCARD_TIME || type == GC_TIME))
		return true;

	return f2fs_time_over(sbi, type);
}

static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline int f2fs_has_extra_attr(struct inode *inode);
static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}

static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}
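
/*
 * Illustrative note (editor's): unlike the generic set_bit()/test_bit()
 * helpers, the f2fs_*_bit() helpers above number bits MSB-first within each
 * byte: bit 0 is the 0x80 bit of byte 0, bit 7 is the 0x01 bit of byte 0,
 * bit 8 is the 0x80 bit of byte 1, and so on.  For example:
 */
#if 0	/* example only */
	char map[2] = { 0, 0 };

	f2fs_set_bit(0, map);	/* map[0] == 0x80 */
	f2fs_set_bit(9, map);	/* map[1] == 0x40 */
#endif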

/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
#define F2FS_COMPR_FL			0x00000004 /* Compress file */
#define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
#define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
#define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
#define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */

/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
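
/*
 * Illustrative example (editor's note): when F2FS_FL_INHERITED flags are
 * copied from a directory to a new child, f2fs_mask_flags() drops the ones
 * that make no sense for the child's type, e.g.
 *
 *	f2fs_mask_flags(S_IFDIR, F2FS_CASEFOLD_FL) == F2FS_CASEFOLD_FL
 *	f2fs_mask_flags(S_IFREG, F2FS_CASEFOLD_FL) == 0
 *	f2fs_mask_flags(S_IFLNK, F2FS_SYNC_FL | F2FS_NOATIME_FL) == F2FS_NOATIME_FL
 */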

static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add == true && claim == true must pair with a prior dquot_reserve_block() */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}
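
/*
 * Illustrative note (editor's): get_inline_info() and set_raw_inline() are
 * inverse translations between the on-disk i_inline bits and the in-memory
 * FI_* flags, so loading an inode and writing it straight back preserves
 * i_inline.  Conceptually:
 */
#if 0	/* example only */
	get_inline_info(inode, ri);	/* disk i_inline -> FI_* flag bits */
	set_raw_inline(inode, ri);	/* FI_* flag bits -> disk i_inline */
	/* ri->i_inline is unchanged for all of the bits handled above */
#endif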

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline bool f2fs_need_compress_data(struct inode *inode)
{
	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;

	if (!f2fs_compressed_file(inode))
		return false;

	if (compress_mode == COMPR_MODE_FS)
		return true;
	else if (compress_mode == COMPR_MODE_USER &&
			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
		return true;

	return false;
}

static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}
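
/*
 * Illustrative worked example (editor's note): for compressed inodes the
 * usable block-address slots are rounded down to a whole number of clusters
 * so a cluster never straddles a node block boundary.  Assuming
 * DEF_ADDRS_PER_BLOCK == 1018 and a cluster size of 16:
 *
 *	addrs_per_block() == ALIGN_DOWN(1018, 16) == 1008
 */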

static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}

static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}

static inline bool f2fs_is_volatile_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	if (is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	if (!is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
				&F2FS_I(inode)->i_crtime))
		return false;
	return true;
}

static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	return kmalloc(size, flags);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)	\
		((offsetof(typeof(*(f2fs_inode)), field) +	\
		sizeof((f2fs_inode)->field))			\
		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))
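
/*
 * Illustrative sketch (editor's note): F2FS_FITS_IN_INODE() is how readers
 * decide whether an optional extra attribute is actually present for a
 * given on-disk inode, based on that inode's own i_extra_isize, e.g.:
 */
#if 0	/* example only */
	if (f2fs_has_extra_attr(inode) &&
	    F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize, i_projid))
		projid = le32_to_cpu(ri->i_projid);
#endif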

/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
							bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
				struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
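
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): the usual namei pattern probes with f2fs_find_entry() and
 * only then calls f2fs_add_link(), which also rejects no-key names.
 * A minimal sketch; f2fs_put_page() is the page release helper defined
 * earlier in this header:
 */
#if 0	/* example only, kept out of the build */
static int example_add_entry(struct inode *dir, struct dentry *dentry,
				struct inode *inode)
{
	struct f2fs_dir_entry *de;
	struct page *page;

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (de) {
		f2fs_put_page(page, 0);	/* drop the looked-up page */
		return -EEXIST;
	}
	return f2fs_add_link(dentry, inode);
}
#endif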

/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
			struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
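
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): nid allocation is a reserve/commit protocol.  A minimal
 * sketch of the calling sequence; do_create_node() is a hypothetical
 * stand-in for the caller's own work:
 */
#if 0	/* example only, kept out of the build */
static int example_alloc_node(struct f2fs_sb_info *sbi)
{
	nid_t nid;
	int err;

	if (!f2fs_alloc_nid(sbi, &nid))		/* reserve a free nid */
		return -ENOSPC;

	err = do_create_node(sbi, nid);		/* hypothetical helper */
	if (err)
		f2fs_alloc_nid_failed(sbi, nid);	/* give the nid back */
	else
		f2fs_alloc_nid_done(sbi, nid);		/* commit it */
	return err;
}
#endif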

/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
					enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
				block_t blkaddr, unsigned int blkcnt);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
							block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

#define DEF_FRAGMENT_SIZE	4
#define MIN_FRAGMENT_SIZE	1
#define MAX_FRAGMENT_SIZE	512

static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
}
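
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): mutating paths typically call f2fs_balance_fs(), declared
 * above, before dirtying metadata so that foreground GC can reclaim
 * segments when free space runs low.  A minimal sketch:
 */
#if 0	/* example only, kept out of the build */
static void example_before_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* may block and kick foreground GC if free sections are scarce */
	f2fs_balance_fs(sbi, true);
}
#endif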

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
					unsigned int ra_blocks);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
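
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): when the checkpoint thread is running, callers are expected
 * to queue checkpoints through f2fs_issue_checkpoint() rather than
 * calling f2fs_write_checkpoint() directly; requests are merged and
 * completed by the thread started via f2fs_start_ckpt_thread().  A
 * minimal sketch:
 */
#if 0	/* example only, kept out of the build */
static int example_sync_fs(struct f2fs_sb_info *sbi)
{
	/* blocks until the merged checkpoint covering us completes */
	return f2fs_issue_checkpoint(sbi);
}
#endif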

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, sector_t *sector);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
extern const struct iomap_ops f2fs_iomap_ops;
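
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): f2fs_map_blocks() is the central logical-to-physical block
 * mapper.  A minimal read-side sketch; m_lblk, m_len and m_pblk are
 * fields of struct f2fs_map_blocks, and F2FS_GET_BLOCK_DEFAULT is one
 * of the flag values defined elsewhere in this header:
 */
#if 0	/* example only, kept out of the build */
static int example_map_one_block(struct inode *inode, pgoff_t index,
					block_t *blkaddr)
{
	struct f2fs_map_blocks map = {};
	int err;

	map.m_lblk = index;		/* file-logical block number */
	map.m_len = 1;			/* map a single block */
	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (!err)
		*blkaddr = map.m_pblk;	/* physical address, if mapped */
	return err;
}
#endif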

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);
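
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): foreground GC is invoked synchronously without preselecting
 * a victim.  A minimal sketch, assuming NULL_SEGNO is the "let GC pick
 * the victim" value defined elsewhere in the f2fs sources:
 */
#if 0	/* example only, kept out of the build */
static int example_run_foreground_gc(struct f2fs_sb_info *sbi)
{
	/* sync=true, background=false, force=false */
	return f2fs_gc(sbi, true, false, false, NULL_SEGNO);
}
#endif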

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
					struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
				struct page *page, struct inode *dir,
				struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);
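
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): a hit in the extent cache resolves a file-logical block
 * without walking the node tree.  A minimal sketch; fofs, blk and len
 * are fields of struct extent_info:
 */
#if 0	/* example only, kept out of the build */
static bool example_extent_lookup(struct inode *inode, pgoff_t pgofs,
					block_t *blkaddr)
{
	struct extent_info ei;

	if (!f2fs_lookup_extent_cache(inode, pgofs, &ei))
		return false;	/* miss: fall back to the node walk */

	/* the cached extent maps [fofs, fofs + len); offset into it */
	*blkaddr = ei.blk + (pgofs - ei.fofs);
	return true;
}
#endif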

/*
 * sysfs.c
 */
#define MIN_RA_MUL	2
#define MAX_RA_MUL	256

int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
				block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
				int index, int nr_pages);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
				int *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_extent_tree_range_compressed(struct inode *inode,
			pgoff_t fofs, block_t blkaddr, unsigned int llen,
			unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
			unsigned nr_pages, sector_t *last_block_in_bio,
			bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
					nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
					block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode)					\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		sbi->compr_new_inode++;					\
	} while (0)
#define add_compr_block_stat(inode, blocks)				\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
		sbi->compr_written_block += blocks;			\
		sbi->compr_saved_block += diff;				\
	} while (0)
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression support is not compiled in */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
						bool failed, block_t blkaddr)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
				block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
				struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
				struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
				nid_t ino) { }
#define inc_compr_inode_stat(inode)		do { } while (0)
static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
			pgoff_t fofs, block_t blkaddr, unsigned int llen,
			unsigned int c_len) { }
#endif

static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}
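
/*
 * Editor's illustration (added commentary, not part of the upstream
 * header): set_compress_context() above packs the checksum bit and the
 * compression level into i_compress_flag.  A minimal decoding sketch,
 * assuming COMPRESS_CHKSUM and COMPRESS_LEVEL_OFFSET keep the meanings
 * used above:
 */
#if 0	/* example only, kept out of the build */
static void example_decode_compress_flag(struct inode *inode,
					bool *chksum, unsigned int *level)
{
	unsigned int flag = F2FS_I(inode)->i_compress_flag;

	*chksum = flag & (1 << COMPRESS_CHKSUM);
	*level = flag >> COMPRESS_LEVEL_OFFSET;	/* 0 means default level */
}
#endif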

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
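
/*
 * Editor's note (added commentary, not part of the upstream header):
 * each F2FS_FEATURE_FUNCS() line above expands to a one-line feature
 * predicate; for instance the "encrypt" instantiation is equivalent to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */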

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT) ||
			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
			 !f2fs_sb_has_readonly(sbi)))
		return false;

	/*
	 * For files recovered during mount, do not create extents
	 * if the shrinker is not registered.
	 */
	if (list_empty(&sbi->s_list))
		return false;

	return S_ISREG(inode->i_mode);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
					block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) ||
		f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
							int flag)
{
	if (!f2fs_is_multi_device(sbi))
		return false;
	if (flag != F2FS_GET_BLOCK_DIO)
		return false;
	return sbi->aligned_blksize;
}
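
/*
 * Editor's note (added commentary, not part of the upstream header):
 * block_unaligned_IO() above folds the file position and the iov_iter
 * buffer alignment into a single mask test.  Worked example with 4 KiB
 * blocks (blocksize_mask == 0xfff): ki_pos == 0x3000 with a page-aligned
 * buffer gives align == 0x3000, and 0x3000 & 0xfff == 0, so the IO is
 * aligned; ki_pos == 0x3200 gives 0x3200 & 0xfff == 0x200, unaligned.
 */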

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (!fscrypt_dio_supported(iocb, iter))
		return true;
	if (fsverity_active(inode))
		return true;
	if (f2fs_compressed_file(inode))
		return true;

	/* disallow direct IO if any device has an unaligned block size */
	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
		return true;
	/*
	 * For zoned block devices, fall back from direct IO to buffered
	 * IO so that all IO can be serialized by the log-structured
	 * write path.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
		return true;

	return false;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}

static inline void f2fs_io_schedule_timeout(long timeout)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	io_schedule_timeout(timeout);
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */