1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * fs/f2fs/f2fs.h 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #ifndef _LINUX_F2FS_H 9 #define _LINUX_F2FS_H 10 11 #include <linux/uio.h> 12 #include <linux/types.h> 13 #include <linux/page-flags.h> 14 #include <linux/buffer_head.h> 15 #include <linux/slab.h> 16 #include <linux/crc32.h> 17 #include <linux/magic.h> 18 #include <linux/kobject.h> 19 #include <linux/sched.h> 20 #include <linux/cred.h> 21 #include <linux/sched/mm.h> 22 #include <linux/vmalloc.h> 23 #include <linux/bio.h> 24 #include <linux/blkdev.h> 25 #include <linux/quotaops.h> 26 #include <linux/part_stat.h> 27 #include <crypto/hash.h> 28 29 #include <linux/fscrypt.h> 30 #include <linux/fsverity.h> 31 32 struct pagevec; 33 34 #ifdef CONFIG_F2FS_CHECK_FS 35 #define f2fs_bug_on(sbi, condition) BUG_ON(condition) 36 #else 37 #define f2fs_bug_on(sbi, condition) \ 38 do { \ 39 if (WARN_ON(condition)) \ 40 set_sbi_flag(sbi, SBI_NEED_FSCK); \ 41 } while (0) 42 #endif 43 44 enum { 45 FAULT_KMALLOC, 46 FAULT_KVMALLOC, 47 FAULT_PAGE_ALLOC, 48 FAULT_PAGE_GET, 49 FAULT_ALLOC_BIO, /* it's obsolete due to bio_alloc() will never fail */ 50 FAULT_ALLOC_NID, 51 FAULT_ORPHAN, 52 FAULT_BLOCK, 53 FAULT_DIR_DEPTH, 54 FAULT_EVICT_INODE, 55 FAULT_TRUNCATE, 56 FAULT_READ_IO, 57 FAULT_CHECKPOINT, 58 FAULT_DISCARD, 59 FAULT_WRITE_IO, 60 FAULT_SLAB_ALLOC, 61 FAULT_DQUOT_INIT, 62 FAULT_LOCK_OP, 63 FAULT_BLKADDR, 64 FAULT_MAX, 65 }; 66 67 #ifdef CONFIG_F2FS_FAULT_INJECTION 68 #define F2FS_ALL_FAULT_TYPE (GENMASK(FAULT_MAX - 1, 0)) 69 70 struct f2fs_fault_info { 71 atomic_t inject_ops; 72 unsigned int inject_rate; 73 unsigned int inject_type; 74 }; 75 76 extern const char *f2fs_fault_name[FAULT_MAX]; 77 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type)) 78 #endif 79 80 /* 81 * For mount options 82 */ 83 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000001 84 #define F2FS_MOUNT_DISCARD 0x00000002 85 #define F2FS_MOUNT_NOHEAP 0x00000004 86 #define F2FS_MOUNT_XATTR_USER 0x00000008 87 #define F2FS_MOUNT_POSIX_ACL 0x00000010 88 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000020 89 #define F2FS_MOUNT_INLINE_XATTR 0x00000040 90 #define F2FS_MOUNT_INLINE_DATA 0x00000080 91 #define F2FS_MOUNT_INLINE_DENTRY 0x00000100 92 #define F2FS_MOUNT_FLUSH_MERGE 0x00000200 93 #define F2FS_MOUNT_NOBARRIER 0x00000400 94 #define F2FS_MOUNT_FASTBOOT 0x00000800 95 #define F2FS_MOUNT_READ_EXTENT_CACHE 0x00001000 96 #define F2FS_MOUNT_DATA_FLUSH 0x00002000 97 #define F2FS_MOUNT_FAULT_INJECTION 0x00004000 98 #define F2FS_MOUNT_USRQUOTA 0x00008000 99 #define F2FS_MOUNT_GRPQUOTA 0x00010000 100 #define F2FS_MOUNT_PRJQUOTA 0x00020000 101 #define F2FS_MOUNT_QUOTA 0x00040000 102 #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00080000 103 #define F2FS_MOUNT_RESERVE_ROOT 0x00100000 104 #define F2FS_MOUNT_DISABLE_CHECKPOINT 0x00200000 105 #define F2FS_MOUNT_NORECOVERY 0x00400000 106 #define F2FS_MOUNT_ATGC 0x00800000 107 #define F2FS_MOUNT_MERGE_CHECKPOINT 0x01000000 108 #define F2FS_MOUNT_GC_MERGE 0x02000000 109 #define F2FS_MOUNT_COMPRESS_CACHE 0x04000000 110 #define F2FS_MOUNT_AGE_EXTENT_CACHE 0x08000000 111 112 #define F2FS_OPTION(sbi) ((sbi)->mount_opt) 113 #define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) 114 #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option) 115 #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option) 116 117 #define ver_after(a, b) (typecheck(unsigned long long, a) && \ 118 typecheck(unsigned long long, b) && \ 119 ((long long)((a) - (b)) > 0)) 120 121 typedef u32 block_t; /* 122 * should not change u32, since it is the on-disk block 123 * address format, __le32. 124 */ 125 typedef u32 nid_t; 126 127 #define COMPRESS_EXT_NUM 16 128 129 /* 130 * An implementation of an rwsem that is explicitly unfair to readers. This 131 * prevents priority inversion when a low-priority reader acquires the read lock 132 * while sleeping on the write lock but the write lock is needed by 133 * higher-priority clients. 134 */ 135 136 struct f2fs_rwsem { 137 struct rw_semaphore internal_rwsem; 138 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 139 wait_queue_head_t read_waiters; 140 #endif 141 }; 142 143 struct f2fs_mount_info { 144 unsigned int opt; 145 int write_io_size_bits; /* Write IO size bits */ 146 block_t root_reserved_blocks; /* root reserved blocks */ 147 kuid_t s_resuid; /* reserved blocks for uid */ 148 kgid_t s_resgid; /* reserved blocks for gid */ 149 int active_logs; /* # of active logs */ 150 int inline_xattr_size; /* inline xattr size */ 151 #ifdef CONFIG_F2FS_FAULT_INJECTION 152 struct f2fs_fault_info fault_info; /* For fault injection */ 153 #endif 154 #ifdef CONFIG_QUOTA 155 /* Names of quota files with journalled quota */ 156 char *s_qf_names[MAXQUOTAS]; 157 int s_jquota_fmt; /* Format of quota to use */ 158 #endif 159 /* For which write hints are passed down to block layer */ 160 int alloc_mode; /* segment allocation policy */ 161 int fsync_mode; /* fsync policy */ 162 int fs_mode; /* fs mode: LFS or ADAPTIVE */ 163 int bggc_mode; /* bggc mode: off, on or sync */ 164 int memory_mode; /* memory mode */ 165 int errors; /* errors parameter */ 166 int discard_unit; /* 167 * discard command's offset/size should 168 * be aligned to this unit: block, 169 * segment or section 170 */ 171 struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */ 172 block_t unusable_cap_perc; /* percentage for cap */ 173 block_t unusable_cap; /* Amount of space allowed to be 174 * unusable when disabling checkpoint 175 */ 176 177 /* For compression */ 178 unsigned char compress_algorithm; /* algorithm type */ 179 unsigned char compress_log_size; /* cluster log size */ 180 unsigned char compress_level; /* compress level */ 181 bool compress_chksum; /* compressed data chksum */ 182 unsigned char compress_ext_cnt; /* extension count */ 183 unsigned char nocompress_ext_cnt; /* nocompress extension count */ 184 int compress_mode; /* compression mode */ 185 unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */ 186 unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */ 187 }; 188 189 #define F2FS_FEATURE_ENCRYPT 0x00000001 190 #define F2FS_FEATURE_BLKZONED 0x00000002 191 #define F2FS_FEATURE_ATOMIC_WRITE 0x00000004 192 #define F2FS_FEATURE_EXTRA_ATTR 0x00000008 193 #define F2FS_FEATURE_PRJQUOTA 0x00000010 194 #define F2FS_FEATURE_INODE_CHKSUM 0x00000020 195 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x00000040 196 #define F2FS_FEATURE_QUOTA_INO 0x00000080 197 #define F2FS_FEATURE_INODE_CRTIME 0x00000100 198 #define F2FS_FEATURE_LOST_FOUND 0x00000200 199 #define F2FS_FEATURE_VERITY 0x00000400 200 #define F2FS_FEATURE_SB_CHKSUM 0x00000800 201 #define F2FS_FEATURE_CASEFOLD 0x00001000 202 #define F2FS_FEATURE_COMPRESSION 0x00002000 203 #define F2FS_FEATURE_RO 0x00004000 204 205 #define __F2FS_HAS_FEATURE(raw_super, mask) \ 206 ((raw_super->feature & cpu_to_le32(mask)) != 0) 207 #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask) 208 209 /* 210 * Default values for user and/or group using reserved blocks 211 */ 212 #define F2FS_DEF_RESUID 0 213 #define F2FS_DEF_RESGID 0 214 215 /* 216 * For checkpoint manager 217 */ 218 enum { 219 NAT_BITMAP, 220 SIT_BITMAP 221 }; 222 223 #define CP_UMOUNT 0x00000001 224 #define CP_FASTBOOT 0x00000002 225 #define CP_SYNC 0x00000004 226 #define CP_RECOVERY 0x00000008 227 #define CP_DISCARD 0x00000010 228 #define CP_TRIMMED 0x00000020 229 #define CP_PAUSE 0x00000040 230 #define CP_RESIZE 0x00000080 231 232 #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ 233 #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ 234 #define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */ 235 #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ 236 #define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */ 237 #define DEF_CP_INTERVAL 60 /* 60 secs */ 238 #define DEF_IDLE_INTERVAL 5 /* 5 secs */ 239 #define DEF_DISABLE_INTERVAL 5 /* 5 secs */ 240 #define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */ 241 #define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */ 242 243 struct cp_control { 244 int reason; 245 __u64 trim_start; 246 __u64 trim_end; 247 __u64 trim_minlen; 248 }; 249 250 /* 251 * indicate meta/data type 252 */ 253 enum { 254 META_CP, 255 META_NAT, 256 META_SIT, 257 META_SSA, 258 META_MAX, 259 META_POR, 260 DATA_GENERIC, /* check range only */ 261 DATA_GENERIC_ENHANCE, /* strong check on range and segment bitmap */ 262 DATA_GENERIC_ENHANCE_READ, /* 263 * strong check on range and segment 264 * bitmap but no warning due to race 265 * condition of read on truncated area 266 * by extent_cache 267 */ 268 DATA_GENERIC_ENHANCE_UPDATE, /* 269 * strong check on range and segment 270 * bitmap for update case 271 */ 272 META_GENERIC, 273 }; 274 275 /* for the list of ino */ 276 enum { 277 ORPHAN_INO, /* for orphan ino list */ 278 APPEND_INO, /* for append ino list */ 279 UPDATE_INO, /* for update ino list */ 280 TRANS_DIR_INO, /* for transactions dir ino list */ 281 FLUSH_INO, /* for multiple device flushing */ 282 MAX_INO_ENTRY, /* max. list */ 283 }; 284 285 struct ino_entry { 286 struct list_head list; /* list head */ 287 nid_t ino; /* inode number */ 288 unsigned int dirty_device; /* dirty device bitmap */ 289 }; 290 291 /* for the list of inodes to be GCed */ 292 struct inode_entry { 293 struct list_head list; /* list head */ 294 struct inode *inode; /* vfs inode pointer */ 295 }; 296 297 struct fsync_node_entry { 298 struct list_head list; /* list head */ 299 struct page *page; /* warm node page pointer */ 300 unsigned int seq_id; /* sequence id */ 301 }; 302 303 struct ckpt_req { 304 struct completion wait; /* completion for checkpoint done */ 305 struct llist_node llnode; /* llist_node to be linked in wait queue */ 306 int ret; /* return code of checkpoint */ 307 ktime_t queue_time; /* request queued time */ 308 }; 309 310 struct ckpt_req_control { 311 struct task_struct *f2fs_issue_ckpt; /* checkpoint task */ 312 int ckpt_thread_ioprio; /* checkpoint merge thread ioprio */ 313 wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */ 314 atomic_t issued_ckpt; /* # of actually issued ckpts */ 315 atomic_t total_ckpt; /* # of total ckpts */ 316 atomic_t queued_ckpt; /* # of queued ckpts */ 317 struct llist_head issue_list; /* list for command issue */ 318 spinlock_t stat_lock; /* lock for below checkpoint time stats */ 319 unsigned int cur_time; /* cur wait time in msec for currently issued checkpoint */ 320 unsigned int peak_time; /* peak wait time in msec until now */ 321 }; 322 323 /* for the bitmap indicate blocks to be discarded */ 324 struct discard_entry { 325 struct list_head list; /* list head */ 326 block_t start_blkaddr; /* start blockaddr of current segment */ 327 unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */ 328 }; 329 330 /* minimum discard granularity, unit: block count */ 331 #define MIN_DISCARD_GRANULARITY 1 332 /* default discard granularity of inner discard thread, unit: block count */ 333 #define DEFAULT_DISCARD_GRANULARITY 16 334 /* default maximum discard granularity of ordered discard, unit: block count */ 335 #define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY 16 336 337 /* max discard pend list number */ 338 #define MAX_PLIST_NUM 512 339 #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \ 340 (MAX_PLIST_NUM - 1) : ((blk_num) - 1)) 341 342 enum { 343 D_PREP, /* initial */ 344 D_PARTIAL, /* partially submitted */ 345 D_SUBMIT, /* all submitted */ 346 D_DONE, /* finished */ 347 }; 348 349 struct discard_info { 350 block_t lstart; /* logical start address */ 351 block_t len; /* length */ 352 block_t start; /* actual start address in dev */ 353 }; 354 355 struct discard_cmd { 356 struct rb_node rb_node; /* rb node located in rb-tree */ 357 struct discard_info di; /* discard info */ 358 struct list_head list; /* command list */ 359 struct completion wait; /* compleation */ 360 struct block_device *bdev; /* bdev */ 361 unsigned short ref; /* reference count */ 362 unsigned char state; /* state */ 363 unsigned char queued; /* queued discard */ 364 int error; /* bio error */ 365 spinlock_t lock; /* for state/bio_ref updating */ 366 unsigned short bio_ref; /* bio reference count */ 367 }; 368 369 enum { 370 DPOLICY_BG, 371 DPOLICY_FORCE, 372 DPOLICY_FSTRIM, 373 DPOLICY_UMOUNT, 374 MAX_DPOLICY, 375 }; 376 377 struct discard_policy { 378 int type; /* type of discard */ 379 unsigned int min_interval; /* used for candidates exist */ 380 unsigned int mid_interval; /* used for device busy */ 381 unsigned int max_interval; /* used for candidates not exist */ 382 unsigned int max_requests; /* # of discards issued per round */ 383 unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */ 384 bool io_aware; /* issue discard in idle time */ 385 bool sync; /* submit discard with REQ_SYNC flag */ 386 bool ordered; /* issue discard by lba order */ 387 bool timeout; /* discard timeout for put_super */ 388 unsigned int granularity; /* discard granularity */ 389 }; 390 391 struct discard_cmd_control { 392 struct task_struct *f2fs_issue_discard; /* discard thread */ 393 struct list_head entry_list; /* 4KB discard entry list */ 394 struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */ 395 struct list_head wait_list; /* store on-flushing entries */ 396 struct list_head fstrim_list; /* in-flight discard from fstrim */ 397 wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */ 398 struct mutex cmd_lock; 399 unsigned int nr_discards; /* # of discards in the list */ 400 unsigned int max_discards; /* max. discards to be issued */ 401 unsigned int max_discard_request; /* max. discard request per round */ 402 unsigned int min_discard_issue_time; /* min. interval between discard issue */ 403 unsigned int mid_discard_issue_time; /* mid. interval between discard issue */ 404 unsigned int max_discard_issue_time; /* max. interval between discard issue */ 405 unsigned int discard_io_aware_gran; /* minimum discard granularity not be aware of I/O */ 406 unsigned int discard_urgent_util; /* utilization which issue discard proactively */ 407 unsigned int discard_granularity; /* discard granularity */ 408 unsigned int max_ordered_discard; /* maximum discard granularity issued by lba order */ 409 unsigned int undiscard_blks; /* # of undiscard blocks */ 410 unsigned int next_pos; /* next discard position */ 411 atomic_t issued_discard; /* # of issued discard */ 412 atomic_t queued_discard; /* # of queued discard */ 413 atomic_t discard_cmd_cnt; /* # of cached cmd count */ 414 struct rb_root_cached root; /* root of discard rb-tree */ 415 bool rbtree_check; /* config for consistence check */ 416 bool discard_wake; /* to wake up discard thread */ 417 }; 418 419 /* for the list of fsync inodes, used only during recovery */ 420 struct fsync_inode_entry { 421 struct list_head list; /* list head */ 422 struct inode *inode; /* vfs inode pointer */ 423 block_t blkaddr; /* block address locating the last fsync */ 424 block_t last_dentry; /* block address locating the last dentry */ 425 }; 426 427 #define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats)) 428 #define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits)) 429 430 #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne) 431 #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid) 432 #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se) 433 #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno) 434 435 #define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl)) 436 #define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl)) 437 438 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i) 439 { 440 int before = nats_in_cursum(journal); 441 442 journal->n_nats = cpu_to_le16(before + i); 443 return before; 444 } 445 446 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i) 447 { 448 int before = sits_in_cursum(journal); 449 450 journal->n_sits = cpu_to_le16(before + i); 451 return before; 452 } 453 454 static inline bool __has_cursum_space(struct f2fs_journal *journal, 455 int size, int type) 456 { 457 if (type == NAT_JOURNAL) 458 return size <= MAX_NAT_JENTRIES(journal); 459 return size <= MAX_SIT_JENTRIES(journal); 460 } 461 462 /* for inline stuff */ 463 #define DEF_INLINE_RESERVED_SIZE 1 464 static inline int get_extra_isize(struct inode *inode); 465 static inline int get_inline_xattr_addrs(struct inode *inode); 466 #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \ 467 (CUR_ADDRS_PER_INODE(inode) - \ 468 get_inline_xattr_addrs(inode) - \ 469 DEF_INLINE_RESERVED_SIZE)) 470 471 /* for inline dir */ 472 #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ 473 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ 474 BITS_PER_BYTE + 1)) 475 #define INLINE_DENTRY_BITMAP_SIZE(inode) \ 476 DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE) 477 #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \ 478 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ 479 NR_INLINE_DENTRY(inode) + \ 480 INLINE_DENTRY_BITMAP_SIZE(inode))) 481 482 /* 483 * For INODE and NODE manager 484 */ 485 /* for directory operations */ 486 487 struct f2fs_filename { 488 /* 489 * The filename the user specified. This is NULL for some 490 * filesystem-internal operations, e.g. converting an inline directory 491 * to a non-inline one, or roll-forward recovering an encrypted dentry. 492 */ 493 const struct qstr *usr_fname; 494 495 /* 496 * The on-disk filename. For encrypted directories, this is encrypted. 497 * This may be NULL for lookups in an encrypted dir without the key. 498 */ 499 struct fscrypt_str disk_name; 500 501 /* The dirhash of this filename */ 502 f2fs_hash_t hash; 503 504 #ifdef CONFIG_FS_ENCRYPTION 505 /* 506 * For lookups in encrypted directories: either the buffer backing 507 * disk_name, or a buffer that holds the decoded no-key name. 508 */ 509 struct fscrypt_str crypto_buf; 510 #endif 511 #if IS_ENABLED(CONFIG_UNICODE) 512 /* 513 * For casefolded directories: the casefolded name, but it's left NULL 514 * if the original name is not valid Unicode, if the original name is 515 * "." or "..", if the directory is both casefolded and encrypted and 516 * its encryption key is unavailable, or if the filesystem is doing an 517 * internal operation where usr_fname is also NULL. In all these cases 518 * we fall back to treating the name as an opaque byte sequence. 519 */ 520 struct fscrypt_str cf_name; 521 #endif 522 }; 523 524 struct f2fs_dentry_ptr { 525 struct inode *inode; 526 void *bitmap; 527 struct f2fs_dir_entry *dentry; 528 __u8 (*filename)[F2FS_SLOT_LEN]; 529 int max; 530 int nr_bitmap; 531 }; 532 533 static inline void make_dentry_ptr_block(struct inode *inode, 534 struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t) 535 { 536 d->inode = inode; 537 d->max = NR_DENTRY_IN_BLOCK; 538 d->nr_bitmap = SIZE_OF_DENTRY_BITMAP; 539 d->bitmap = t->dentry_bitmap; 540 d->dentry = t->dentry; 541 d->filename = t->filename; 542 } 543 544 static inline void make_dentry_ptr_inline(struct inode *inode, 545 struct f2fs_dentry_ptr *d, void *t) 546 { 547 int entry_cnt = NR_INLINE_DENTRY(inode); 548 int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode); 549 int reserved_size = INLINE_RESERVED_SIZE(inode); 550 551 d->inode = inode; 552 d->max = entry_cnt; 553 d->nr_bitmap = bitmap_size; 554 d->bitmap = t; 555 d->dentry = t + bitmap_size + reserved_size; 556 d->filename = t + bitmap_size + reserved_size + 557 SIZE_OF_DIR_ENTRY * entry_cnt; 558 } 559 560 /* 561 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1 562 * as its node offset to distinguish from index node blocks. 563 * But some bits are used to mark the node block. 564 */ 565 #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \ 566 >> OFFSET_BIT_SHIFT) 567 enum { 568 ALLOC_NODE, /* allocate a new node page if needed */ 569 LOOKUP_NODE, /* look up a node without readahead */ 570 LOOKUP_NODE_RA, /* 571 * look up a node with readahead called 572 * by get_data_block. 573 */ 574 }; 575 576 #define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO or flush count */ 577 578 /* congestion wait timeout value, default: 20ms */ 579 #define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20)) 580 581 /* maximum retry quota flush count */ 582 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8 583 584 /* maximum retry of EIO'ed page */ 585 #define MAX_RETRY_PAGE_EIO 100 586 587 #define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */ 588 589 #define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */ 590 591 /* dirty segments threshold for triggering CP */ 592 #define DEFAULT_DIRTY_THRESHOLD 4 593 594 #define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS 595 #define RECOVERY_MIN_RA_BLOCKS 1 596 597 #define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */ 598 599 /* for in-memory extent cache entry */ 600 #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */ 601 602 /* number of extent info in extent cache we try to shrink */ 603 #define READ_EXTENT_CACHE_SHRINK_NUMBER 128 604 605 /* number of age extent info in extent cache we try to shrink */ 606 #define AGE_EXTENT_CACHE_SHRINK_NUMBER 128 607 #define LAST_AGE_WEIGHT 30 608 #define SAME_AGE_REGION 1024 609 610 /* 611 * Define data block with age less than 1GB as hot data 612 * define data block with age less than 10GB but more than 1GB as warm data 613 */ 614 #define DEF_HOT_DATA_AGE_THRESHOLD 262144 615 #define DEF_WARM_DATA_AGE_THRESHOLD 2621440 616 617 /* extent cache type */ 618 enum extent_type { 619 EX_READ, 620 EX_BLOCK_AGE, 621 NR_EXTENT_CACHES, 622 }; 623 624 struct extent_info { 625 unsigned int fofs; /* start offset in a file */ 626 unsigned int len; /* length of the extent */ 627 union { 628 /* read extent_cache */ 629 struct { 630 /* start block address of the extent */ 631 block_t blk; 632 #ifdef CONFIG_F2FS_FS_COMPRESSION 633 /* physical extent length of compressed blocks */ 634 unsigned int c_len; 635 #endif 636 }; 637 /* block age extent_cache */ 638 struct { 639 /* block age of the extent */ 640 unsigned long long age; 641 /* last total blocks allocated */ 642 unsigned long long last_blocks; 643 }; 644 }; 645 }; 646 647 struct extent_node { 648 struct rb_node rb_node; /* rb node located in rb-tree */ 649 struct extent_info ei; /* extent info */ 650 struct list_head list; /* node in global extent list of sbi */ 651 struct extent_tree *et; /* extent tree pointer */ 652 }; 653 654 struct extent_tree { 655 nid_t ino; /* inode number */ 656 enum extent_type type; /* keep the extent tree type */ 657 struct rb_root_cached root; /* root of extent info rb-tree */ 658 struct extent_node *cached_en; /* recently accessed extent node */ 659 struct list_head list; /* to be used by sbi->zombie_list */ 660 rwlock_t lock; /* protect extent info rb-tree */ 661 atomic_t node_cnt; /* # of extent node in rb-tree*/ 662 bool largest_updated; /* largest extent updated */ 663 struct extent_info largest; /* largest cached extent for EX_READ */ 664 }; 665 666 struct extent_tree_info { 667 struct radix_tree_root extent_tree_root;/* cache extent cache entries */ 668 struct mutex extent_tree_lock; /* locking extent radix tree */ 669 struct list_head extent_list; /* lru list for shrinker */ 670 spinlock_t extent_lock; /* locking extent lru list */ 671 atomic_t total_ext_tree; /* extent tree count */ 672 struct list_head zombie_list; /* extent zombie tree list */ 673 atomic_t total_zombie_tree; /* extent zombie tree count */ 674 atomic_t total_ext_node; /* extent info count */ 675 }; 676 677 /* 678 * State of block returned by f2fs_map_blocks. 679 */ 680 #define F2FS_MAP_NEW (1U << 0) 681 #define F2FS_MAP_MAPPED (1U << 1) 682 #define F2FS_MAP_DELALLOC (1U << 2) 683 #define F2FS_MAP_FLAGS (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\ 684 F2FS_MAP_DELALLOC) 685 686 struct f2fs_map_blocks { 687 struct block_device *m_bdev; /* for multi-device dio */ 688 block_t m_pblk; 689 block_t m_lblk; 690 unsigned int m_len; 691 unsigned int m_flags; 692 pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */ 693 pgoff_t *m_next_extent; /* point to next possible extent */ 694 int m_seg_type; 695 bool m_may_create; /* indicate it is from write path */ 696 bool m_multidev_dio; /* indicate it allows multi-device dio */ 697 }; 698 699 /* for flag in get_data_block */ 700 enum { 701 F2FS_GET_BLOCK_DEFAULT, 702 F2FS_GET_BLOCK_FIEMAP, 703 F2FS_GET_BLOCK_BMAP, 704 F2FS_GET_BLOCK_DIO, 705 F2FS_GET_BLOCK_PRE_DIO, 706 F2FS_GET_BLOCK_PRE_AIO, 707 F2FS_GET_BLOCK_PRECACHE, 708 }; 709 710 /* 711 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. 712 */ 713 #define FADVISE_COLD_BIT 0x01 714 #define FADVISE_LOST_PINO_BIT 0x02 715 #define FADVISE_ENCRYPT_BIT 0x04 716 #define FADVISE_ENC_NAME_BIT 0x08 717 #define FADVISE_KEEP_SIZE_BIT 0x10 718 #define FADVISE_HOT_BIT 0x20 719 #define FADVISE_VERITY_BIT 0x40 720 #define FADVISE_TRUNC_BIT 0x80 721 722 #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT) 723 724 #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) 725 #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) 726 #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) 727 728 #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) 729 #define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) 730 #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) 731 732 #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT) 733 #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT) 734 735 #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT) 736 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT) 737 738 #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT) 739 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT) 740 741 #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT) 742 #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT) 743 #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT) 744 745 #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT) 746 #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT) 747 748 #define file_should_truncate(inode) is_file(inode, FADVISE_TRUNC_BIT) 749 #define file_need_truncate(inode) set_file(inode, FADVISE_TRUNC_BIT) 750 #define file_dont_truncate(inode) clear_file(inode, FADVISE_TRUNC_BIT) 751 752 #define DEF_DIR_LEVEL 0 753 754 enum { 755 GC_FAILURE_PIN, 756 MAX_GC_FAILURE 757 }; 758 759 /* used for f2fs_inode_info->flags */ 760 enum { 761 FI_NEW_INODE, /* indicate newly allocated inode */ 762 FI_DIRTY_INODE, /* indicate inode is dirty or not */ 763 FI_AUTO_RECOVER, /* indicate inode is recoverable */ 764 FI_DIRTY_DIR, /* indicate directory has dirty pages */ 765 FI_INC_LINK, /* need to increment i_nlink */ 766 FI_ACL_MODE, /* indicate acl mode */ 767 FI_NO_ALLOC, /* should not allocate any blocks */ 768 FI_FREE_NID, /* free allocated nide */ 769 FI_NO_EXTENT, /* not to use the extent cache */ 770 FI_INLINE_XATTR, /* used for inline xattr */ 771 FI_INLINE_DATA, /* used for inline data*/ 772 FI_INLINE_DENTRY, /* used for inline dentry */ 773 FI_APPEND_WRITE, /* inode has appended data */ 774 FI_UPDATE_WRITE, /* inode has in-place-update data */ 775 FI_NEED_IPU, /* used for ipu per file */ 776 FI_ATOMIC_FILE, /* indicate atomic file */ 777 FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ 778 FI_DROP_CACHE, /* drop dirty page cache */ 779 FI_DATA_EXIST, /* indicate data exists */ 780 FI_INLINE_DOTS, /* indicate inline dot dentries */ 781 FI_SKIP_WRITES, /* should skip data page writeback */ 782 FI_OPU_WRITE, /* used for opu per file */ 783 FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ 784 FI_PREALLOCATED_ALL, /* all blocks for write were preallocated */ 785 FI_HOT_DATA, /* indicate file is hot */ 786 FI_EXTRA_ATTR, /* indicate file has extra attribute */ 787 FI_PROJ_INHERIT, /* indicate file inherits projectid */ 788 FI_PIN_FILE, /* indicate file should not be gced */ 789 FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ 790 FI_COMPRESSED_FILE, /* indicate file's data can be compressed */ 791 FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */ 792 FI_MMAP_FILE, /* indicate file was mmapped */ 793 FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */ 794 FI_COMPRESS_RELEASED, /* compressed blocks were released */ 795 FI_ALIGNED_WRITE, /* enable aligned write */ 796 FI_COW_FILE, /* indicate COW file */ 797 FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */ 798 FI_ATOMIC_REPLACE, /* indicate atomic replace */ 799 FI_MAX, /* max flag, never be used */ 800 }; 801 802 struct f2fs_inode_info { 803 struct inode vfs_inode; /* serve a vfs inode */ 804 unsigned long i_flags; /* keep an inode flags for ioctl */ 805 unsigned char i_advise; /* use to give file attribute hints */ 806 unsigned char i_dir_level; /* use for dentry level for large dir */ 807 unsigned int i_current_depth; /* only for directory depth */ 808 /* for gc failure statistic */ 809 unsigned int i_gc_failures[MAX_GC_FAILURE]; 810 unsigned int i_pino; /* parent inode number */ 811 umode_t i_acl_mode; /* keep file acl mode temporarily */ 812 813 /* Use below internally in f2fs*/ 814 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ 815 struct f2fs_rwsem i_sem; /* protect fi info */ 816 atomic_t dirty_pages; /* # of dirty pages */ 817 f2fs_hash_t chash; /* hash value of given file name */ 818 unsigned int clevel; /* maximum level of given file name */ 819 struct task_struct *task; /* lookup and create consistency */ 820 struct task_struct *cp_task; /* separate cp/wb IO stats*/ 821 struct task_struct *wb_task; /* indicate inode is in context of writeback */ 822 nid_t i_xattr_nid; /* node id that contains xattrs */ 823 loff_t last_disk_size; /* lastly written file size */ 824 spinlock_t i_size_lock; /* protect last_disk_size */ 825 826 #ifdef CONFIG_QUOTA 827 struct dquot *i_dquot[MAXQUOTAS]; 828 829 /* quota space reservation, managed internally by quota code */ 830 qsize_t i_reserved_quota; 831 #endif 832 struct list_head dirty_list; /* dirty list for dirs and files */ 833 struct list_head gdirty_list; /* linked in global dirty list */ 834 struct task_struct *atomic_write_task; /* store atomic write task */ 835 struct extent_tree *extent_tree[NR_EXTENT_CACHES]; 836 /* cached extent_tree entry */ 837 struct inode *cow_inode; /* copy-on-write inode for atomic write */ 838 839 /* avoid racing between foreground op and gc */ 840 struct f2fs_rwsem i_gc_rwsem[2]; 841 struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */ 842 843 int i_extra_isize; /* size of extra space located in i_addr */ 844 kprojid_t i_projid; /* id for project quota */ 845 int i_inline_xattr_size; /* inline xattr size */ 846 struct timespec64 i_crtime; /* inode creation time */ 847 struct timespec64 i_disk_time[3];/* inode disk times */ 848 849 /* for file compress */ 850 atomic_t i_compr_blocks; /* # of compressed blocks */ 851 unsigned char i_compress_algorithm; /* algorithm type */ 852 unsigned char i_log_cluster_size; /* log of cluster size */ 853 unsigned char i_compress_level; /* compress level (lz4hc,zstd) */ 854 unsigned char i_compress_flag; /* compress flag */ 855 unsigned int i_cluster_size; /* cluster size */ 856 857 unsigned int atomic_write_cnt; 858 loff_t original_i_size; /* original i_size before atomic write */ 859 }; 860 861 static inline void get_read_extent_info(struct extent_info *ext, 862 struct f2fs_extent *i_ext) 863 { 864 ext->fofs = le32_to_cpu(i_ext->fofs); 865 ext->blk = le32_to_cpu(i_ext->blk); 866 ext->len = le32_to_cpu(i_ext->len); 867 } 868 869 static inline void set_raw_read_extent(struct extent_info *ext, 870 struct f2fs_extent *i_ext) 871 { 872 i_ext->fofs = cpu_to_le32(ext->fofs); 873 i_ext->blk = cpu_to_le32(ext->blk); 874 i_ext->len = cpu_to_le32(ext->len); 875 } 876 877 static inline bool __is_discard_mergeable(struct discard_info *back, 878 struct discard_info *front, unsigned int max_len) 879 { 880 return (back->lstart + back->len == front->lstart) && 881 (back->len + front->len <= max_len); 882 } 883 884 static inline bool __is_discard_back_mergeable(struct discard_info *cur, 885 struct discard_info *back, unsigned int max_len) 886 { 887 return __is_discard_mergeable(back, cur, max_len); 888 } 889 890 static inline bool __is_discard_front_mergeable(struct discard_info *cur, 891 struct discard_info *front, unsigned int max_len) 892 { 893 return __is_discard_mergeable(cur, front, max_len); 894 } 895 896 /* 897 * For free nid management 898 */ 899 enum nid_state { 900 FREE_NID, /* newly added to free nid list */ 901 PREALLOC_NID, /* it is preallocated */ 902 MAX_NID_STATE, 903 }; 904 905 enum nat_state { 906 TOTAL_NAT, 907 DIRTY_NAT, 908 RECLAIMABLE_NAT, 909 MAX_NAT_STATE, 910 }; 911 912 struct f2fs_nm_info { 913 block_t nat_blkaddr; /* base disk address of NAT */ 914 nid_t max_nid; /* maximum possible node ids */ 915 nid_t available_nids; /* # of available node ids */ 916 nid_t next_scan_nid; /* the next nid to be scanned */ 917 nid_t max_rf_node_blocks; /* max # of nodes for recovery */ 918 unsigned int ram_thresh; /* control the memory footprint */ 919 unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */ 920 unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */ 921 922 /* NAT cache management */ 923 struct radix_tree_root nat_root;/* root of the nat entry cache */ 924 struct radix_tree_root nat_set_root;/* root of the nat set cache */ 925 struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */ 926 struct list_head nat_entries; /* cached nat entry list (clean) */ 927 spinlock_t nat_list_lock; /* protect clean nat entry list */ 928 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ 929 unsigned int nat_blocks; /* # of nat blocks */ 930 931 /* free node ids management */ 932 struct radix_tree_root free_nid_root;/* root of the free_nid cache */ 933 struct list_head free_nid_list; /* list for free nids excluding preallocated nids */ 934 unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */ 935 spinlock_t nid_list_lock; /* protect nid lists ops */ 936 struct mutex build_lock; /* lock for build free nids */ 937 unsigned char **free_nid_bitmap; 938 unsigned char *nat_block_bitmap; 939 unsigned short *free_nid_count; /* free nid count of NAT block */ 940 941 /* for checkpoint */ 942 char *nat_bitmap; /* NAT bitmap pointer */ 943 944 unsigned int nat_bits_blocks; /* # of nat bits blocks */ 945 unsigned char *nat_bits; /* NAT bits blocks */ 946 unsigned char *full_nat_bits; /* full NAT pages */ 947 unsigned char *empty_nat_bits; /* empty NAT pages */ 948 #ifdef CONFIG_F2FS_CHECK_FS 949 char *nat_bitmap_mir; /* NAT bitmap mirror */ 950 #endif 951 int bitmap_size; /* bitmap size */ 952 }; 953 954 /* 955 * this structure is used as one of function parameters. 956 * all the information are dedicated to a given direct node block determined 957 * by the data offset in a file. 958 */ 959 struct dnode_of_data { 960 struct inode *inode; /* vfs inode pointer */ 961 struct page *inode_page; /* its inode page, NULL is possible */ 962 struct page *node_page; /* cached direct node page */ 963 nid_t nid; /* node id of the direct node block */ 964 unsigned int ofs_in_node; /* data offset in the node page */ 965 bool inode_page_locked; /* inode page is locked or not */ 966 bool node_changed; /* is node block changed */ 967 char cur_level; /* level of hole node page */ 968 char max_level; /* level of current page located */ 969 block_t data_blkaddr; /* block address of the node block */ 970 }; 971 972 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, 973 struct page *ipage, struct page *npage, nid_t nid) 974 { 975 memset(dn, 0, sizeof(*dn)); 976 dn->inode = inode; 977 dn->inode_page = ipage; 978 dn->node_page = npage; 979 dn->nid = nid; 980 } 981 982 /* 983 * For SIT manager 984 * 985 * By default, there are 6 active log areas across the whole main area. 986 * When considering hot and cold data separation to reduce cleaning overhead, 987 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types, 988 * respectively. 989 * In the current design, you should not change the numbers intentionally. 990 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6 991 * logs individually according to the underlying devices. (default: 6) 992 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for 993 * data and 8 for node logs. 994 */ 995 #define NR_CURSEG_DATA_TYPE (3) 996 #define NR_CURSEG_NODE_TYPE (3) 997 #define NR_CURSEG_INMEM_TYPE (2) 998 #define NR_CURSEG_RO_TYPE (2) 999 #define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) 1000 #define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE) 1001 1002 enum { 1003 CURSEG_HOT_DATA = 0, /* directory entry blocks */ 1004 CURSEG_WARM_DATA, /* data blocks */ 1005 CURSEG_COLD_DATA, /* multimedia or GCed data blocks */ 1006 CURSEG_HOT_NODE, /* direct node blocks of directory files */ 1007 CURSEG_WARM_NODE, /* direct node blocks of normal files */ 1008 CURSEG_COLD_NODE, /* indirect node blocks */ 1009 NR_PERSISTENT_LOG, /* number of persistent log */ 1010 CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG, 1011 /* pinned file that needs consecutive block address */ 1012 CURSEG_ALL_DATA_ATGC, /* SSR alloctor in hot/warm/cold data area */ 1013 NO_CHECK_TYPE, /* number of persistent & inmem log */ 1014 }; 1015 1016 struct flush_cmd { 1017 struct completion wait; 1018 struct llist_node llnode; 1019 nid_t ino; 1020 int ret; 1021 }; 1022 1023 struct flush_cmd_control { 1024 struct task_struct *f2fs_issue_flush; /* flush thread */ 1025 wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */ 1026 atomic_t issued_flush; /* # of issued flushes */ 1027 atomic_t queued_flush; /* # of queued flushes */ 1028 struct llist_head issue_list; /* list for command issue */ 1029 struct llist_node *dispatch_list; /* list for command dispatch */ 1030 }; 1031 1032 struct f2fs_sm_info { 1033 struct sit_info *sit_info; /* whole segment information */ 1034 struct free_segmap_info *free_info; /* free segment information */ 1035 struct dirty_seglist_info *dirty_info; /* dirty segment information */ 1036 struct curseg_info *curseg_array; /* active segment information */ 1037 1038 struct f2fs_rwsem curseg_lock; /* for preventing curseg change */ 1039 1040 block_t seg0_blkaddr; /* block address of 0'th segment */ 1041 block_t main_blkaddr; /* start block address of main area */ 1042 block_t ssa_blkaddr; /* start block address of SSA area */ 1043 1044 unsigned int segment_count; /* total # of segments */ 1045 unsigned int main_segments; /* # of segments in main area */ 1046 unsigned int reserved_segments; /* # of reserved segments */ 1047 unsigned int additional_reserved_segments;/* reserved segs for IO align feature */ 1048 unsigned int ovp_segments; /* # of overprovision segments */ 1049 1050 /* a threshold to reclaim prefree segments */ 1051 unsigned int rec_prefree_segments; 1052 1053 struct list_head sit_entry_set; /* sit entry set list */ 1054 1055 unsigned int ipu_policy; /* in-place-update policy */ 1056 unsigned int min_ipu_util; /* in-place-update threshold */ 1057 unsigned int min_fsync_blocks; /* threshold for fsync */ 1058 unsigned int min_seq_blocks; /* threshold for sequential blocks */ 1059 unsigned int min_hot_blocks; /* threshold for hot block allocation */ 1060 unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */ 1061 1062 /* for flush command control */ 1063 struct flush_cmd_control *fcc_info; 1064 1065 /* for discard command control */ 1066 struct discard_cmd_control *dcc_info; 1067 }; 1068 1069 /* 1070 * For superblock 1071 */ 1072 /* 1073 * COUNT_TYPE for monitoring 1074 * 1075 * f2fs monitors the number of several block types such as on-writeback, 1076 * dirty dentry blocks, dirty node blocks, and dirty meta blocks. 1077 */ 1078 #define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA) 1079 enum count_type { 1080 F2FS_DIRTY_DENTS, 1081 F2FS_DIRTY_DATA, 1082 F2FS_DIRTY_QDATA, 1083 F2FS_DIRTY_NODES, 1084 F2FS_DIRTY_META, 1085 F2FS_DIRTY_IMETA, 1086 F2FS_WB_CP_DATA, 1087 F2FS_WB_DATA, 1088 F2FS_RD_DATA, 1089 F2FS_RD_NODE, 1090 F2FS_RD_META, 1091 F2FS_DIO_WRITE, 1092 F2FS_DIO_READ, 1093 NR_COUNT_TYPE, 1094 }; 1095 1096 /* 1097 * The below are the page types of bios used in submit_bio(). 1098 * The available types are: 1099 * DATA User data pages. It operates as async mode. 1100 * NODE Node pages. It operates as async mode. 1101 * META FS metadata pages such as SIT, NAT, CP. 1102 * NR_PAGE_TYPE The number of page types. 1103 * META_FLUSH Make sure the previous pages are written 1104 * with waiting the bio's completion 1105 * ... Only can be used with META. 1106 */ 1107 #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type)) 1108 enum page_type { 1109 DATA = 0, 1110 NODE = 1, /* should not change this */ 1111 META, 1112 NR_PAGE_TYPE, 1113 META_FLUSH, 1114 IPU, /* the below types are used by tracepoints only. */ 1115 OPU, 1116 }; 1117 1118 enum temp_type { 1119 HOT = 0, /* must be zero for meta bio */ 1120 WARM, 1121 COLD, 1122 NR_TEMP_TYPE, 1123 }; 1124 1125 enum need_lock_type { 1126 LOCK_REQ = 0, 1127 LOCK_DONE, 1128 LOCK_RETRY, 1129 }; 1130 1131 enum cp_reason_type { 1132 CP_NO_NEEDED, 1133 CP_NON_REGULAR, 1134 CP_COMPRESSED, 1135 CP_HARDLINK, 1136 CP_SB_NEED_CP, 1137 CP_WRONG_PINO, 1138 CP_NO_SPC_ROLL, 1139 CP_NODE_NEED_CP, 1140 CP_FASTBOOT_MODE, 1141 CP_SPEC_LOG_NUM, 1142 CP_RECOVER_DIR, 1143 }; 1144 1145 enum iostat_type { 1146 /* WRITE IO */ 1147 APP_DIRECT_IO, /* app direct write IOs */ 1148 APP_BUFFERED_IO, /* app buffered write IOs */ 1149 APP_WRITE_IO, /* app write IOs */ 1150 APP_MAPPED_IO, /* app mapped IOs */ 1151 APP_BUFFERED_CDATA_IO, /* app buffered write IOs on compressed file */ 1152 APP_MAPPED_CDATA_IO, /* app mapped write IOs on compressed file */ 1153 FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */ 1154 FS_CDATA_IO, /* data IOs from kworker/fsync/reclaimer on compressed file */ 1155 FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */ 1156 FS_META_IO, /* meta IOs from kworker/reclaimer */ 1157 FS_GC_DATA_IO, /* data IOs from forground gc */ 1158 FS_GC_NODE_IO, /* node IOs from forground gc */ 1159 FS_CP_DATA_IO, /* data IOs from checkpoint */ 1160 FS_CP_NODE_IO, /* node IOs from checkpoint */ 1161 FS_CP_META_IO, /* meta IOs from checkpoint */ 1162 1163 /* READ IO */ 1164 APP_DIRECT_READ_IO, /* app direct read IOs */ 1165 APP_BUFFERED_READ_IO, /* app buffered read IOs */ 1166 APP_READ_IO, /* app read IOs */ 1167 APP_MAPPED_READ_IO, /* app mapped read IOs */ 1168 APP_BUFFERED_CDATA_READ_IO, /* app buffered read IOs on compressed file */ 1169 APP_MAPPED_CDATA_READ_IO, /* app mapped read IOs on compressed file */ 1170 FS_DATA_READ_IO, /* data read IOs */ 1171 FS_GDATA_READ_IO, /* data read IOs from background gc */ 1172 FS_CDATA_READ_IO, /* compressed data read IOs */ 1173 FS_NODE_READ_IO, /* node read IOs */ 1174 FS_META_READ_IO, /* meta read IOs */ 1175 1176 /* other */ 1177 FS_DISCARD_IO, /* discard */ 1178 FS_FLUSH_IO, /* flush */ 1179 NR_IO_TYPE, 1180 }; 1181 1182 struct f2fs_io_info { 1183 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ 1184 nid_t ino; /* inode number */ 1185 enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ 1186 enum temp_type temp; /* contains HOT/WARM/COLD */ 1187 enum req_op op; /* contains REQ_OP_ */ 1188 blk_opf_t op_flags; /* req_flag_bits */ 1189 block_t new_blkaddr; /* new block address to be written */ 1190 block_t old_blkaddr; /* old block address before Cow */ 1191 struct page *page; /* page to be written */ 1192 struct page *encrypted_page; /* encrypted page */ 1193 struct page *compressed_page; /* compressed page */ 1194 struct list_head list; /* serialize IOs */ 1195 unsigned int compr_blocks; /* # of compressed block addresses */ 1196 unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */ 1197 unsigned int version:8; /* version of the node */ 1198 unsigned int submitted:1; /* indicate IO submission */ 1199 unsigned int in_list:1; /* indicate fio is in io_list */ 1200 unsigned int is_por:1; /* indicate IO is from recovery or not */ 1201 unsigned int retry:1; /* need to reallocate block address */ 1202 unsigned int encrypted:1; /* indicate file is encrypted */ 1203 unsigned int post_read:1; /* require post read */ 1204 enum iostat_type io_type; /* io type */ 1205 struct writeback_control *io_wbc; /* writeback control */ 1206 struct bio **bio; /* bio for ipu */ 1207 sector_t *last_block; /* last block number in bio */ 1208 }; 1209 1210 struct bio_entry { 1211 struct bio *bio; 1212 struct list_head list; 1213 }; 1214 1215 #define is_read_io(rw) ((rw) == READ) 1216 struct f2fs_bio_info { 1217 struct f2fs_sb_info *sbi; /* f2fs superblock */ 1218 struct bio *bio; /* bios to merge */ 1219 sector_t last_block_in_bio; /* last block number */ 1220 struct f2fs_io_info fio; /* store buffered io info. */ 1221 #ifdef CONFIG_BLK_DEV_ZONED 1222 struct completion zone_wait; /* condition value for the previous open zone to close */ 1223 struct bio *zone_pending_bio; /* pending bio for the previous zone */ 1224 void *bi_private; /* previous bi_private for pending bio */ 1225 #endif 1226 struct f2fs_rwsem io_rwsem; /* blocking op for bio */ 1227 spinlock_t io_lock; /* serialize DATA/NODE IOs */ 1228 struct list_head io_list; /* track fios */ 1229 struct list_head bio_list; /* bio entry list head */ 1230 struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */ 1231 }; 1232 1233 #define FDEV(i) (sbi->devs[i]) 1234 #define RDEV(i) (raw_super->devs[i]) 1235 struct f2fs_dev_info { 1236 struct block_device *bdev; 1237 char path[MAX_PATH_LEN]; 1238 unsigned int total_segments; 1239 block_t start_blk; 1240 block_t end_blk; 1241 #ifdef CONFIG_BLK_DEV_ZONED 1242 unsigned int nr_blkz; /* Total number of zones */ 1243 unsigned long *blkz_seq; /* Bitmap indicating sequential zones */ 1244 #endif 1245 }; 1246 1247 enum inode_type { 1248 DIR_INODE, /* for dirty dir inode */ 1249 FILE_INODE, /* for dirty regular/symlink inode */ 1250 DIRTY_META, /* for all dirtied inode metadata */ 1251 NR_INODE_TYPE, 1252 }; 1253 1254 /* for inner inode cache management */ 1255 struct inode_management { 1256 struct radix_tree_root ino_root; /* ino entry array */ 1257 spinlock_t ino_lock; /* for ino entry lock */ 1258 struct list_head ino_list; /* inode list head */ 1259 unsigned long ino_num; /* number of entries */ 1260 }; 1261 1262 /* for GC_AT */ 1263 struct atgc_management { 1264 bool atgc_enabled; /* ATGC is enabled or not */ 1265 struct rb_root_cached root; /* root of victim rb-tree */ 1266 struct list_head victim_list; /* linked with all victim entries */ 1267 unsigned int victim_count; /* victim count in rb-tree */ 1268 unsigned int candidate_ratio; /* candidate ratio */ 1269 unsigned int max_candidate_count; /* max candidate count */ 1270 unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */ 1271 unsigned long long age_threshold; /* age threshold */ 1272 }; 1273 1274 struct f2fs_gc_control { 1275 unsigned int victim_segno; /* target victim segment number */ 1276 int init_gc_type; /* FG_GC or BG_GC */ 1277 bool no_bg_gc; /* check the space and stop bg_gc */ 1278 bool should_migrate_blocks; /* should migrate blocks */ 1279 bool err_gc_skipped; /* return EAGAIN if GC skipped */ 1280 unsigned int nr_free_secs; /* # of free sections to do GC */ 1281 }; 1282 1283 /* 1284 * For s_flag in struct f2fs_sb_info 1285 * Modification on enum should be synchronized with s_flag array 1286 */ 1287 enum { 1288 SBI_IS_DIRTY, /* dirty flag for checkpoint */ 1289 SBI_IS_CLOSE, /* specify unmounting */ 1290 SBI_NEED_FSCK, /* need fsck.f2fs to fix */ 1291 SBI_POR_DOING, /* recovery is doing or not */ 1292 SBI_NEED_SB_WRITE, /* need to recover superblock */ 1293 SBI_NEED_CP, /* need to checkpoint */ 1294 SBI_IS_SHUTDOWN, /* shutdown by ioctl */ 1295 SBI_IS_RECOVERED, /* recovered orphan/data */ 1296 SBI_CP_DISABLED, /* CP was disabled last mount */ 1297 SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */ 1298 SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */ 1299 SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */ 1300 SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */ 1301 SBI_IS_RESIZEFS, /* resizefs is in process */ 1302 SBI_IS_FREEZING, /* freezefs is in process */ 1303 SBI_IS_WRITABLE, /* remove ro mountoption transiently */ 1304 MAX_SBI_FLAG, 1305 }; 1306 1307 enum { 1308 CP_TIME, 1309 REQ_TIME, 1310 DISCARD_TIME, 1311 GC_TIME, 1312 DISABLE_TIME, 1313 UMOUNT_DISCARD_TIMEOUT, 1314 MAX_TIME, 1315 }; 1316 1317 /* Note that you need to keep synchronization with this gc_mode_names array */ 1318 enum { 1319 GC_NORMAL, 1320 GC_IDLE_CB, 1321 GC_IDLE_GREEDY, 1322 GC_IDLE_AT, 1323 GC_URGENT_HIGH, 1324 GC_URGENT_LOW, 1325 GC_URGENT_MID, 1326 MAX_GC_MODE, 1327 }; 1328 1329 enum { 1330 BGGC_MODE_ON, /* background gc is on */ 1331 BGGC_MODE_OFF, /* background gc is off */ 1332 BGGC_MODE_SYNC, /* 1333 * background gc is on, migrating blocks 1334 * like foreground gc 1335 */ 1336 }; 1337 1338 enum { 1339 FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */ 1340 FS_MODE_LFS, /* use lfs allocation only */ 1341 FS_MODE_FRAGMENT_SEG, /* segment fragmentation mode */ 1342 FS_MODE_FRAGMENT_BLK, /* block fragmentation mode */ 1343 }; 1344 1345 enum { 1346 ALLOC_MODE_DEFAULT, /* stay default */ 1347 ALLOC_MODE_REUSE, /* reuse segments as much as possible */ 1348 }; 1349 1350 enum fsync_mode { 1351 FSYNC_MODE_POSIX, /* fsync follows posix semantics */ 1352 FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */ 1353 FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */ 1354 }; 1355 1356 enum { 1357 COMPR_MODE_FS, /* 1358 * automatically compress compression 1359 * enabled files 1360 */ 1361 COMPR_MODE_USER, /* 1362 * automatical compression is disabled. 1363 * user can control the file compression 1364 * using ioctls 1365 */ 1366 }; 1367 1368 enum { 1369 DISCARD_UNIT_BLOCK, /* basic discard unit is block */ 1370 DISCARD_UNIT_SEGMENT, /* basic discard unit is segment */ 1371 DISCARD_UNIT_SECTION, /* basic discard unit is section */ 1372 }; 1373 1374 enum { 1375 MEMORY_MODE_NORMAL, /* memory mode for normal devices */ 1376 MEMORY_MODE_LOW, /* memory mode for low memry devices */ 1377 }; 1378 1379 enum errors_option { 1380 MOUNT_ERRORS_READONLY, /* remount fs ro on errors */ 1381 MOUNT_ERRORS_CONTINUE, /* continue on errors */ 1382 MOUNT_ERRORS_PANIC, /* panic on errors */ 1383 }; 1384 1385 static inline int f2fs_test_bit(unsigned int nr, char *addr); 1386 static inline void f2fs_set_bit(unsigned int nr, char *addr); 1387 static inline void f2fs_clear_bit(unsigned int nr, char *addr); 1388 1389 /* 1390 * Layout of f2fs page.private: 1391 * 1392 * Layout A: lowest bit should be 1 1393 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... | 1394 * bit 0 PAGE_PRIVATE_NOT_POINTER 1395 * bit 1 PAGE_PRIVATE_DUMMY_WRITE 1396 * bit 2 PAGE_PRIVATE_ONGOING_MIGRATION 1397 * bit 3 PAGE_PRIVATE_INLINE_INODE 1398 * bit 4 PAGE_PRIVATE_REF_RESOURCE 1399 * bit 5- f2fs private data 1400 * 1401 * Layout B: lowest bit should be 0 1402 * page.private is a wrapped pointer. 1403 */ 1404 enum { 1405 PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */ 1406 PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */ 1407 PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */ 1408 PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ 1409 PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ 1410 PAGE_PRIVATE_MAX 1411 }; 1412 1413 /* For compression */ 1414 enum compress_algorithm_type { 1415 COMPRESS_LZO, 1416 COMPRESS_LZ4, 1417 COMPRESS_ZSTD, 1418 COMPRESS_LZORLE, 1419 COMPRESS_MAX, 1420 }; 1421 1422 enum compress_flag { 1423 COMPRESS_CHKSUM, 1424 COMPRESS_MAX_FLAG, 1425 }; 1426 1427 #define COMPRESS_WATERMARK 20 1428 #define COMPRESS_PERCENT 20 1429 1430 #define COMPRESS_DATA_RESERVED_SIZE 4 1431 struct compress_data { 1432 __le32 clen; /* compressed data size */ 1433 __le32 chksum; /* compressed data chksum */ 1434 __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */ 1435 u8 cdata[]; /* compressed data */ 1436 }; 1437 1438 #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data)) 1439 1440 #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000 1441 1442 #define COMPRESS_LEVEL_OFFSET 8 1443 1444 /* compress context */ 1445 struct compress_ctx { 1446 struct inode *inode; /* inode the context belong to */ 1447 pgoff_t cluster_idx; /* cluster index number */ 1448 unsigned int cluster_size; /* page count in cluster */ 1449 unsigned int log_cluster_size; /* log of cluster size */ 1450 struct page **rpages; /* pages store raw data in cluster */ 1451 unsigned int nr_rpages; /* total page number in rpages */ 1452 struct page **cpages; /* pages store compressed data in cluster */ 1453 unsigned int nr_cpages; /* total page number in cpages */ 1454 unsigned int valid_nr_cpages; /* valid page number in cpages */ 1455 void *rbuf; /* virtual mapped address on rpages */ 1456 struct compress_data *cbuf; /* virtual mapped address on cpages */ 1457 size_t rlen; /* valid data length in rbuf */ 1458 size_t clen; /* valid data length in cbuf */ 1459 void *private; /* payload buffer for specified compression algorithm */ 1460 void *private2; /* extra payload buffer */ 1461 }; 1462 1463 /* compress context for write IO path */ 1464 struct compress_io_ctx { 1465 u32 magic; /* magic number to indicate page is compressed */ 1466 struct inode *inode; /* inode the context belong to */ 1467 struct page **rpages; /* pages store raw data in cluster */ 1468 unsigned int nr_rpages; /* total page number in rpages */ 1469 atomic_t pending_pages; /* in-flight compressed page count */ 1470 }; 1471 1472 /* Context for decompressing one cluster on the read IO path */ 1473 struct decompress_io_ctx { 1474 u32 magic; /* magic number to indicate page is compressed */ 1475 struct inode *inode; /* inode the context belong to */ 1476 pgoff_t cluster_idx; /* cluster index number */ 1477 unsigned int cluster_size; /* page count in cluster */ 1478 unsigned int log_cluster_size; /* log of cluster size */ 1479 struct page **rpages; /* pages store raw data in cluster */ 1480 unsigned int nr_rpages; /* total page number in rpages */ 1481 struct page **cpages; /* pages store compressed data in cluster */ 1482 unsigned int nr_cpages; /* total page number in cpages */ 1483 struct page **tpages; /* temp pages to pad holes in cluster */ 1484 void *rbuf; /* virtual mapped address on rpages */ 1485 struct compress_data *cbuf; /* virtual mapped address on cpages */ 1486 size_t rlen; /* valid data length in rbuf */ 1487 size_t clen; /* valid data length in cbuf */ 1488 1489 /* 1490 * The number of compressed pages remaining to be read in this cluster. 1491 * This is initially nr_cpages. It is decremented by 1 each time a page 1492 * has been read (or failed to be read). When it reaches 0, the cluster 1493 * is decompressed (or an error is reported). 1494 * 1495 * If an error occurs before all the pages have been submitted for I/O, 1496 * then this will never reach 0. In this case the I/O submitter is 1497 * responsible for calling f2fs_decompress_end_io() instead. 1498 */ 1499 atomic_t remaining_pages; 1500 1501 /* 1502 * Number of references to this decompress_io_ctx. 1503 * 1504 * One reference is held for I/O completion. This reference is dropped 1505 * after the pagecache pages are updated and unlocked -- either after 1506 * decompression (and verity if enabled), or after an error. 1507 * 1508 * In addition, each compressed page holds a reference while it is in a 1509 * bio. These references are necessary prevent compressed pages from 1510 * being freed while they are still in a bio. 1511 */ 1512 refcount_t refcnt; 1513 1514 bool failed; /* IO error occurred before decompression? */ 1515 bool need_verity; /* need fs-verity verification after decompression? */ 1516 void *private; /* payload buffer for specified decompression algorithm */ 1517 void *private2; /* extra payload buffer */ 1518 struct work_struct verity_work; /* work to verify the decompressed pages */ 1519 struct work_struct free_work; /* work for late free this structure itself */ 1520 }; 1521 1522 #define NULL_CLUSTER ((unsigned int)(~0)) 1523 #define MIN_COMPRESS_LOG_SIZE 2 1524 #define MAX_COMPRESS_LOG_SIZE 8 1525 #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size)) 1526 1527 struct f2fs_sb_info { 1528 struct super_block *sb; /* pointer to VFS super block */ 1529 struct proc_dir_entry *s_proc; /* proc entry */ 1530 struct f2fs_super_block *raw_super; /* raw super block pointer */ 1531 struct f2fs_rwsem sb_lock; /* lock for raw super block */ 1532 int valid_super_block; /* valid super block no */ 1533 unsigned long s_flag; /* flags for sbi */ 1534 struct mutex writepages; /* mutex for writepages() */ 1535 1536 #ifdef CONFIG_BLK_DEV_ZONED 1537 unsigned int blocks_per_blkz; /* F2FS blocks per zone */ 1538 #endif 1539 1540 /* for node-related operations */ 1541 struct f2fs_nm_info *nm_info; /* node manager */ 1542 struct inode *node_inode; /* cache node blocks */ 1543 1544 /* for segment-related operations */ 1545 struct f2fs_sm_info *sm_info; /* segment manager */ 1546 1547 /* for bio operations */ 1548 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ 1549 /* keep migration IO order for LFS mode */ 1550 struct f2fs_rwsem io_order_lock; 1551 mempool_t *write_io_dummy; /* Dummy pages */ 1552 pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */ 1553 int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */ 1554 1555 /* for checkpoint */ 1556 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ 1557 int cur_cp_pack; /* remain current cp pack */ 1558 spinlock_t cp_lock; /* for flag in ckpt */ 1559 struct inode *meta_inode; /* cache meta blocks */ 1560 struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */ 1561 struct f2fs_rwsem cp_rwsem; /* blocking FS operations */ 1562 struct f2fs_rwsem node_write; /* locking node writes */ 1563 struct f2fs_rwsem node_change; /* locking node change */ 1564 wait_queue_head_t cp_wait; 1565 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ 1566 long interval_time[MAX_TIME]; /* to store thresholds */ 1567 struct ckpt_req_control cprc_info; /* for checkpoint request control */ 1568 1569 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ 1570 1571 spinlock_t fsync_node_lock; /* for node entry lock */ 1572 struct list_head fsync_node_list; /* node list head */ 1573 unsigned int fsync_seg_id; /* sequence id */ 1574 unsigned int fsync_node_num; /* number of node entries */ 1575 1576 /* for orphan inode, use 0'th array */ 1577 unsigned int max_orphans; /* max orphan inodes */ 1578 1579 /* for inode management */ 1580 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */ 1581 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ 1582 struct mutex flush_lock; /* for flush exclusion */ 1583 1584 /* for extent tree cache */ 1585 struct extent_tree_info extent_tree[NR_EXTENT_CACHES]; 1586 atomic64_t allocated_data_blocks; /* for block age extent_cache */ 1587 1588 /* The threshold used for hot and warm data seperation*/ 1589 unsigned int hot_data_age_threshold; 1590 unsigned int warm_data_age_threshold; 1591 unsigned int last_age_weight; 1592 1593 /* basic filesystem units */ 1594 unsigned int log_sectors_per_block; /* log2 sectors per block */ 1595 unsigned int log_blocksize; /* log2 block size */ 1596 unsigned int blocksize; /* block size */ 1597 unsigned int root_ino_num; /* root inode number*/ 1598 unsigned int node_ino_num; /* node inode number*/ 1599 unsigned int meta_ino_num; /* meta inode number*/ 1600 unsigned int log_blocks_per_seg; /* log2 blocks per segment */ 1601 unsigned int blocks_per_seg; /* blocks per segment */ 1602 unsigned int unusable_blocks_per_sec; /* unusable blocks per section */ 1603 unsigned int segs_per_sec; /* segments per section */ 1604 unsigned int secs_per_zone; /* sections per zone */ 1605 unsigned int total_sections; /* total section count */ 1606 unsigned int total_node_count; /* total node block count */ 1607 unsigned int total_valid_node_count; /* valid node block count */ 1608 int dir_level; /* directory level */ 1609 bool readdir_ra; /* readahead inode in readdir */ 1610 u64 max_io_bytes; /* max io bytes to merge IOs */ 1611 1612 block_t user_block_count; /* # of user blocks */ 1613 block_t total_valid_block_count; /* # of valid blocks */ 1614 block_t discard_blks; /* discard command candidats */ 1615 block_t last_valid_block_count; /* for recovery */ 1616 block_t reserved_blocks; /* configurable reserved blocks */ 1617 block_t current_reserved_blocks; /* current reserved blocks */ 1618 1619 /* Additional tracking for no checkpoint mode */ 1620 block_t unusable_block_count; /* # of blocks saved by last cp */ 1621 1622 unsigned int nquota_files; /* # of quota sysfile */ 1623 struct f2fs_rwsem quota_sem; /* blocking cp for flags */ 1624 1625 /* # of pages, see count_type */ 1626 atomic_t nr_pages[NR_COUNT_TYPE]; 1627 /* # of allocated blocks */ 1628 struct percpu_counter alloc_valid_block_count; 1629 /* # of node block writes as roll forward recovery */ 1630 struct percpu_counter rf_node_block_count; 1631 1632 /* writeback control */ 1633 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ 1634 1635 /* valid inode count */ 1636 struct percpu_counter total_valid_inode_count; 1637 1638 struct f2fs_mount_info mount_opt; /* mount options */ 1639 1640 /* for cleaning operations */ 1641 struct f2fs_rwsem gc_lock; /* 1642 * semaphore for GC, avoid 1643 * race between GC and GC or CP 1644 */ 1645 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 1646 struct atgc_management am; /* atgc management */ 1647 unsigned int cur_victim_sec; /* current victim section num */ 1648 unsigned int gc_mode; /* current GC state */ 1649 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1650 spinlock_t gc_remaining_trials_lock; 1651 /* remaining trial count for GC_URGENT_* and GC_IDLE_* */ 1652 unsigned int gc_remaining_trials; 1653 1654 /* for skip statistic */ 1655 unsigned long long skipped_gc_rwsem; /* FG_GC only */ 1656 1657 /* threshold for gc trials on pinned files */ 1658 u64 gc_pin_file_threshold; 1659 struct f2fs_rwsem pin_sem; 1660 1661 /* maximum # of trials to find a victim segment for SSR and GC */ 1662 unsigned int max_victim_search; 1663 /* migration granularity of garbage collection, unit: segment */ 1664 unsigned int migration_granularity; 1665 1666 /* 1667 * for stat information. 1668 * one is for the LFS mode, and the other is for the SSR mode. 1669 */ 1670 #ifdef CONFIG_F2FS_STAT_FS 1671 struct f2fs_stat_info *stat_info; /* FS status information */ 1672 atomic_t meta_count[META_MAX]; /* # of meta blocks */ 1673 unsigned int segment_count[2]; /* # of allocated segments */ 1674 unsigned int block_count[2]; /* # of allocated blocks */ 1675 atomic_t inplace_count; /* # of inplace update */ 1676 /* # of lookup extent cache */ 1677 atomic64_t total_hit_ext[NR_EXTENT_CACHES]; 1678 /* # of hit rbtree extent node */ 1679 atomic64_t read_hit_rbtree[NR_EXTENT_CACHES]; 1680 /* # of hit cached extent node */ 1681 atomic64_t read_hit_cached[NR_EXTENT_CACHES]; 1682 /* # of hit largest extent node in read extent cache */ 1683 atomic64_t read_hit_largest; 1684 atomic_t inline_xattr; /* # of inline_xattr inodes */ 1685 atomic_t inline_inode; /* # of inline_data inodes */ 1686 atomic_t inline_dir; /* # of inline_dentry inodes */ 1687 atomic_t compr_inode; /* # of compressed inodes */ 1688 atomic64_t compr_blocks; /* # of compressed blocks */ 1689 atomic_t swapfile_inode; /* # of swapfile inodes */ 1690 atomic_t atomic_files; /* # of opened atomic file */ 1691 atomic_t max_aw_cnt; /* max # of atomic writes */ 1692 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ 1693 unsigned int other_skip_bggc; /* skip background gc for other reasons */ 1694 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ 1695 #endif 1696 spinlock_t stat_lock; /* lock for stat operations */ 1697 1698 /* to attach REQ_META|REQ_FUA flags */ 1699 unsigned int data_io_flag; 1700 unsigned int node_io_flag; 1701 1702 /* For sysfs support */ 1703 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ 1704 struct completion s_kobj_unregister; 1705 1706 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ 1707 struct completion s_stat_kobj_unregister; 1708 1709 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */ 1710 struct completion s_feature_list_kobj_unregister; 1711 1712 /* For shrinker support */ 1713 struct list_head s_list; 1714 struct mutex umount_mutex; 1715 unsigned int shrinker_run_no; 1716 1717 /* For multi devices */ 1718 int s_ndevs; /* number of devices */ 1719 struct f2fs_dev_info *devs; /* for device list */ 1720 unsigned int dirty_device; /* for checkpoint data flush */ 1721 spinlock_t dev_lock; /* protect dirty_device */ 1722 bool aligned_blksize; /* all devices has the same logical blksize */ 1723 1724 /* For write statistics */ 1725 u64 sectors_written_start; 1726 u64 kbytes_written; 1727 1728 /* Reference to checksum algorithm driver via cryptoapi */ 1729 struct crypto_shash *s_chksum_driver; 1730 1731 /* Precomputed FS UUID checksum for seeding other checksums */ 1732 __u32 s_chksum_seed; 1733 1734 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1735 1736 /* 1737 * If we are in irq context, let's update error information into 1738 * on-disk superblock in the work. 1739 */ 1740 struct work_struct s_error_work; 1741 unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */ 1742 unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */ 1743 spinlock_t error_lock; /* protect errors/stop_reason array */ 1744 bool error_dirty; /* errors of sb is dirty */ 1745 1746 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 1747 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ 1748 1749 /* For reclaimed segs statistics per each GC mode */ 1750 unsigned int gc_segment_mode; /* GC state for reclaimed segments */ 1751 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ 1752 1753 unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */ 1754 1755 int max_fragment_chunk; /* max chunk size for block fragmentation mode */ 1756 int max_fragment_hole; /* max hole size for block fragmentation mode */ 1757 1758 /* For atomic write statistics */ 1759 atomic64_t current_atomic_write; 1760 s64 peak_atomic_write; 1761 u64 committed_atomic_block; 1762 u64 revoked_atomic_block; 1763 1764 #ifdef CONFIG_F2FS_FS_COMPRESSION 1765 struct kmem_cache *page_array_slab; /* page array entry */ 1766 unsigned int page_array_slab_size; /* default page array slab size */ 1767 1768 /* For runtime compression statistics */ 1769 u64 compr_written_block; 1770 u64 compr_saved_block; 1771 u32 compr_new_inode; 1772 1773 /* For compressed block cache */ 1774 struct inode *compress_inode; /* cache compressed blocks */ 1775 unsigned int compress_percent; /* cache page percentage */ 1776 unsigned int compress_watermark; /* cache page watermark */ 1777 atomic_t compress_page_hit; /* cache hit count */ 1778 #endif 1779 1780 #ifdef CONFIG_F2FS_IOSTAT 1781 /* For app/fs IO statistics */ 1782 spinlock_t iostat_lock; 1783 unsigned long long iostat_count[NR_IO_TYPE]; 1784 unsigned long long iostat_bytes[NR_IO_TYPE]; 1785 unsigned long long prev_iostat_bytes[NR_IO_TYPE]; 1786 bool iostat_enable; 1787 unsigned long iostat_next_period; 1788 unsigned int iostat_period_ms; 1789 1790 /* For io latency related statistics info in one iostat period */ 1791 spinlock_t iostat_lat_lock; 1792 struct iostat_lat_info *iostat_io_lat; 1793 #endif 1794 }; 1795 1796 #ifdef CONFIG_F2FS_FAULT_INJECTION 1797 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \ 1798 __builtin_return_address(0)) 1799 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type, 1800 const char *func, const char *parent_func) 1801 { 1802 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1803 1804 if (!ffi->inject_rate) 1805 return false; 1806 1807 if (!IS_FAULT_SET(ffi, type)) 1808 return false; 1809 1810 atomic_inc(&ffi->inject_ops); 1811 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1812 atomic_set(&ffi->inject_ops, 0); 1813 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", 1814 KERN_INFO, sbi->sb->s_id, f2fs_fault_name[type], 1815 func, parent_func); 1816 return true; 1817 } 1818 return false; 1819 } 1820 #else 1821 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1822 { 1823 return false; 1824 } 1825 #endif 1826 1827 /* 1828 * Test if the mounted volume is a multi-device volume. 1829 * - For a single regular disk volume, sbi->s_ndevs is 0. 1830 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1831 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 1832 */ 1833 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1834 { 1835 return sbi->s_ndevs > 1; 1836 } 1837 1838 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1839 { 1840 unsigned long now = jiffies; 1841 1842 sbi->last_time[type] = now; 1843 1844 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1845 if (type == REQ_TIME) { 1846 sbi->last_time[DISCARD_TIME] = now; 1847 sbi->last_time[GC_TIME] = now; 1848 } 1849 } 1850 1851 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1852 { 1853 unsigned long interval = sbi->interval_time[type] * HZ; 1854 1855 return time_after(jiffies, sbi->last_time[type] + interval); 1856 } 1857 1858 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1859 int type) 1860 { 1861 unsigned long interval = sbi->interval_time[type] * HZ; 1862 unsigned int wait_ms = 0; 1863 long delta; 1864 1865 delta = (sbi->last_time[type] + interval) - jiffies; 1866 if (delta > 0) 1867 wait_ms = jiffies_to_msecs(delta); 1868 1869 return wait_ms; 1870 } 1871 1872 /* 1873 * Inline functions 1874 */ 1875 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1876 const void *address, unsigned int length) 1877 { 1878 struct { 1879 struct shash_desc shash; 1880 char ctx[4]; 1881 } desc; 1882 int err; 1883 1884 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1885 1886 desc.shash.tfm = sbi->s_chksum_driver; 1887 *(u32 *)desc.ctx = crc; 1888 1889 err = crypto_shash_update(&desc.shash, address, length); 1890 BUG_ON(err); 1891 1892 return *(u32 *)desc.ctx; 1893 } 1894 1895 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1896 unsigned int length) 1897 { 1898 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1899 } 1900 1901 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1902 void *buf, size_t buf_size) 1903 { 1904 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1905 } 1906 1907 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1908 const void *address, unsigned int length) 1909 { 1910 return __f2fs_crc32(sbi, crc, address, length); 1911 } 1912 1913 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1914 { 1915 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1916 } 1917 1918 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1919 { 1920 return sb->s_fs_info; 1921 } 1922 1923 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1924 { 1925 return F2FS_SB(inode->i_sb); 1926 } 1927 1928 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1929 { 1930 return F2FS_I_SB(mapping->host); 1931 } 1932 1933 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1934 { 1935 return F2FS_M_SB(page_file_mapping(page)); 1936 } 1937 1938 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1939 { 1940 return (struct f2fs_super_block *)(sbi->raw_super); 1941 } 1942 1943 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1944 { 1945 return (struct f2fs_checkpoint *)(sbi->ckpt); 1946 } 1947 1948 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1949 { 1950 return (struct f2fs_node *)page_address(page); 1951 } 1952 1953 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1954 { 1955 return &((struct f2fs_node *)page_address(page))->i; 1956 } 1957 1958 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 1959 { 1960 return (struct f2fs_nm_info *)(sbi->nm_info); 1961 } 1962 1963 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1964 { 1965 return (struct f2fs_sm_info *)(sbi->sm_info); 1966 } 1967 1968 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1969 { 1970 return (struct sit_info *)(SM_I(sbi)->sit_info); 1971 } 1972 1973 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1974 { 1975 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1976 } 1977 1978 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1979 { 1980 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 1981 } 1982 1983 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 1984 { 1985 return sbi->meta_inode->i_mapping; 1986 } 1987 1988 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 1989 { 1990 return sbi->node_inode->i_mapping; 1991 } 1992 1993 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 1994 { 1995 return test_bit(type, &sbi->s_flag); 1996 } 1997 1998 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1999 { 2000 set_bit(type, &sbi->s_flag); 2001 } 2002 2003 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2004 { 2005 clear_bit(type, &sbi->s_flag); 2006 } 2007 2008 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 2009 { 2010 return le64_to_cpu(cp->checkpoint_ver); 2011 } 2012 2013 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 2014 { 2015 if (type < F2FS_MAX_QUOTAS) 2016 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 2017 return 0; 2018 } 2019 2020 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 2021 { 2022 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 2023 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 2024 } 2025 2026 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2027 { 2028 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2029 2030 return ckpt_flags & f; 2031 } 2032 2033 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2034 { 2035 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 2036 } 2037 2038 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2039 { 2040 unsigned int ckpt_flags; 2041 2042 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2043 ckpt_flags |= f; 2044 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2045 } 2046 2047 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2048 { 2049 unsigned long flags; 2050 2051 spin_lock_irqsave(&sbi->cp_lock, flags); 2052 __set_ckpt_flags(F2FS_CKPT(sbi), f); 2053 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2054 } 2055 2056 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2057 { 2058 unsigned int ckpt_flags; 2059 2060 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2061 ckpt_flags &= (~f); 2062 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2063 } 2064 2065 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2066 { 2067 unsigned long flags; 2068 2069 spin_lock_irqsave(&sbi->cp_lock, flags); 2070 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 2071 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2072 } 2073 2074 #define init_f2fs_rwsem(sem) \ 2075 do { \ 2076 static struct lock_class_key __key; \ 2077 \ 2078 __init_f2fs_rwsem((sem), #sem, &__key); \ 2079 } while (0) 2080 2081 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem, 2082 const char *sem_name, struct lock_class_key *key) 2083 { 2084 __init_rwsem(&sem->internal_rwsem, sem_name, key); 2085 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 2086 init_waitqueue_head(&sem->read_waiters); 2087 #endif 2088 } 2089 2090 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) 2091 { 2092 return rwsem_is_locked(&sem->internal_rwsem); 2093 } 2094 2095 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) 2096 { 2097 return rwsem_is_contended(&sem->internal_rwsem); 2098 } 2099 2100 static inline void f2fs_down_read(struct f2fs_rwsem *sem) 2101 { 2102 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 2103 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); 2104 #else 2105 down_read(&sem->internal_rwsem); 2106 #endif 2107 } 2108 2109 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) 2110 { 2111 return down_read_trylock(&sem->internal_rwsem); 2112 } 2113 2114 #ifdef CONFIG_DEBUG_LOCK_ALLOC 2115 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) 2116 { 2117 down_read_nested(&sem->internal_rwsem, subclass); 2118 } 2119 #else 2120 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) 2121 #endif 2122 2123 static inline void f2fs_up_read(struct f2fs_rwsem *sem) 2124 { 2125 up_read(&sem->internal_rwsem); 2126 } 2127 2128 static inline void f2fs_down_write(struct f2fs_rwsem *sem) 2129 { 2130 down_write(&sem->internal_rwsem); 2131 } 2132 2133 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) 2134 { 2135 return down_write_trylock(&sem->internal_rwsem); 2136 } 2137 2138 static inline void f2fs_up_write(struct f2fs_rwsem *sem) 2139 { 2140 up_write(&sem->internal_rwsem); 2141 #ifdef CONFIG_F2FS_UNFAIR_RWSEM 2142 wake_up_all(&sem->read_waiters); 2143 #endif 2144 } 2145 2146 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 2147 { 2148 f2fs_down_read(&sbi->cp_rwsem); 2149 } 2150 2151 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 2152 { 2153 if (time_to_inject(sbi, FAULT_LOCK_OP)) 2154 return 0; 2155 return f2fs_down_read_trylock(&sbi->cp_rwsem); 2156 } 2157 2158 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 2159 { 2160 f2fs_up_read(&sbi->cp_rwsem); 2161 } 2162 2163 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 2164 { 2165 f2fs_down_write(&sbi->cp_rwsem); 2166 } 2167 2168 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 2169 { 2170 f2fs_up_write(&sbi->cp_rwsem); 2171 } 2172 2173 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 2174 { 2175 int reason = CP_SYNC; 2176 2177 if (test_opt(sbi, FASTBOOT)) 2178 reason = CP_FASTBOOT; 2179 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 2180 reason = CP_UMOUNT; 2181 return reason; 2182 } 2183 2184 static inline bool __remain_node_summaries(int reason) 2185 { 2186 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 2187 } 2188 2189 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 2190 { 2191 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 2192 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 2193 } 2194 2195 /* 2196 * Check whether the inode has blocks or not 2197 */ 2198 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 2199 { 2200 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 2201 2202 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2203 } 2204 2205 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2206 { 2207 return ofs == XATTR_NODE_OFFSET; 2208 } 2209 2210 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2211 struct inode *inode, bool cap) 2212 { 2213 if (!inode) 2214 return true; 2215 if (!test_opt(sbi, RESERVE_ROOT)) 2216 return false; 2217 if (IS_NOQUOTA(inode)) 2218 return true; 2219 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2220 return true; 2221 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2222 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2223 return true; 2224 if (cap && capable(CAP_SYS_RESOURCE)) 2225 return true; 2226 return false; 2227 } 2228 2229 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2230 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2231 struct inode *inode, blkcnt_t *count) 2232 { 2233 blkcnt_t diff = 0, release = 0; 2234 block_t avail_user_block_count; 2235 int ret; 2236 2237 ret = dquot_reserve_block(inode, *count); 2238 if (ret) 2239 return ret; 2240 2241 if (time_to_inject(sbi, FAULT_BLOCK)) { 2242 release = *count; 2243 goto release_quota; 2244 } 2245 2246 /* 2247 * let's increase this in prior to actual block count change in order 2248 * for f2fs_sync_file to avoid data races when deciding checkpoint. 2249 */ 2250 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2251 2252 spin_lock(&sbi->stat_lock); 2253 sbi->total_valid_block_count += (block_t)(*count); 2254 avail_user_block_count = sbi->user_block_count - 2255 sbi->current_reserved_blocks; 2256 2257 if (!__allow_reserved_blocks(sbi, inode, true)) 2258 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2259 2260 if (F2FS_IO_ALIGNED(sbi)) 2261 avail_user_block_count -= sbi->blocks_per_seg * 2262 SM_I(sbi)->additional_reserved_segments; 2263 2264 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2265 if (avail_user_block_count > sbi->unusable_block_count) 2266 avail_user_block_count -= sbi->unusable_block_count; 2267 else 2268 avail_user_block_count = 0; 2269 } 2270 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2271 diff = sbi->total_valid_block_count - avail_user_block_count; 2272 if (diff > *count) 2273 diff = *count; 2274 *count -= diff; 2275 release = diff; 2276 sbi->total_valid_block_count -= diff; 2277 if (!*count) { 2278 spin_unlock(&sbi->stat_lock); 2279 goto enospc; 2280 } 2281 } 2282 spin_unlock(&sbi->stat_lock); 2283 2284 if (unlikely(release)) { 2285 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2286 dquot_release_reservation_block(inode, release); 2287 } 2288 f2fs_i_blocks_write(inode, *count, true, true); 2289 return 0; 2290 2291 enospc: 2292 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2293 release_quota: 2294 dquot_release_reservation_block(inode, release); 2295 return -ENOSPC; 2296 } 2297 2298 __printf(2, 3) 2299 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2300 2301 #define f2fs_err(sbi, fmt, ...) \ 2302 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2303 #define f2fs_warn(sbi, fmt, ...) \ 2304 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2305 #define f2fs_notice(sbi, fmt, ...) \ 2306 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2307 #define f2fs_info(sbi, fmt, ...) \ 2308 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2309 #define f2fs_debug(sbi, fmt, ...) \ 2310 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2311 2312 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \ 2313 static inline bool page_private_##name(struct page *page) \ 2314 { \ 2315 return PagePrivate(page) && \ 2316 test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \ 2317 test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ 2318 } 2319 2320 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \ 2321 static inline void set_page_private_##name(struct page *page) \ 2322 { \ 2323 if (!PagePrivate(page)) \ 2324 attach_page_private(page, (void *)0); \ 2325 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \ 2326 set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ 2327 } 2328 2329 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \ 2330 static inline void clear_page_private_##name(struct page *page) \ 2331 { \ 2332 clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ 2333 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \ 2334 detach_page_private(page); \ 2335 } 2336 2337 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); 2338 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); 2339 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); 2340 PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE); 2341 2342 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); 2343 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); 2344 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); 2345 PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE); 2346 2347 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); 2348 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); 2349 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); 2350 PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE); 2351 2352 static inline unsigned long get_page_private_data(struct page *page) 2353 { 2354 unsigned long data = page_private(page); 2355 2356 if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data)) 2357 return 0; 2358 return data >> PAGE_PRIVATE_MAX; 2359 } 2360 2361 static inline void set_page_private_data(struct page *page, unsigned long data) 2362 { 2363 if (!PagePrivate(page)) 2364 attach_page_private(page, (void *)0); 2365 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); 2366 page_private(page) |= data << PAGE_PRIVATE_MAX; 2367 } 2368 2369 static inline void clear_page_private_data(struct page *page) 2370 { 2371 page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0); 2372 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) 2373 detach_page_private(page); 2374 } 2375 2376 static inline void clear_page_private_all(struct page *page) 2377 { 2378 clear_page_private_data(page); 2379 clear_page_private_reference(page); 2380 clear_page_private_gcing(page); 2381 clear_page_private_inline(page); 2382 2383 f2fs_bug_on(F2FS_P_SB(page), page_private(page)); 2384 } 2385 2386 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2387 struct inode *inode, 2388 block_t count) 2389 { 2390 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2391 2392 spin_lock(&sbi->stat_lock); 2393 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2394 sbi->total_valid_block_count -= (block_t)count; 2395 if (sbi->reserved_blocks && 2396 sbi->current_reserved_blocks < sbi->reserved_blocks) 2397 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2398 sbi->current_reserved_blocks + count); 2399 spin_unlock(&sbi->stat_lock); 2400 if (unlikely(inode->i_blocks < sectors)) { 2401 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2402 inode->i_ino, 2403 (unsigned long long)inode->i_blocks, 2404 (unsigned long long)sectors); 2405 set_sbi_flag(sbi, SBI_NEED_FSCK); 2406 return; 2407 } 2408 f2fs_i_blocks_write(inode, count, false, true); 2409 } 2410 2411 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2412 { 2413 atomic_inc(&sbi->nr_pages[count_type]); 2414 2415 if (count_type == F2FS_DIRTY_DENTS || 2416 count_type == F2FS_DIRTY_NODES || 2417 count_type == F2FS_DIRTY_META || 2418 count_type == F2FS_DIRTY_QDATA || 2419 count_type == F2FS_DIRTY_IMETA) 2420 set_sbi_flag(sbi, SBI_IS_DIRTY); 2421 } 2422 2423 static inline void inode_inc_dirty_pages(struct inode *inode) 2424 { 2425 atomic_inc(&F2FS_I(inode)->dirty_pages); 2426 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2427 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2428 if (IS_NOQUOTA(inode)) 2429 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2430 } 2431 2432 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2433 { 2434 atomic_dec(&sbi->nr_pages[count_type]); 2435 } 2436 2437 static inline void inode_dec_dirty_pages(struct inode *inode) 2438 { 2439 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2440 !S_ISLNK(inode->i_mode)) 2441 return; 2442 2443 atomic_dec(&F2FS_I(inode)->dirty_pages); 2444 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2445 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2446 if (IS_NOQUOTA(inode)) 2447 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2448 } 2449 2450 static inline void inc_atomic_write_cnt(struct inode *inode) 2451 { 2452 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2453 struct f2fs_inode_info *fi = F2FS_I(inode); 2454 u64 current_write; 2455 2456 fi->atomic_write_cnt++; 2457 atomic64_inc(&sbi->current_atomic_write); 2458 current_write = atomic64_read(&sbi->current_atomic_write); 2459 if (current_write > sbi->peak_atomic_write) 2460 sbi->peak_atomic_write = current_write; 2461 } 2462 2463 static inline void release_atomic_write_cnt(struct inode *inode) 2464 { 2465 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2466 struct f2fs_inode_info *fi = F2FS_I(inode); 2467 2468 atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write); 2469 fi->atomic_write_cnt = 0; 2470 } 2471 2472 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2473 { 2474 return atomic_read(&sbi->nr_pages[count_type]); 2475 } 2476 2477 static inline int get_dirty_pages(struct inode *inode) 2478 { 2479 return atomic_read(&F2FS_I(inode)->dirty_pages); 2480 } 2481 2482 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2483 { 2484 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2485 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2486 sbi->log_blocks_per_seg; 2487 2488 return segs / sbi->segs_per_sec; 2489 } 2490 2491 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2492 { 2493 return sbi->total_valid_block_count; 2494 } 2495 2496 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2497 { 2498 return sbi->discard_blks; 2499 } 2500 2501 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2502 { 2503 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2504 2505 /* return NAT or SIT bitmap */ 2506 if (flag == NAT_BITMAP) 2507 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2508 else if (flag == SIT_BITMAP) 2509 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2510 2511 return 0; 2512 } 2513 2514 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2515 { 2516 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2517 } 2518 2519 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2520 { 2521 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2522 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; 2523 int offset; 2524 2525 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2526 offset = (flag == SIT_BITMAP) ? 2527 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2528 /* 2529 * if large_nat_bitmap feature is enabled, leave checksum 2530 * protection for all nat/sit bitmaps. 2531 */ 2532 return tmp_ptr + offset + sizeof(__le32); 2533 } 2534 2535 if (__cp_payload(sbi) > 0) { 2536 if (flag == NAT_BITMAP) 2537 return tmp_ptr; 2538 else 2539 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2540 } else { 2541 offset = (flag == NAT_BITMAP) ? 2542 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2543 return tmp_ptr + offset; 2544 } 2545 } 2546 2547 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2548 { 2549 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2550 2551 if (sbi->cur_cp_pack == 2) 2552 start_addr += sbi->blocks_per_seg; 2553 return start_addr; 2554 } 2555 2556 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2557 { 2558 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2559 2560 if (sbi->cur_cp_pack == 1) 2561 start_addr += sbi->blocks_per_seg; 2562 return start_addr; 2563 } 2564 2565 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2566 { 2567 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; 2568 } 2569 2570 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2571 { 2572 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2573 } 2574 2575 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); 2576 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2577 struct inode *inode, bool is_inode) 2578 { 2579 block_t valid_block_count; 2580 unsigned int valid_node_count, user_block_count; 2581 int err; 2582 2583 if (is_inode) { 2584 if (inode) { 2585 err = dquot_alloc_inode(inode); 2586 if (err) 2587 return err; 2588 } 2589 } else { 2590 err = dquot_reserve_block(inode, 1); 2591 if (err) 2592 return err; 2593 } 2594 2595 if (time_to_inject(sbi, FAULT_BLOCK)) 2596 goto enospc; 2597 2598 spin_lock(&sbi->stat_lock); 2599 2600 valid_block_count = sbi->total_valid_block_count + 2601 sbi->current_reserved_blocks + 1; 2602 2603 if (!__allow_reserved_blocks(sbi, inode, false)) 2604 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2605 2606 if (F2FS_IO_ALIGNED(sbi)) 2607 valid_block_count += sbi->blocks_per_seg * 2608 SM_I(sbi)->additional_reserved_segments; 2609 2610 user_block_count = sbi->user_block_count; 2611 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2612 user_block_count -= sbi->unusable_block_count; 2613 2614 if (unlikely(valid_block_count > user_block_count)) { 2615 spin_unlock(&sbi->stat_lock); 2616 goto enospc; 2617 } 2618 2619 valid_node_count = sbi->total_valid_node_count + 1; 2620 if (unlikely(valid_node_count > sbi->total_node_count)) { 2621 spin_unlock(&sbi->stat_lock); 2622 goto enospc; 2623 } 2624 2625 sbi->total_valid_node_count++; 2626 sbi->total_valid_block_count++; 2627 spin_unlock(&sbi->stat_lock); 2628 2629 if (inode) { 2630 if (is_inode) 2631 f2fs_mark_inode_dirty_sync(inode, true); 2632 else 2633 f2fs_i_blocks_write(inode, 1, true, true); 2634 } 2635 2636 percpu_counter_inc(&sbi->alloc_valid_block_count); 2637 return 0; 2638 2639 enospc: 2640 if (is_inode) { 2641 if (inode) 2642 dquot_free_inode(inode); 2643 } else { 2644 dquot_release_reservation_block(inode, 1); 2645 } 2646 return -ENOSPC; 2647 } 2648 2649 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2650 struct inode *inode, bool is_inode) 2651 { 2652 spin_lock(&sbi->stat_lock); 2653 2654 if (unlikely(!sbi->total_valid_block_count || 2655 !sbi->total_valid_node_count)) { 2656 f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u", 2657 sbi->total_valid_block_count, 2658 sbi->total_valid_node_count); 2659 set_sbi_flag(sbi, SBI_NEED_FSCK); 2660 } else { 2661 sbi->total_valid_block_count--; 2662 sbi->total_valid_node_count--; 2663 } 2664 2665 if (sbi->reserved_blocks && 2666 sbi->current_reserved_blocks < sbi->reserved_blocks) 2667 sbi->current_reserved_blocks++; 2668 2669 spin_unlock(&sbi->stat_lock); 2670 2671 if (is_inode) { 2672 dquot_free_inode(inode); 2673 } else { 2674 if (unlikely(inode->i_blocks == 0)) { 2675 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2676 inode->i_ino, 2677 (unsigned long long)inode->i_blocks); 2678 set_sbi_flag(sbi, SBI_NEED_FSCK); 2679 return; 2680 } 2681 f2fs_i_blocks_write(inode, 1, false, true); 2682 } 2683 } 2684 2685 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2686 { 2687 return sbi->total_valid_node_count; 2688 } 2689 2690 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2691 { 2692 percpu_counter_inc(&sbi->total_valid_inode_count); 2693 } 2694 2695 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2696 { 2697 percpu_counter_dec(&sbi->total_valid_inode_count); 2698 } 2699 2700 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2701 { 2702 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2703 } 2704 2705 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2706 pgoff_t index, bool for_write) 2707 { 2708 struct page *page; 2709 unsigned int flags; 2710 2711 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2712 if (!for_write) 2713 page = find_get_page_flags(mapping, index, 2714 FGP_LOCK | FGP_ACCESSED); 2715 else 2716 page = find_lock_page(mapping, index); 2717 if (page) 2718 return page; 2719 2720 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) 2721 return NULL; 2722 } 2723 2724 if (!for_write) 2725 return grab_cache_page(mapping, index); 2726 2727 flags = memalloc_nofs_save(); 2728 page = grab_cache_page_write_begin(mapping, index); 2729 memalloc_nofs_restore(flags); 2730 2731 return page; 2732 } 2733 2734 static inline struct page *f2fs_pagecache_get_page( 2735 struct address_space *mapping, pgoff_t index, 2736 int fgp_flags, gfp_t gfp_mask) 2737 { 2738 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) 2739 return NULL; 2740 2741 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2742 } 2743 2744 static inline void f2fs_put_page(struct page *page, int unlock) 2745 { 2746 if (!page) 2747 return; 2748 2749 if (unlock) { 2750 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2751 unlock_page(page); 2752 } 2753 put_page(page); 2754 } 2755 2756 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2757 { 2758 if (dn->node_page) 2759 f2fs_put_page(dn->node_page, 1); 2760 if (dn->inode_page && dn->node_page != dn->inode_page) 2761 f2fs_put_page(dn->inode_page, 0); 2762 dn->node_page = NULL; 2763 dn->inode_page = NULL; 2764 } 2765 2766 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2767 size_t size) 2768 { 2769 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2770 } 2771 2772 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, 2773 gfp_t flags) 2774 { 2775 void *entry; 2776 2777 entry = kmem_cache_alloc(cachep, flags); 2778 if (!entry) 2779 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2780 return entry; 2781 } 2782 2783 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2784 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) 2785 { 2786 if (nofail) 2787 return f2fs_kmem_cache_alloc_nofail(cachep, flags); 2788 2789 if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) 2790 return NULL; 2791 2792 return kmem_cache_alloc(cachep, flags); 2793 } 2794 2795 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2796 { 2797 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2798 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2799 get_pages(sbi, F2FS_WB_CP_DATA) || 2800 get_pages(sbi, F2FS_DIO_READ) || 2801 get_pages(sbi, F2FS_DIO_WRITE)) 2802 return true; 2803 2804 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2805 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2806 return true; 2807 2808 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2809 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2810 return true; 2811 return false; 2812 } 2813 2814 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2815 { 2816 if (sbi->gc_mode == GC_URGENT_HIGH) 2817 return true; 2818 2819 if (is_inflight_io(sbi, type)) 2820 return false; 2821 2822 if (sbi->gc_mode == GC_URGENT_MID) 2823 return true; 2824 2825 if (sbi->gc_mode == GC_URGENT_LOW && 2826 (type == DISCARD_TIME || type == GC_TIME)) 2827 return true; 2828 2829 return f2fs_time_over(sbi, type); 2830 } 2831 2832 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2833 unsigned long index, void *item) 2834 { 2835 while (radix_tree_insert(root, index, item)) 2836 cond_resched(); 2837 } 2838 2839 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2840 2841 static inline bool IS_INODE(struct page *page) 2842 { 2843 struct f2fs_node *p = F2FS_NODE(page); 2844 2845 return RAW_IS_INODE(p); 2846 } 2847 2848 static inline int offset_in_addr(struct f2fs_inode *i) 2849 { 2850 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2851 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2852 } 2853 2854 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2855 { 2856 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2857 } 2858 2859 static inline int f2fs_has_extra_attr(struct inode *inode); 2860 static inline block_t data_blkaddr(struct inode *inode, 2861 struct page *node_page, unsigned int offset) 2862 { 2863 struct f2fs_node *raw_node; 2864 __le32 *addr_array; 2865 int base = 0; 2866 bool is_inode = IS_INODE(node_page); 2867 2868 raw_node = F2FS_NODE(node_page); 2869 2870 if (is_inode) { 2871 if (!inode) 2872 /* from GC path only */ 2873 base = offset_in_addr(&raw_node->i); 2874 else if (f2fs_has_extra_attr(inode)) 2875 base = get_extra_isize(inode); 2876 } 2877 2878 addr_array = blkaddr_in_node(raw_node); 2879 return le32_to_cpu(addr_array[base + offset]); 2880 } 2881 2882 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2883 { 2884 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2885 } 2886 2887 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2888 { 2889 int mask; 2890 2891 addr += (nr >> 3); 2892 mask = BIT(7 - (nr & 0x07)); 2893 return mask & *addr; 2894 } 2895 2896 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2897 { 2898 int mask; 2899 2900 addr += (nr >> 3); 2901 mask = BIT(7 - (nr & 0x07)); 2902 *addr |= mask; 2903 } 2904 2905 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2906 { 2907 int mask; 2908 2909 addr += (nr >> 3); 2910 mask = BIT(7 - (nr & 0x07)); 2911 *addr &= ~mask; 2912 } 2913 2914 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2915 { 2916 int mask; 2917 int ret; 2918 2919 addr += (nr >> 3); 2920 mask = BIT(7 - (nr & 0x07)); 2921 ret = mask & *addr; 2922 *addr |= mask; 2923 return ret; 2924 } 2925 2926 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2927 { 2928 int mask; 2929 int ret; 2930 2931 addr += (nr >> 3); 2932 mask = BIT(7 - (nr & 0x07)); 2933 ret = mask & *addr; 2934 *addr &= ~mask; 2935 return ret; 2936 } 2937 2938 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2939 { 2940 int mask; 2941 2942 addr += (nr >> 3); 2943 mask = BIT(7 - (nr & 0x07)); 2944 *addr ^= mask; 2945 } 2946 2947 /* 2948 * On-disk inode flags (f2fs_inode::i_flags) 2949 */ 2950 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2951 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2952 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2953 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2954 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2955 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2956 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2957 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2958 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2959 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2960 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2961 2962 /* Flags that should be inherited by new inodes from their parent. */ 2963 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2964 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2965 F2FS_CASEFOLD_FL) 2966 2967 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2968 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2969 F2FS_CASEFOLD_FL)) 2970 2971 /* Flags that are appropriate for non-directories/regular files. */ 2972 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2973 2974 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2975 { 2976 if (S_ISDIR(mode)) 2977 return flags; 2978 else if (S_ISREG(mode)) 2979 return flags & F2FS_REG_FLMASK; 2980 else 2981 return flags & F2FS_OTHER_FLMASK; 2982 } 2983 2984 static inline void __mark_inode_dirty_flag(struct inode *inode, 2985 int flag, bool set) 2986 { 2987 switch (flag) { 2988 case FI_INLINE_XATTR: 2989 case FI_INLINE_DATA: 2990 case FI_INLINE_DENTRY: 2991 case FI_NEW_INODE: 2992 if (set) 2993 return; 2994 fallthrough; 2995 case FI_DATA_EXIST: 2996 case FI_INLINE_DOTS: 2997 case FI_PIN_FILE: 2998 case FI_COMPRESS_RELEASED: 2999 f2fs_mark_inode_dirty_sync(inode, true); 3000 } 3001 } 3002 3003 static inline void set_inode_flag(struct inode *inode, int flag) 3004 { 3005 set_bit(flag, F2FS_I(inode)->flags); 3006 __mark_inode_dirty_flag(inode, flag, true); 3007 } 3008 3009 static inline int is_inode_flag_set(struct inode *inode, int flag) 3010 { 3011 return test_bit(flag, F2FS_I(inode)->flags); 3012 } 3013 3014 static inline void clear_inode_flag(struct inode *inode, int flag) 3015 { 3016 clear_bit(flag, F2FS_I(inode)->flags); 3017 __mark_inode_dirty_flag(inode, flag, false); 3018 } 3019 3020 static inline bool f2fs_verity_in_progress(struct inode *inode) 3021 { 3022 return IS_ENABLED(CONFIG_FS_VERITY) && 3023 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 3024 } 3025 3026 static inline void set_acl_inode(struct inode *inode, umode_t mode) 3027 { 3028 F2FS_I(inode)->i_acl_mode = mode; 3029 set_inode_flag(inode, FI_ACL_MODE); 3030 f2fs_mark_inode_dirty_sync(inode, false); 3031 } 3032 3033 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 3034 { 3035 if (inc) 3036 inc_nlink(inode); 3037 else 3038 drop_nlink(inode); 3039 f2fs_mark_inode_dirty_sync(inode, true); 3040 } 3041 3042 static inline void f2fs_i_blocks_write(struct inode *inode, 3043 block_t diff, bool add, bool claim) 3044 { 3045 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 3046 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 3047 3048 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 3049 if (add) { 3050 if (claim) 3051 dquot_claim_block(inode, diff); 3052 else 3053 dquot_alloc_block_nofail(inode, diff); 3054 } else { 3055 dquot_free_block(inode, diff); 3056 } 3057 3058 f2fs_mark_inode_dirty_sync(inode, true); 3059 if (clean || recover) 3060 set_inode_flag(inode, FI_AUTO_RECOVER); 3061 } 3062 3063 static inline bool f2fs_is_atomic_file(struct inode *inode); 3064 3065 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 3066 { 3067 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 3068 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 3069 3070 if (i_size_read(inode) == i_size) 3071 return; 3072 3073 i_size_write(inode, i_size); 3074 3075 if (f2fs_is_atomic_file(inode)) 3076 return; 3077 3078 f2fs_mark_inode_dirty_sync(inode, true); 3079 if (clean || recover) 3080 set_inode_flag(inode, FI_AUTO_RECOVER); 3081 } 3082 3083 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 3084 { 3085 F2FS_I(inode)->i_current_depth = depth; 3086 f2fs_mark_inode_dirty_sync(inode, true); 3087 } 3088 3089 static inline void f2fs_i_gc_failures_write(struct inode *inode, 3090 unsigned int count) 3091 { 3092 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 3093 f2fs_mark_inode_dirty_sync(inode, true); 3094 } 3095 3096 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 3097 { 3098 F2FS_I(inode)->i_xattr_nid = xnid; 3099 f2fs_mark_inode_dirty_sync(inode, true); 3100 } 3101 3102 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 3103 { 3104 F2FS_I(inode)->i_pino = pino; 3105 f2fs_mark_inode_dirty_sync(inode, true); 3106 } 3107 3108 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 3109 { 3110 struct f2fs_inode_info *fi = F2FS_I(inode); 3111 3112 if (ri->i_inline & F2FS_INLINE_XATTR) 3113 set_bit(FI_INLINE_XATTR, fi->flags); 3114 if (ri->i_inline & F2FS_INLINE_DATA) 3115 set_bit(FI_INLINE_DATA, fi->flags); 3116 if (ri->i_inline & F2FS_INLINE_DENTRY) 3117 set_bit(FI_INLINE_DENTRY, fi->flags); 3118 if (ri->i_inline & F2FS_DATA_EXIST) 3119 set_bit(FI_DATA_EXIST, fi->flags); 3120 if (ri->i_inline & F2FS_INLINE_DOTS) 3121 set_bit(FI_INLINE_DOTS, fi->flags); 3122 if (ri->i_inline & F2FS_EXTRA_ATTR) 3123 set_bit(FI_EXTRA_ATTR, fi->flags); 3124 if (ri->i_inline & F2FS_PIN_FILE) 3125 set_bit(FI_PIN_FILE, fi->flags); 3126 if (ri->i_inline & F2FS_COMPRESS_RELEASED) 3127 set_bit(FI_COMPRESS_RELEASED, fi->flags); 3128 } 3129 3130 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 3131 { 3132 ri->i_inline = 0; 3133 3134 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 3135 ri->i_inline |= F2FS_INLINE_XATTR; 3136 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 3137 ri->i_inline |= F2FS_INLINE_DATA; 3138 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 3139 ri->i_inline |= F2FS_INLINE_DENTRY; 3140 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 3141 ri->i_inline |= F2FS_DATA_EXIST; 3142 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 3143 ri->i_inline |= F2FS_INLINE_DOTS; 3144 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 3145 ri->i_inline |= F2FS_EXTRA_ATTR; 3146 if (is_inode_flag_set(inode, FI_PIN_FILE)) 3147 ri->i_inline |= F2FS_PIN_FILE; 3148 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) 3149 ri->i_inline |= F2FS_COMPRESS_RELEASED; 3150 } 3151 3152 static inline int f2fs_has_extra_attr(struct inode *inode) 3153 { 3154 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 3155 } 3156 3157 static inline int f2fs_has_inline_xattr(struct inode *inode) 3158 { 3159 return is_inode_flag_set(inode, FI_INLINE_XATTR); 3160 } 3161 3162 static inline int f2fs_compressed_file(struct inode *inode) 3163 { 3164 return S_ISREG(inode->i_mode) && 3165 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 3166 } 3167 3168 static inline bool f2fs_need_compress_data(struct inode *inode) 3169 { 3170 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 3171 3172 if (!f2fs_compressed_file(inode)) 3173 return false; 3174 3175 if (compress_mode == COMPR_MODE_FS) 3176 return true; 3177 else if (compress_mode == COMPR_MODE_USER && 3178 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 3179 return true; 3180 3181 return false; 3182 } 3183 3184 static inline unsigned int addrs_per_inode(struct inode *inode) 3185 { 3186 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 3187 get_inline_xattr_addrs(inode); 3188 3189 if (!f2fs_compressed_file(inode)) 3190 return addrs; 3191 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 3192 } 3193 3194 static inline unsigned int addrs_per_block(struct inode *inode) 3195 { 3196 if (!f2fs_compressed_file(inode)) 3197 return DEF_ADDRS_PER_BLOCK; 3198 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 3199 } 3200 3201 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 3202 { 3203 struct f2fs_inode *ri = F2FS_INODE(page); 3204 3205 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 3206 get_inline_xattr_addrs(inode)]); 3207 } 3208 3209 static inline int inline_xattr_size(struct inode *inode) 3210 { 3211 if (f2fs_has_inline_xattr(inode)) 3212 return get_inline_xattr_addrs(inode) * sizeof(__le32); 3213 return 0; 3214 } 3215 3216 /* 3217 * Notice: check inline_data flag without inode page lock is unsafe. 3218 * It could change at any time by f2fs_convert_inline_page(). 3219 */ 3220 static inline int f2fs_has_inline_data(struct inode *inode) 3221 { 3222 return is_inode_flag_set(inode, FI_INLINE_DATA); 3223 } 3224 3225 static inline int f2fs_exist_data(struct inode *inode) 3226 { 3227 return is_inode_flag_set(inode, FI_DATA_EXIST); 3228 } 3229 3230 static inline int f2fs_has_inline_dots(struct inode *inode) 3231 { 3232 return is_inode_flag_set(inode, FI_INLINE_DOTS); 3233 } 3234 3235 static inline int f2fs_is_mmap_file(struct inode *inode) 3236 { 3237 return is_inode_flag_set(inode, FI_MMAP_FILE); 3238 } 3239 3240 static inline bool f2fs_is_pinned_file(struct inode *inode) 3241 { 3242 return is_inode_flag_set(inode, FI_PIN_FILE); 3243 } 3244 3245 static inline bool f2fs_is_atomic_file(struct inode *inode) 3246 { 3247 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 3248 } 3249 3250 static inline bool f2fs_is_cow_file(struct inode *inode) 3251 { 3252 return is_inode_flag_set(inode, FI_COW_FILE); 3253 } 3254 3255 static inline bool f2fs_is_first_block_written(struct inode *inode) 3256 { 3257 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 3258 } 3259 3260 static inline bool f2fs_is_drop_cache(struct inode *inode) 3261 { 3262 return is_inode_flag_set(inode, FI_DROP_CACHE); 3263 } 3264 3265 static inline void *inline_data_addr(struct inode *inode, struct page *page) 3266 { 3267 struct f2fs_inode *ri = F2FS_INODE(page); 3268 int extra_size = get_extra_isize(inode); 3269 3270 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 3271 } 3272 3273 static inline int f2fs_has_inline_dentry(struct inode *inode) 3274 { 3275 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 3276 } 3277 3278 static inline int is_file(struct inode *inode, int type) 3279 { 3280 return F2FS_I(inode)->i_advise & type; 3281 } 3282 3283 static inline void set_file(struct inode *inode, int type) 3284 { 3285 if (is_file(inode, type)) 3286 return; 3287 F2FS_I(inode)->i_advise |= type; 3288 f2fs_mark_inode_dirty_sync(inode, true); 3289 } 3290 3291 static inline void clear_file(struct inode *inode, int type) 3292 { 3293 if (!is_file(inode, type)) 3294 return; 3295 F2FS_I(inode)->i_advise &= ~type; 3296 f2fs_mark_inode_dirty_sync(inode, true); 3297 } 3298 3299 static inline bool f2fs_is_time_consistent(struct inode *inode) 3300 { 3301 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 3302 return false; 3303 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 3304 return false; 3305 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 3306 return false; 3307 return true; 3308 } 3309 3310 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 3311 { 3312 bool ret; 3313 3314 if (dsync) { 3315 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3316 3317 spin_lock(&sbi->inode_lock[DIRTY_META]); 3318 ret = list_empty(&F2FS_I(inode)->gdirty_list); 3319 spin_unlock(&sbi->inode_lock[DIRTY_META]); 3320 return ret; 3321 } 3322 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 3323 file_keep_isize(inode) || 3324 i_size_read(inode) & ~PAGE_MASK) 3325 return false; 3326 3327 if (!f2fs_is_time_consistent(inode)) 3328 return false; 3329 3330 spin_lock(&F2FS_I(inode)->i_size_lock); 3331 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 3332 spin_unlock(&F2FS_I(inode)->i_size_lock); 3333 3334 return ret; 3335 } 3336 3337 static inline bool f2fs_readonly(struct super_block *sb) 3338 { 3339 return sb_rdonly(sb); 3340 } 3341 3342 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 3343 { 3344 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3345 } 3346 3347 static inline bool is_dot_dotdot(const u8 *name, size_t len) 3348 { 3349 if (len == 1 && name[0] == '.') 3350 return true; 3351 3352 if (len == 2 && name[0] == '.' && name[1] == '.') 3353 return true; 3354 3355 return false; 3356 } 3357 3358 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3359 size_t size, gfp_t flags) 3360 { 3361 if (time_to_inject(sbi, FAULT_KMALLOC)) 3362 return NULL; 3363 3364 return kmalloc(size, flags); 3365 } 3366 3367 static inline void *f2fs_getname(struct f2fs_sb_info *sbi) 3368 { 3369 if (time_to_inject(sbi, FAULT_KMALLOC)) 3370 return NULL; 3371 3372 return __getname(); 3373 } 3374 3375 static inline void f2fs_putname(char *buf) 3376 { 3377 __putname(buf); 3378 } 3379 3380 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3381 size_t size, gfp_t flags) 3382 { 3383 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3384 } 3385 3386 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3387 size_t size, gfp_t flags) 3388 { 3389 if (time_to_inject(sbi, FAULT_KVMALLOC)) 3390 return NULL; 3391 3392 return kvmalloc(size, flags); 3393 } 3394 3395 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3396 size_t size, gfp_t flags) 3397 { 3398 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3399 } 3400 3401 static inline int get_extra_isize(struct inode *inode) 3402 { 3403 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3404 } 3405 3406 static inline int get_inline_xattr_addrs(struct inode *inode) 3407 { 3408 return F2FS_I(inode)->i_inline_xattr_size; 3409 } 3410 3411 #define f2fs_get_inode_mode(i) \ 3412 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ 3413 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3414 3415 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3416 (offsetof(struct f2fs_inode, i_extra_end) - \ 3417 offsetof(struct f2fs_inode, i_extra_isize)) \ 3418 3419 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3420 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3421 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3422 sizeof((f2fs_inode)->field)) \ 3423 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3424 3425 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3426 3427 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3428 3429 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3430 block_t blkaddr, int type); 3431 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3432 block_t blkaddr, int type) 3433 { 3434 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3435 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3436 blkaddr, type); 3437 f2fs_bug_on(sbi, 1); 3438 } 3439 } 3440 3441 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3442 { 3443 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3444 blkaddr == COMPRESS_ADDR) 3445 return false; 3446 return true; 3447 } 3448 3449 /* 3450 * file.c 3451 */ 3452 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3453 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3454 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3455 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3456 int f2fs_truncate(struct inode *inode); 3457 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path, 3458 struct kstat *stat, u32 request_mask, unsigned int flags); 3459 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 3460 struct iattr *attr); 3461 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3462 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3463 int f2fs_precache_extents(struct inode *inode); 3464 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); 3465 int f2fs_fileattr_set(struct mnt_idmap *idmap, 3466 struct dentry *dentry, struct fileattr *fa); 3467 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3468 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3469 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3470 int f2fs_pin_file_control(struct inode *inode, bool inc); 3471 3472 /* 3473 * inode.c 3474 */ 3475 void f2fs_set_inode_flags(struct inode *inode); 3476 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3477 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3478 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3479 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3480 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3481 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3482 void f2fs_update_inode_page(struct inode *inode); 3483 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3484 void f2fs_evict_inode(struct inode *inode); 3485 void f2fs_handle_failed_inode(struct inode *inode); 3486 3487 /* 3488 * namei.c 3489 */ 3490 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3491 bool hot, bool set); 3492 struct dentry *f2fs_get_parent(struct dentry *child); 3493 int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 3494 struct inode **new_inode); 3495 3496 /* 3497 * dir.c 3498 */ 3499 int f2fs_init_casefolded_name(const struct inode *dir, 3500 struct f2fs_filename *fname); 3501 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3502 int lookup, struct f2fs_filename *fname); 3503 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3504 struct f2fs_filename *fname); 3505 void f2fs_free_filename(struct f2fs_filename *fname); 3506 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3507 const struct f2fs_filename *fname, int *max_slots); 3508 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3509 unsigned int start_pos, struct fscrypt_str *fstr); 3510 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3511 struct f2fs_dentry_ptr *d); 3512 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3513 const struct f2fs_filename *fname, struct page *dpage); 3514 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3515 unsigned int current_depth); 3516 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3517 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3518 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3519 const struct f2fs_filename *fname, 3520 struct page **res_page); 3521 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3522 const struct qstr *child, struct page **res_page); 3523 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3524 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3525 struct page **page); 3526 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3527 struct page *page, struct inode *inode); 3528 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3529 const struct f2fs_filename *fname); 3530 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3531 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3532 unsigned int bit_pos); 3533 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3534 struct inode *inode, nid_t ino, umode_t mode); 3535 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, 3536 struct inode *inode, nid_t ino, umode_t mode); 3537 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3538 struct inode *inode, nid_t ino, umode_t mode); 3539 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3540 struct inode *dir, struct inode *inode); 3541 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3542 bool f2fs_empty_dir(struct inode *dir); 3543 3544 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3545 { 3546 if (fscrypt_is_nokey_name(dentry)) 3547 return -ENOKEY; 3548 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3549 inode, inode->i_ino, inode->i_mode); 3550 } 3551 3552 /* 3553 * super.c 3554 */ 3555 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3556 void f2fs_inode_synced(struct inode *inode); 3557 int f2fs_dquot_initialize(struct inode *inode); 3558 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3559 int f2fs_quota_sync(struct super_block *sb, int type); 3560 loff_t max_file_blocks(struct inode *inode); 3561 void f2fs_quota_off_umount(struct super_block *sb); 3562 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); 3563 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, 3564 bool irq_context); 3565 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error); 3566 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error); 3567 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3568 int f2fs_sync_fs(struct super_block *sb, int sync); 3569 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3570 3571 /* 3572 * hash.c 3573 */ 3574 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3575 3576 /* 3577 * node.c 3578 */ 3579 struct node_info; 3580 3581 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3582 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3583 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3584 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3585 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3586 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3587 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3588 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3589 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3590 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3591 struct node_info *ni, bool checkpoint_context); 3592 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3593 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3594 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3595 int f2fs_truncate_xattr_node(struct inode *inode); 3596 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3597 unsigned int seq_id); 3598 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); 3599 int f2fs_remove_inode_page(struct inode *inode); 3600 struct page *f2fs_new_inode_page(struct inode *inode); 3601 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3602 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3603 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3604 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3605 int f2fs_move_node_page(struct page *node_page, int gc_type); 3606 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3607 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3608 struct writeback_control *wbc, bool atomic, 3609 unsigned int *seq_id); 3610 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3611 struct writeback_control *wbc, 3612 bool do_balance, enum iostat_type io_type); 3613 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3614 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3615 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 3616 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3617 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3618 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3619 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3620 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3621 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3622 unsigned int segno, struct f2fs_summary_block *sum); 3623 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); 3624 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3625 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3626 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3627 int __init f2fs_create_node_manager_caches(void); 3628 void f2fs_destroy_node_manager_caches(void); 3629 3630 /* 3631 * segment.c 3632 */ 3633 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3634 int f2fs_commit_atomic_write(struct inode *inode); 3635 void f2fs_abort_atomic_write(struct inode *inode, bool clean); 3636 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3637 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3638 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3639 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3640 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3641 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3642 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3643 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3644 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); 3645 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3646 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3647 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3648 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3649 struct cp_control *cpc); 3650 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3651 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3652 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3653 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3654 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3655 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); 3656 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3657 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3658 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3659 void f2fs_get_new_segment(struct f2fs_sb_info *sbi, 3660 unsigned int *newseg, bool new_sec, int dir); 3661 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3662 unsigned int start, unsigned int end); 3663 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); 3664 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3665 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3666 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3667 struct cp_control *cpc); 3668 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3669 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3670 block_t blk_addr); 3671 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3672 enum iostat_type io_type); 3673 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3674 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3675 struct f2fs_io_info *fio); 3676 int f2fs_inplace_write_data(struct f2fs_io_info *fio); 3677 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3678 block_t old_blkaddr, block_t new_blkaddr, 3679 bool recover_curseg, bool recover_newaddr, 3680 bool from_gc); 3681 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3682 block_t old_addr, block_t new_addr, 3683 unsigned char version, bool recover_curseg, 3684 bool recover_newaddr); 3685 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3686 block_t old_blkaddr, block_t *new_blkaddr, 3687 struct f2fs_summary *sum, int type, 3688 struct f2fs_io_info *fio); 3689 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3690 block_t blkaddr, unsigned int blkcnt); 3691 void f2fs_wait_on_page_writeback(struct page *page, 3692 enum page_type type, bool ordered, bool locked); 3693 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); 3694 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3695 block_t len); 3696 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3697 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3698 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 3699 unsigned int val, int alloc); 3700 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3701 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); 3702 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); 3703 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); 3704 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); 3705 int __init f2fs_create_segment_manager_caches(void); 3706 void f2fs_destroy_segment_manager_caches(void); 3707 int f2fs_rw_hint_to_seg_type(enum rw_hint hint); 3708 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 3709 unsigned int segno); 3710 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 3711 unsigned int segno); 3712 3713 #define DEF_FRAGMENT_SIZE 4 3714 #define MIN_FRAGMENT_SIZE 1 3715 #define MAX_FRAGMENT_SIZE 512 3716 3717 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) 3718 { 3719 return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || 3720 F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; 3721 } 3722 3723 /* 3724 * checkpoint.c 3725 */ 3726 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, 3727 unsigned char reason); 3728 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); 3729 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3730 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3731 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); 3732 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 3733 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3734 block_t blkaddr, int type); 3735 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 3736 int type, bool sync); 3737 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, 3738 unsigned int ra_blocks); 3739 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 3740 long nr_to_write, enum iostat_type io_type); 3741 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3742 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3743 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); 3744 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 3745 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3746 unsigned int devidx, int type); 3747 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3748 unsigned int devidx, int type); 3749 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); 3750 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); 3751 void f2fs_add_orphan_inode(struct inode *inode); 3752 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 3753 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); 3754 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3755 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio); 3756 void f2fs_remove_dirty_inode(struct inode *inode); 3757 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, 3758 bool from_cp); 3759 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); 3760 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); 3761 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3762 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); 3763 int __init f2fs_create_checkpoint_caches(void); 3764 void f2fs_destroy_checkpoint_caches(void); 3765 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); 3766 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); 3767 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); 3768 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); 3769 3770 /* 3771 * data.c 3772 */ 3773 int __init f2fs_init_bioset(void); 3774 void f2fs_destroy_bioset(void); 3775 int f2fs_init_bio_entry_cache(void); 3776 void f2fs_destroy_bio_entry_cache(void); 3777 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, 3778 enum page_type type); 3779 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi); 3780 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); 3781 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 3782 struct inode *inode, struct page *page, 3783 nid_t ino, enum page_type type); 3784 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, 3785 struct bio **bio, struct page *page); 3786 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); 3787 int f2fs_submit_page_bio(struct f2fs_io_info *fio); 3788 int f2fs_merge_page_bio(struct f2fs_io_info *fio); 3789 void f2fs_submit_page_write(struct f2fs_io_info *fio); 3790 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 3791 block_t blk_addr, sector_t *sector); 3792 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); 3793 void f2fs_set_data_blkaddr(struct dnode_of_data *dn); 3794 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 3795 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); 3796 int f2fs_reserve_new_block(struct dnode_of_data *dn); 3797 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index); 3798 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); 3799 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, 3800 blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs); 3801 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index, 3802 pgoff_t *next_pgofs); 3803 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, 3804 bool for_write); 3805 struct page *f2fs_get_new_data_page(struct inode *inode, 3806 struct page *ipage, pgoff_t index, bool new_i_size); 3807 int f2fs_do_write_data_page(struct f2fs_io_info *fio); 3808 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag); 3809 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3810 u64 start, u64 len); 3811 int f2fs_encrypt_one_page(struct f2fs_io_info *fio); 3812 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); 3813 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); 3814 int f2fs_write_single_data_page(struct page *page, int *submitted, 3815 struct bio **bio, sector_t *last_block, 3816 struct writeback_control *wbc, 3817 enum iostat_type io_type, 3818 int compr_blocks, bool allow_balance); 3819 void f2fs_write_failed(struct inode *inode, loff_t to); 3820 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length); 3821 bool f2fs_release_folio(struct folio *folio, gfp_t wait); 3822 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); 3823 void f2fs_clear_page_cache_dirty_tag(struct page *page); 3824 int f2fs_init_post_read_processing(void); 3825 void f2fs_destroy_post_read_processing(void); 3826 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); 3827 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); 3828 extern const struct iomap_ops f2fs_iomap_ops; 3829 3830 /* 3831 * gc.c 3832 */ 3833 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); 3834 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); 3835 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); 3836 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control); 3837 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); 3838 int f2fs_resize_fs(struct file *filp, __u64 block_count); 3839 int __init f2fs_create_garbage_collection_cache(void); 3840 void f2fs_destroy_garbage_collection_cache(void); 3841 /* victim selection function for cleaning and SSR */ 3842 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, 3843 int gc_type, int type, char alloc_mode, 3844 unsigned long long age); 3845 3846 /* 3847 * recovery.c 3848 */ 3849 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); 3850 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); 3851 int __init f2fs_create_recovery_cache(void); 3852 void f2fs_destroy_recovery_cache(void); 3853 3854 /* 3855 * debug.c 3856 */ 3857 #ifdef CONFIG_F2FS_STAT_FS 3858 struct f2fs_stat_info { 3859 struct list_head stat_list; 3860 struct f2fs_sb_info *sbi; 3861 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; 3862 int main_area_segs, main_area_sections, main_area_zones; 3863 unsigned long long hit_cached[NR_EXTENT_CACHES]; 3864 unsigned long long hit_rbtree[NR_EXTENT_CACHES]; 3865 unsigned long long total_ext[NR_EXTENT_CACHES]; 3866 unsigned long long hit_total[NR_EXTENT_CACHES]; 3867 int ext_tree[NR_EXTENT_CACHES]; 3868 int zombie_tree[NR_EXTENT_CACHES]; 3869 int ext_node[NR_EXTENT_CACHES]; 3870 /* to count memory footprint */ 3871 unsigned long long ext_mem[NR_EXTENT_CACHES]; 3872 /* for read extent cache */ 3873 unsigned long long hit_largest; 3874 /* for block age extent cache */ 3875 unsigned long long allocated_data_blocks; 3876 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; 3877 int ndirty_data, ndirty_qdata; 3878 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; 3879 int nats, dirty_nats, sits, dirty_sits; 3880 int free_nids, avail_nids, alloc_nids; 3881 int total_count, utilization; 3882 int bg_gc, nr_wb_cp_data, nr_wb_data; 3883 int nr_rd_data, nr_rd_node, nr_rd_meta; 3884 int nr_dio_read, nr_dio_write; 3885 unsigned int io_skip_bggc, other_skip_bggc; 3886 int nr_flushing, nr_flushed, flush_list_empty; 3887 int nr_discarding, nr_discarded; 3888 int nr_discard_cmd; 3889 unsigned int undiscard_blks; 3890 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; 3891 unsigned int cur_ckpt_time, peak_ckpt_time; 3892 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 3893 int compr_inode, swapfile_inode; 3894 unsigned long long compr_blocks; 3895 int aw_cnt, max_aw_cnt; 3896 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; 3897 unsigned int bimodal, avg_vblocks; 3898 int util_free, util_valid, util_invalid; 3899 int rsvd_segs, overp_segs; 3900 int dirty_count, node_pages, meta_pages, compress_pages; 3901 int compress_page_hit; 3902 int prefree_count, call_count, cp_count, bg_cp_count; 3903 int tot_segs, node_segs, data_segs, free_segs, free_secs; 3904 int bg_node_segs, bg_data_segs; 3905 int tot_blks, data_blks, node_blks; 3906 int bg_data_blks, bg_node_blks; 3907 int curseg[NR_CURSEG_TYPE]; 3908 int cursec[NR_CURSEG_TYPE]; 3909 int curzone[NR_CURSEG_TYPE]; 3910 unsigned int dirty_seg[NR_CURSEG_TYPE]; 3911 unsigned int full_seg[NR_CURSEG_TYPE]; 3912 unsigned int valid_blks[NR_CURSEG_TYPE]; 3913 3914 unsigned int meta_count[META_MAX]; 3915 unsigned int segment_count[2]; 3916 unsigned int block_count[2]; 3917 unsigned int inplace_count; 3918 unsigned long long base_mem, cache_mem, page_mem; 3919 }; 3920 3921 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) 3922 { 3923 return (struct f2fs_stat_info *)sbi->stat_info; 3924 } 3925 3926 #define stat_inc_cp_count(si) ((si)->cp_count++) 3927 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++) 3928 #define stat_inc_call_count(si) ((si)->call_count++) 3929 #define stat_inc_bggc_count(si) ((si)->bg_gc++) 3930 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++) 3931 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) 3932 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) 3933 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) 3934 #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type])) 3935 #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type])) 3936 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) 3937 #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type])) 3938 #define stat_inc_inline_xattr(inode) \ 3939 do { \ 3940 if (f2fs_has_inline_xattr(inode)) \ 3941 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ 3942 } while (0) 3943 #define stat_dec_inline_xattr(inode) \ 3944 do { \ 3945 if (f2fs_has_inline_xattr(inode)) \ 3946 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ 3947 } while (0) 3948 #define stat_inc_inline_inode(inode) \ 3949 do { \ 3950 if (f2fs_has_inline_data(inode)) \ 3951 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ 3952 } while (0) 3953 #define stat_dec_inline_inode(inode) \ 3954 do { \ 3955 if (f2fs_has_inline_data(inode)) \ 3956 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ 3957 } while (0) 3958 #define stat_inc_inline_dir(inode) \ 3959 do { \ 3960 if (f2fs_has_inline_dentry(inode)) \ 3961 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ 3962 } while (0) 3963 #define stat_dec_inline_dir(inode) \ 3964 do { \ 3965 if (f2fs_has_inline_dentry(inode)) \ 3966 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ 3967 } while (0) 3968 #define stat_inc_compr_inode(inode) \ 3969 do { \ 3970 if (f2fs_compressed_file(inode)) \ 3971 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \ 3972 } while (0) 3973 #define stat_dec_compr_inode(inode) \ 3974 do { \ 3975 if (f2fs_compressed_file(inode)) \ 3976 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ 3977 } while (0) 3978 #define stat_add_compr_blocks(inode, blocks) \ 3979 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3980 #define stat_sub_compr_blocks(inode, blocks) \ 3981 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3982 #define stat_inc_swapfile_inode(inode) \ 3983 (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode)) 3984 #define stat_dec_swapfile_inode(inode) \ 3985 (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode)) 3986 #define stat_inc_atomic_inode(inode) \ 3987 (atomic_inc(&F2FS_I_SB(inode)->atomic_files)) 3988 #define stat_dec_atomic_inode(inode) \ 3989 (atomic_dec(&F2FS_I_SB(inode)->atomic_files)) 3990 #define stat_inc_meta_count(sbi, blkaddr) \ 3991 do { \ 3992 if (blkaddr < SIT_I(sbi)->sit_base_addr) \ 3993 atomic_inc(&(sbi)->meta_count[META_CP]); \ 3994 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \ 3995 atomic_inc(&(sbi)->meta_count[META_SIT]); \ 3996 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \ 3997 atomic_inc(&(sbi)->meta_count[META_NAT]); \ 3998 else if (blkaddr < SM_I(sbi)->main_blkaddr) \ 3999 atomic_inc(&(sbi)->meta_count[META_SSA]); \ 4000 } while (0) 4001 #define stat_inc_seg_type(sbi, curseg) \ 4002 ((sbi)->segment_count[(curseg)->alloc_type]++) 4003 #define stat_inc_block_count(sbi, curseg) \ 4004 ((sbi)->block_count[(curseg)->alloc_type]++) 4005 #define stat_inc_inplace_blocks(sbi) \ 4006 (atomic_inc(&(sbi)->inplace_count)) 4007 #define stat_update_max_atomic_write(inode) \ 4008 do { \ 4009 int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \ 4010 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 4011 if (cur > max) \ 4012 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ 4013 } while (0) 4014 #define stat_inc_seg_count(sbi, type, gc_type) \ 4015 do { \ 4016 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 4017 si->tot_segs++; \ 4018 if ((type) == SUM_TYPE_DATA) { \ 4019 si->data_segs++; \ 4020 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \ 4021 } else { \ 4022 si->node_segs++; \ 4023 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \ 4024 } \ 4025 } while (0) 4026 4027 #define stat_inc_tot_blk_count(si, blks) \ 4028 ((si)->tot_blks += (blks)) 4029 4030 #define stat_inc_data_blk_count(sbi, blks, gc_type) \ 4031 do { \ 4032 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 4033 stat_inc_tot_blk_count(si, blks); \ 4034 si->data_blks += (blks); \ 4035 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 4036 } while (0) 4037 4038 #define stat_inc_node_blk_count(sbi, blks, gc_type) \ 4039 do { \ 4040 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 4041 stat_inc_tot_blk_count(si, blks); \ 4042 si->node_blks += (blks); \ 4043 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 4044 } while (0) 4045 4046 int f2fs_build_stats(struct f2fs_sb_info *sbi); 4047 void f2fs_destroy_stats(struct f2fs_sb_info *sbi); 4048 void __init f2fs_create_root_stats(void); 4049 void f2fs_destroy_root_stats(void); 4050 void f2fs_update_sit_info(struct f2fs_sb_info *sbi); 4051 #else 4052 #define stat_inc_cp_count(si) do { } while (0) 4053 #define stat_inc_bg_cp_count(si) do { } while (0) 4054 #define stat_inc_call_count(si) do { } while (0) 4055 #define stat_inc_bggc_count(si) do { } while (0) 4056 #define stat_io_skip_bggc_count(sbi) do { } while (0) 4057 #define stat_other_skip_bggc_count(sbi) do { } while (0) 4058 #define stat_inc_dirty_inode(sbi, type) do { } while (0) 4059 #define stat_dec_dirty_inode(sbi, type) do { } while (0) 4060 #define stat_inc_total_hit(sbi, type) do { } while (0) 4061 #define stat_inc_rbtree_node_hit(sbi, type) do { } while (0) 4062 #define stat_inc_largest_node_hit(sbi) do { } while (0) 4063 #define stat_inc_cached_node_hit(sbi, type) do { } while (0) 4064 #define stat_inc_inline_xattr(inode) do { } while (0) 4065 #define stat_dec_inline_xattr(inode) do { } while (0) 4066 #define stat_inc_inline_inode(inode) do { } while (0) 4067 #define stat_dec_inline_inode(inode) do { } while (0) 4068 #define stat_inc_inline_dir(inode) do { } while (0) 4069 #define stat_dec_inline_dir(inode) do { } while (0) 4070 #define stat_inc_compr_inode(inode) do { } while (0) 4071 #define stat_dec_compr_inode(inode) do { } while (0) 4072 #define stat_add_compr_blocks(inode, blocks) do { } while (0) 4073 #define stat_sub_compr_blocks(inode, blocks) do { } while (0) 4074 #define stat_inc_swapfile_inode(inode) do { } while (0) 4075 #define stat_dec_swapfile_inode(inode) do { } while (0) 4076 #define stat_inc_atomic_inode(inode) do { } while (0) 4077 #define stat_dec_atomic_inode(inode) do { } while (0) 4078 #define stat_update_max_atomic_write(inode) do { } while (0) 4079 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) 4080 #define stat_inc_seg_type(sbi, curseg) do { } while (0) 4081 #define stat_inc_block_count(sbi, curseg) do { } while (0) 4082 #define stat_inc_inplace_blocks(sbi) do { } while (0) 4083 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0) 4084 #define stat_inc_tot_blk_count(si, blks) do { } while (0) 4085 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) 4086 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) 4087 4088 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 4089 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 4090 static inline void __init f2fs_create_root_stats(void) { } 4091 static inline void f2fs_destroy_root_stats(void) { } 4092 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} 4093 #endif 4094 4095 extern const struct file_operations f2fs_dir_operations; 4096 extern const struct file_operations f2fs_file_operations; 4097 extern const struct inode_operations f2fs_file_inode_operations; 4098 extern const struct address_space_operations f2fs_dblock_aops; 4099 extern const struct address_space_operations f2fs_node_aops; 4100 extern const struct address_space_operations f2fs_meta_aops; 4101 extern const struct inode_operations f2fs_dir_inode_operations; 4102 extern const struct inode_operations f2fs_symlink_inode_operations; 4103 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; 4104 extern const struct inode_operations f2fs_special_inode_operations; 4105 extern struct kmem_cache *f2fs_inode_entry_slab; 4106 4107 /* 4108 * inline.c 4109 */ 4110 bool f2fs_may_inline_data(struct inode *inode); 4111 bool f2fs_sanity_check_inline_data(struct inode *inode); 4112 bool f2fs_may_inline_dentry(struct inode *inode); 4113 void f2fs_do_read_inline_data(struct page *page, struct page *ipage); 4114 void f2fs_truncate_inline_inode(struct inode *inode, 4115 struct page *ipage, u64 from); 4116 int f2fs_read_inline_data(struct inode *inode, struct page *page); 4117 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); 4118 int f2fs_convert_inline_inode(struct inode *inode); 4119 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); 4120 int f2fs_write_inline_data(struct inode *inode, struct page *page); 4121 int f2fs_recover_inline_data(struct inode *inode, struct page *npage); 4122 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, 4123 const struct f2fs_filename *fname, 4124 struct page **res_page); 4125 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, 4126 struct page *ipage); 4127 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, 4128 struct inode *inode, nid_t ino, umode_t mode); 4129 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, 4130 struct page *page, struct inode *dir, 4131 struct inode *inode); 4132 bool f2fs_empty_inline_dir(struct inode *dir); 4133 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, 4134 struct fscrypt_str *fstr); 4135 int f2fs_inline_data_fiemap(struct inode *inode, 4136 struct fiemap_extent_info *fieinfo, 4137 __u64 start, __u64 len); 4138 4139 /* 4140 * shrinker.c 4141 */ 4142 unsigned long f2fs_shrink_count(struct shrinker *shrink, 4143 struct shrink_control *sc); 4144 unsigned long f2fs_shrink_scan(struct shrinker *shrink, 4145 struct shrink_control *sc); 4146 void f2fs_join_shrinker(struct f2fs_sb_info *sbi); 4147 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); 4148 4149 /* 4150 * extent_cache.c 4151 */ 4152 bool sanity_check_extent_cache(struct inode *inode); 4153 void f2fs_init_extent_tree(struct inode *inode); 4154 void f2fs_drop_extent_tree(struct inode *inode); 4155 void f2fs_destroy_extent_node(struct inode *inode); 4156 void f2fs_destroy_extent_tree(struct inode *inode); 4157 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); 4158 int __init f2fs_create_extent_cache(void); 4159 void f2fs_destroy_extent_cache(void); 4160 4161 /* read extent cache ops */ 4162 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage); 4163 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs, 4164 struct extent_info *ei); 4165 bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index, 4166 block_t *blkaddr); 4167 void f2fs_update_read_extent_cache(struct dnode_of_data *dn); 4168 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn, 4169 pgoff_t fofs, block_t blkaddr, unsigned int len); 4170 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, 4171 int nr_shrink); 4172 4173 /* block age extent cache ops */ 4174 void f2fs_init_age_extent_tree(struct inode *inode); 4175 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs, 4176 struct extent_info *ei); 4177 void f2fs_update_age_extent_cache(struct dnode_of_data *dn); 4178 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn, 4179 pgoff_t fofs, unsigned int len); 4180 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, 4181 int nr_shrink); 4182 4183 /* 4184 * sysfs.c 4185 */ 4186 #define MIN_RA_MUL 2 4187 #define MAX_RA_MUL 256 4188 4189 int __init f2fs_init_sysfs(void); 4190 void f2fs_exit_sysfs(void); 4191 int f2fs_register_sysfs(struct f2fs_sb_info *sbi); 4192 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); 4193 4194 /* verity.c */ 4195 extern const struct fsverity_operations f2fs_verityops; 4196 4197 /* 4198 * crypto support 4199 */ 4200 static inline bool f2fs_encrypted_file(struct inode *inode) 4201 { 4202 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); 4203 } 4204 4205 static inline void f2fs_set_encrypted_inode(struct inode *inode) 4206 { 4207 #ifdef CONFIG_FS_ENCRYPTION 4208 file_set_encrypt(inode); 4209 f2fs_set_inode_flags(inode); 4210 #endif 4211 } 4212 4213 /* 4214 * Returns true if the reads of the inode's data need to undergo some 4215 * postprocessing step, like decryption or authenticity verification. 4216 */ 4217 static inline bool f2fs_post_read_required(struct inode *inode) 4218 { 4219 return f2fs_encrypted_file(inode) || fsverity_active(inode) || 4220 f2fs_compressed_file(inode); 4221 } 4222 4223 /* 4224 * compress.c 4225 */ 4226 #ifdef CONFIG_F2FS_FS_COMPRESSION 4227 bool f2fs_is_compressed_page(struct page *page); 4228 struct page *f2fs_compress_control_page(struct page *page); 4229 int f2fs_prepare_compress_overwrite(struct inode *inode, 4230 struct page **pagep, pgoff_t index, void **fsdata); 4231 bool f2fs_compress_write_end(struct inode *inode, void *fsdata, 4232 pgoff_t index, unsigned copied); 4233 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); 4234 void f2fs_compress_write_end_io(struct bio *bio, struct page *page); 4235 bool f2fs_is_compress_backend_ready(struct inode *inode); 4236 int __init f2fs_init_compress_mempool(void); 4237 void f2fs_destroy_compress_mempool(void); 4238 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task); 4239 void f2fs_end_read_compressed_page(struct page *page, bool failed, 4240 block_t blkaddr, bool in_task); 4241 bool f2fs_cluster_is_empty(struct compress_ctx *cc); 4242 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); 4243 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages, 4244 int index, int nr_pages, bool uptodate); 4245 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn); 4246 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); 4247 int f2fs_write_multi_pages(struct compress_ctx *cc, 4248 int *submitted, 4249 struct writeback_control *wbc, 4250 enum iostat_type io_type); 4251 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); 4252 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, 4253 pgoff_t fofs, block_t blkaddr, 4254 unsigned int llen, unsigned int c_len); 4255 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, 4256 unsigned nr_pages, sector_t *last_block_in_bio, 4257 bool is_readahead, bool for_write); 4258 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); 4259 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, 4260 bool in_task); 4261 void f2fs_put_page_dic(struct page *page, bool in_task); 4262 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn); 4263 int f2fs_init_compress_ctx(struct compress_ctx *cc); 4264 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); 4265 void f2fs_init_compress_info(struct f2fs_sb_info *sbi); 4266 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); 4267 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi); 4268 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); 4269 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); 4270 int __init f2fs_init_compress_cache(void); 4271 void f2fs_destroy_compress_cache(void); 4272 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); 4273 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); 4274 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4275 nid_t ino, block_t blkaddr); 4276 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4277 block_t blkaddr); 4278 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); 4279 #define inc_compr_inode_stat(inode) \ 4280 do { \ 4281 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4282 sbi->compr_new_inode++; \ 4283 } while (0) 4284 #define add_compr_block_stat(inode, blocks) \ 4285 do { \ 4286 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4287 int diff = F2FS_I(inode)->i_cluster_size - blocks; \ 4288 sbi->compr_written_block += blocks; \ 4289 sbi->compr_saved_block += diff; \ 4290 } while (0) 4291 #else 4292 static inline bool f2fs_is_compressed_page(struct page *page) { return false; } 4293 static inline bool f2fs_is_compress_backend_ready(struct inode *inode) 4294 { 4295 if (!f2fs_compressed_file(inode)) 4296 return true; 4297 /* not support compression */ 4298 return false; 4299 } 4300 static inline struct page *f2fs_compress_control_page(struct page *page) 4301 { 4302 WARN_ON_ONCE(1); 4303 return ERR_PTR(-EINVAL); 4304 } 4305 static inline int __init f2fs_init_compress_mempool(void) { return 0; } 4306 static inline void f2fs_destroy_compress_mempool(void) { } 4307 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic, 4308 bool in_task) { } 4309 static inline void f2fs_end_read_compressed_page(struct page *page, 4310 bool failed, block_t blkaddr, bool in_task) 4311 { 4312 WARN_ON_ONCE(1); 4313 } 4314 static inline void f2fs_put_page_dic(struct page *page, bool in_task) 4315 { 4316 WARN_ON_ONCE(1); 4317 } 4318 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; } 4319 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; } 4320 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } 4321 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { } 4322 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } 4323 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } 4324 static inline int __init f2fs_init_compress_cache(void) { return 0; } 4325 static inline void f2fs_destroy_compress_cache(void) { } 4326 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, 4327 block_t blkaddr) { } 4328 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, 4329 struct page *page, nid_t ino, block_t blkaddr) { } 4330 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, 4331 struct page *page, block_t blkaddr) { return false; } 4332 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, 4333 nid_t ino) { } 4334 #define inc_compr_inode_stat(inode) do { } while (0) 4335 static inline void f2fs_update_read_extent_tree_range_compressed( 4336 struct inode *inode, 4337 pgoff_t fofs, block_t blkaddr, 4338 unsigned int llen, unsigned int c_len) { } 4339 #endif 4340 4341 static inline int set_compress_context(struct inode *inode) 4342 { 4343 #ifdef CONFIG_F2FS_FS_COMPRESSION 4344 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4345 4346 F2FS_I(inode)->i_compress_algorithm = 4347 F2FS_OPTION(sbi).compress_algorithm; 4348 F2FS_I(inode)->i_log_cluster_size = 4349 F2FS_OPTION(sbi).compress_log_size; 4350 F2FS_I(inode)->i_compress_flag = 4351 F2FS_OPTION(sbi).compress_chksum ? 4352 BIT(COMPRESS_CHKSUM) : 0; 4353 F2FS_I(inode)->i_cluster_size = 4354 BIT(F2FS_I(inode)->i_log_cluster_size); 4355 if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 || 4356 F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) && 4357 F2FS_OPTION(sbi).compress_level) 4358 F2FS_I(inode)->i_compress_level = 4359 F2FS_OPTION(sbi).compress_level; 4360 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; 4361 set_inode_flag(inode, FI_COMPRESSED_FILE); 4362 stat_inc_compr_inode(inode); 4363 inc_compr_inode_stat(inode); 4364 f2fs_mark_inode_dirty_sync(inode, true); 4365 return 0; 4366 #else 4367 return -EOPNOTSUPP; 4368 #endif 4369 } 4370 4371 static inline bool f2fs_disable_compressed_file(struct inode *inode) 4372 { 4373 struct f2fs_inode_info *fi = F2FS_I(inode); 4374 4375 if (!f2fs_compressed_file(inode)) 4376 return true; 4377 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode)) 4378 return false; 4379 4380 fi->i_flags &= ~F2FS_COMPR_FL; 4381 stat_dec_compr_inode(inode); 4382 clear_inode_flag(inode, FI_COMPRESSED_FILE); 4383 f2fs_mark_inode_dirty_sync(inode, true); 4384 return true; 4385 } 4386 4387 #define F2FS_FEATURE_FUNCS(name, flagname) \ 4388 static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \ 4389 { \ 4390 return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \ 4391 } 4392 4393 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); 4394 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); 4395 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); 4396 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); 4397 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); 4398 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); 4399 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); 4400 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); 4401 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); 4402 F2FS_FEATURE_FUNCS(verity, VERITY); 4403 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); 4404 F2FS_FEATURE_FUNCS(casefold, CASEFOLD); 4405 F2FS_FEATURE_FUNCS(compression, COMPRESSION); 4406 F2FS_FEATURE_FUNCS(readonly, RO); 4407 4408 #ifdef CONFIG_BLK_DEV_ZONED 4409 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, 4410 block_t blkaddr) 4411 { 4412 unsigned int zno = blkaddr / sbi->blocks_per_blkz; 4413 4414 return test_bit(zno, FDEV(devi).blkz_seq); 4415 } 4416 #endif 4417 4418 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi) 4419 { 4420 return f2fs_sb_has_blkzoned(sbi); 4421 } 4422 4423 static inline bool f2fs_bdev_support_discard(struct block_device *bdev) 4424 { 4425 return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev); 4426 } 4427 4428 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi) 4429 { 4430 int i; 4431 4432 if (!f2fs_is_multi_device(sbi)) 4433 return f2fs_bdev_support_discard(sbi->sb->s_bdev); 4434 4435 for (i = 0; i < sbi->s_ndevs; i++) 4436 if (f2fs_bdev_support_discard(FDEV(i).bdev)) 4437 return true; 4438 return false; 4439 } 4440 4441 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi) 4442 { 4443 return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) || 4444 f2fs_hw_should_discard(sbi); 4445 } 4446 4447 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) 4448 { 4449 int i; 4450 4451 if (!f2fs_is_multi_device(sbi)) 4452 return bdev_read_only(sbi->sb->s_bdev); 4453 4454 for (i = 0; i < sbi->s_ndevs; i++) 4455 if (bdev_read_only(FDEV(i).bdev)) 4456 return true; 4457 return false; 4458 } 4459 4460 static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi) 4461 { 4462 return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi); 4463 } 4464 4465 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) 4466 { 4467 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; 4468 } 4469 4470 static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi) 4471 { 4472 return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW; 4473 } 4474 4475 static inline bool f2fs_may_compress(struct inode *inode) 4476 { 4477 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) || 4478 f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode)) 4479 return false; 4480 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); 4481 } 4482 4483 static inline void f2fs_i_compr_blocks_update(struct inode *inode, 4484 u64 blocks, bool add) 4485 { 4486 struct f2fs_inode_info *fi = F2FS_I(inode); 4487 int diff = fi->i_cluster_size - blocks; 4488 4489 /* don't update i_compr_blocks if saved blocks were released */ 4490 if (!add && !atomic_read(&fi->i_compr_blocks)) 4491 return; 4492 4493 if (add) { 4494 atomic_add(diff, &fi->i_compr_blocks); 4495 stat_add_compr_blocks(inode, diff); 4496 } else { 4497 atomic_sub(diff, &fi->i_compr_blocks); 4498 stat_sub_compr_blocks(inode, diff); 4499 } 4500 f2fs_mark_inode_dirty_sync(inode, true); 4501 } 4502 4503 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, 4504 int flag) 4505 { 4506 if (!f2fs_is_multi_device(sbi)) 4507 return false; 4508 if (flag != F2FS_GET_BLOCK_DIO) 4509 return false; 4510 return sbi->aligned_blksize; 4511 } 4512 4513 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) 4514 { 4515 return fsverity_active(inode) && 4516 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 4517 } 4518 4519 #ifdef CONFIG_F2FS_FAULT_INJECTION 4520 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, 4521 unsigned int type); 4522 #else 4523 #define f2fs_build_fault_attr(sbi, rate, type) do { } while (0) 4524 #endif 4525 4526 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) 4527 { 4528 #ifdef CONFIG_QUOTA 4529 if (f2fs_sb_has_quota_ino(sbi)) 4530 return true; 4531 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 4532 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || 4533 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) 4534 return true; 4535 #endif 4536 return false; 4537 } 4538 4539 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi) 4540 { 4541 return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; 4542 } 4543 4544 static inline void f2fs_io_schedule_timeout(long timeout) 4545 { 4546 set_current_state(TASK_UNINTERRUPTIBLE); 4547 io_schedule_timeout(timeout); 4548 } 4549 4550 static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs, 4551 enum page_type type) 4552 { 4553 if (unlikely(f2fs_cp_error(sbi))) 4554 return; 4555 4556 if (ofs == sbi->page_eio_ofs[type]) { 4557 if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO) 4558 set_ckpt_flags(sbi, CP_ERROR_FLAG); 4559 } else { 4560 sbi->page_eio_ofs[type] = ofs; 4561 sbi->page_eio_cnt[type] = 0; 4562 } 4563 } 4564 4565 static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi) 4566 { 4567 return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb); 4568 } 4569 4570 #define EFSBADCRC EBADMSG /* Bad CRC detected */ 4571 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ 4572 4573 #endif /* _LINUX_F2FS_H */ 4574