/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
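
/*
 * Worked example (illustrative, not from the original source): ver_after()
 * compares checkpoint versions with signed subtraction so that it stays
 * correct across u64 wraparound.  With a == 5 and b == ULLONG_MAX,
 * (long long)(a - b) == 6 > 0, so ver_after(a, b) is true even though
 * a < b numerically.
 */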

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read
 * lock while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
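
/*
 * Usage sketch (illustrative, mirroring callers elsewhere in f2fs): the
 * feature word lives in the raw superblock in little-endian form, which is
 * why every test and update above goes through cpu_to_le32():
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		...;
 *	F2FS_SET_FEATURE(sbi, F2FS_FEATURE_VERITY);
 */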

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};
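
/*
 * Note: these types are the "type" argument of f2fs_is_valid_blkaddr();
 * they select how strictly a block address is sanity-checked, with the
 * DATA_GENERIC_ENHANCE* variants differing only in strictness and in
 * whether a failure warns, as described above.
 */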

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
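
/*
 * Worked example (illustrative): pending discards are binned by block
 * count, one list per length, and everything at or above MAX_PLIST_NUM
 * blocks falls into the last list:
 *
 *	plist_idx(1)    -> 0
 *	plist_idx(16)   -> 15
 *	plist_idx(512)  -> 511
 *	plist_idx(4096) -> 511
 */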

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};
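
/*
 * Note: min/mid/max_interval above are normally seeded from the
 * DEF_MIN/MID/MAX_DISCARD_ISSUE_TIME defaults defined earlier
 * (50ms / 500ms / 60s) and then tuned per policy type when a policy
 * is initialized.
 */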

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
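
/*
 * Worked example (illustrative, assuming NAT_JOURNAL_ENTRIES == 38, its
 * value for the default 4KB block layout): if the journal already holds
 * 30 NAT entries, MAX_NAT_JENTRIES() is 8, so
 * __has_cursum_space(journal, 10, NAT_JOURNAL) returns false and the
 * caller must flush journal entries back to the NAT area first.
 */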

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL.  In all these
	 * cases we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
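
/*
 * Resulting inline dentry layout (illustrative), as offsets from t:
 *
 *	0                     dentry bitmap   (bitmap_size bytes)
 *	bitmap_size           reserved        (reserved_size bytes)
 *	... + reserved_size   dir entries     (SIZE_OF_DIR_ENTRY each)
 *	... + entries         filename slots  (F2FS_SLOT_LEN each)
 */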

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
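
/*
 * Usage sketch (illustrative; f2fs_map_blocks() itself is declared further
 * down in this header): fill in the logical range, then read the physical
 * mapping and F2FS_MAP_* flags back out:
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = index,
 *		.m_len = 1,
 *		.m_seg_type = NO_CHECK_TYPE,
 *		.m_may_create = false,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;
 */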

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data*/
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs*/
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */
	struct inode *cow_inode;	/* copy-on-write inode for atomic write */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
};
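
/*
 * Note: flags[] above is a bitmap over the FI_* values and is meant to be
 * accessed through the is_inode_flag_set()/set_inode_flag() style helpers
 * defined later in this header rather than open-coded bit operations.
 */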

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
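
/*
 * Worked example (illustrative): back = {fofs 0, blk 100, len 4} and
 * front = {fofs 4, blk 104, len 2} merge into {fofs 0, blk 100, len 6},
 * since both the file range and the on-disk range are contiguous; if
 * front started at blk 105 instead, __is_extent_mergeable() would
 * reject the merge.
 */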

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
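
/*
 * Usage sketch (illustrative of the common calling pattern): start from a
 * zeroed dnode with no cached pages and let the node manager fill it in:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = f2fs_data_blkaddr(&dn);
 *	f2fs_put_dnode(&dn);
 */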

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};
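
/*
 * Count check: the six entries up to NR_PERSISTENT_LOG are the on-disk
 * logs (NR_CURSEG_PERSIST_TYPE == 6), while COLD_DATA_PINNED and
 * ALL_DATA_ATGC are the two in-memory ones (NR_CURSEG_INMEM_TYPE == 2),
 * so NO_CHECK_TYPE == NR_CURSEG_TYPE == 8.
 */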

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};
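
/*
 * Worked example (illustrative): PAGE_TYPE_OF_BIO() folds the
 * tracepoint-only and META_FLUSH types back onto a real bio type, e.g.
 * PAGE_TYPE_OF_BIO(META_FLUSH) == META, while DATA/NODE/META map to
 * themselves.
 */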

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file */
	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	enum req_op op;		/* contains REQ_OP_ */
	blk_opf_t op_flags;	/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before CoW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	bool post_read;		/* require post read */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

struct f2fs_gc_control {
	unsigned int victim_segno;	/* target victim segment number */
	int init_gc_type;		/* FG_GC or BG_GC */
	bool no_bg_gc;			/* check the space and stop bg_gc */
	bool should_migrate_blocks;	/* should migrate blocks */
	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
	unsigned int nr_free_secs;	/* # of free sections to do GC */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
	SBI_IS_FREEZING,			/* freezefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

/* Note that you need to keep synchronization with this gc_mode_names array */
enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

enum {
	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */

enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		}\
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
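
/*
 * Worked example (illustrative) of Layout A with PAGE_PRIVATE_MAX == 6:
 * storing data 0x5 on a page that also carries the REF_RESOURCE bit gives
 *
 *	page.private = (0x5 << 6)
 *		     | (1 << PAGE_PRIVATE_REF_RESOURCE)
 *		     | (1 << PAGE_PRIVATE_NOT_POINTER)
 *		     = 0x140 | 0x20 | 0x1 = 0x161,
 *
 * and get_page_private_data() recovers 0x161 >> 6 == 0x5.
 */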

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define	COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define	COMPRESS_LEVEL_OFFSET	8
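
/*
 * Size check (illustrative): COMPRESS_HEADER_SIZE is 4 (clen) + 4 (chksum)
 * + 4 * 4 (reserved) = 24 bytes; cdata[] is a flexible array member and
 * adds nothing to sizeof(struct compress_data).
 */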
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages
	 * from being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
	struct work_struct free_work;	/* work to free this structure itself later */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct f2fs_rwsem io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */
	pgoff_t page_eio_ofs[NR_PAGE_TYPE];	/* EIO page offset */
	int page_eio_cnt[NR_PAGE_TYPE];		/* EIO count */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
	struct f2fs_rwsem node_write;		/* locking node writes */
	struct f2fs_rwsem node_change;		/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int unusable_blocks_per_sec;	/* unusable blocks per section */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	bool readdir_ra;			/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfiles */
	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;
	/* # of node block writes as roll forward recovery */
	struct percpu_counter rf_node_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct f2fs_rwsem gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */
	spinlock_t gc_remaining_trials_lock;
	/* remaining trial count for GC_URGENT_* and GC_IDLE_* */
	unsigned int gc_remaining_trials;

	/* for skip statistic */
	unsigned long long skipped_gc_rwsem;	/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct f2fs_rwsem pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t swapfile_inode;		/* # of swapfile inodes */
	atomic_t atomic_files;			/* # of opened atomic file */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	struct kobject s_feature_list_kobj;	/* /sys/fs/f2fs/<devname>/feature_list */
	struct completion s_feature_list_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For multi devices */
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	bool aligned_blksize;			/* all devices have the same logical blksize */

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	unsigned char errors[MAX_F2FS_ERRORS];	/* error flags */
	spinlock_t error_lock;			/* protect errors array */
	bool error_dirty;			/* errors in sb are dirty */
	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

	/* For reclaimed segs statistics per each GC mode */
	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */

	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */

	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
	int max_fragment_hole;			/* max hole size for block fragmentation mode */

	/* For atomic write statistics */
	atomic64_t current_atomic_write;
	s64 peak_atomic_write;
	u64 committed_atomic_block;
	u64 revoked_atomic_block;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct kmem_cache *page_array_slab;	/* page array entry */
	unsigned int page_array_slab_size;	/* default page array slab size */

	/* For runtime compression statistics */
	u64 compr_written_block;
	u64 compr_saved_block;
	u32 compr_new_inode;

	/* For compressed block cache */
	struct inode *compress_inode;		/* cache compressed blocks */
	unsigned int compress_percent;		/* cache page percentage */
	unsigned int compress_watermark;	/* cache page watermark */
	atomic_t compress_page_hit;		/* cache hit count */
#endif

#ifdef CONFIG_F2FS_IOSTAT
	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* For io latency related statistics info in one iostat period */
	spinlock_t iostat_lat_lock;
	struct iostat_lat_info *iostat_io_lat;
#endif
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif
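
/*
 * Editor's illustrative sketch (hypothetical helper, mirroring how the
 * f2fs_kmalloc()/f2fs_kmem_cache_alloc() wrappers later in this file use
 * the two hooks above): a call site first asks time_to_inject(), logs the
 * injected fault, then behaves exactly as if the operation had failed.
 */
static inline void *f2fs_example_inject_alloc(struct f2fs_sb_info *sbi,
						size_t size)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;		/* pretend the allocation failed */
	}
	return kmalloc(size, GFP_NOFS);
}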

/*
 * Test if the mounted volume is a multi-device volume.
 *   - For a single regular disk volume, sbi->s_ndevs is 1.
 *   - For a single zoned disk volume, sbi->s_ndevs is 1.
 *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}

/*
 * Inline functions
 */
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}
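
/*
 * Editor's illustrative sketch (hypothetical): the usual write/verify
 * pairing for the checksum helpers above.  f2fs_crc32() seeds the CRC
 * with F2FS_SUPER_MAGIC, and f2fs_crc_valid() recomputes it over the
 * same buffer for comparison.
 */
static inline bool f2fs_example_chksum_roundtrip(struct f2fs_sb_info *sbi,
						 void *buf, size_t size)
{
	__u32 crc = f2fs_crc32(sbi, buf, size);		/* compute on write */

	return f2fs_crc_valid(sbi, crc, buf, size);	/* re-check on read */
}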

static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
{
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);

	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
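
/*
 * Editor's illustrative sketch (hypothetical; CP_FSCK_FLAG comes from
 * f2fs_fs.h): writers must go through set_ckpt_flags()/clear_ckpt_flags()
 * so that cp_lock serializes the read-modify-write of ckpt_flags, while
 * readers may test flags locklessly via is_set_ckpt_flags().
 */
static inline void f2fs_example_request_fsck(struct f2fs_sb_info *sbi)
{
	if (!is_set_ckpt_flags(sbi, CP_FSCK_FLAG))
		set_ckpt_flags(sbi, CP_FSCK_FLAG);
}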

#define init_f2fs_rwsem(sem)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_f2fs_rwsem((sem), #sem, &__key);			\
} while (0)

static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
		const char *sem_name, struct lock_class_key *key)
{
	__init_rwsem(&sem->internal_rwsem, sem_name, key);
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	init_waitqueue_head(&sem->read_waiters);
#endif
}

static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
{
	return rwsem_is_locked(&sem->internal_rwsem);
}

static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
{
	return rwsem_is_contended(&sem->internal_rwsem);
}

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
#else
	down_read(&sem->internal_rwsem);
#endif
}

static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
{
	return down_read_trylock(&sem->internal_rwsem);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
{
	down_read_nested(&sem->internal_rwsem, subclass);
}
#else
#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
#endif

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
	return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wake_up_all(&sem->read_waiters);
#endif
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	f2fs_down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	if (time_to_inject(sbi, FAULT_LOCK_OP)) {
		f2fs_show_injection_info(sbi, FAULT_LOCK_OP);
		return 0;
	}
	return f2fs_down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	f2fs_up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	f2fs_down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	f2fs_up_write(&sbi->cp_rwsem);
}
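
/*
 * Editor's illustrative sketch (hypothetical): a typical filesystem
 * operation brackets its block/node updates with f2fs_lock_op() and
 * f2fs_unlock_op(), taking cp_rwsem as a reader so that checkpoint
 * (which takes it as a writer via f2fs_lock_all()) cannot run midway.
 */
static inline void f2fs_example_fs_op(struct f2fs_sb_info *sbi)
{
	f2fs_lock_op(sbi);
	/* ... allocate blocks and dirty node/dentry pages here ... */
	f2fs_unlock_op(sbi);
}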

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}

	/*
	 * let's increase this in prior to the actual block count change
	 * so that f2fs_sync_file can avoid data races when deciding on a
	 * checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count -
					sbi->current_reserved_blocks;

	if (!__allow_reserved_blocks(sbi, inode, true))
		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		avail_user_block_count -= sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (avail_user_block_count > sbi->unusable_block_count)
			avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}
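
/*
 * Editor's illustrative sketch (hypothetical caller): note that
 * inc_valid_block_count() may shrink *count when space is short, so the
 * caller must re-read it, and must later give the blocks back through
 * dec_valid_block_count() (defined below) on the error path.
 */
static inline int f2fs_example_reserve_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, blkcnt_t count)
{
	int err = inc_valid_block_count(sbi, inode, &count);

	if (err)
		return err;	/* -ENOSPC or a quota error */
	/* ... map "count" (possibly trimmed) blocks ... */
	return 0;
}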

__printf(2, 3)
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
#define f2fs_debug(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	sbi->total_valid_block_count -= (block_t)count;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
					sbi->current_reserved_blocks + count);
	spin_unlock(&sbi->stat_lock);
	if (unlikely(inode->i_blocks < sectors)) {
		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
			  inode->i_ino,
			  (unsigned long long)inode->i_blocks,
			  (unsigned long long)sectors);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return;
	}
	f2fs_i_blocks_write(inode, count, false, true);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DENTS ||
			count_type == F2FS_DIRTY_NODES ||
			count_type == F2FS_DIRTY_META ||
			count_type == F2FS_DIRTY_QDATA ||
			count_type == F2FS_DIRTY_IMETA)
		set_sbi_flag(sbi, SBI_IS_DIRTY);
}

static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}
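
/*
 * Editor's illustrative sketch (hypothetical): the global page counters
 * must stay balanced per page, and dirtying one of the metadata types
 * also marks the sbi dirty so that a later checkpoint is triggered.
 */
static inline void f2fs_example_account_meta(struct f2fs_sb_info *sbi)
{
	inc_page_count(sbi, F2FS_DIRTY_META);	/* sets SBI_IS_DIRTY too */
	/* ... the meta page is written back ... */
	dec_page_count(sbi, F2FS_DIRTY_META);
}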

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline void inc_atomic_write_cnt(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u64 current_write;

	fi->atomic_write_cnt++;
	atomic64_inc(&sbi->current_atomic_write);
	current_write = atomic64_read(&sbi->current_atomic_write);
	if (current_write > sbi->peak_atomic_write)
		sbi->peak_atomic_write = current_write;
}

static inline void release_atomic_write_cnt(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
	fi->atomic_write_cnt = 0;
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
						sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
	int offset;

	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
		offset = (flag == SIT_BITMAP) ?
			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
		/*
		 * if large_nat_bitmap feature is enabled, leave checksum
		 * protection for all nat/sit bitmaps.
		 */
		return tmp_ptr + offset + sizeof(__le32);
	}

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return tmp_ptr;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return tmp_ptr + offset;
	}
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		if (inode) {
			err = dquot_alloc_inode(inode);
			if (err)
				return err;
		}
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		goto enospc;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count +
					sbi->current_reserved_blocks + 1;

	if (!__allow_reserved_blocks(sbi, inode, false))
		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		valid_block_count += sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	user_block_count = sbi->user_block_count;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		user_block_count -= sbi->unusable_block_count;

	if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode) {
		if (inode)
			dquot_free_inode(inode);
	} else {
		dquot_release_reservation_block(inode, 1);
	}
	return -ENOSPC;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	if (unlikely(!sbi->total_valid_block_count ||
			!sbi->total_valid_node_count)) {
		f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
			  sbi->total_valid_block_count,
			  sbi->total_valid_node_count);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	} else {
		sbi->total_valid_block_count--;
		sbi->total_valid_node_count--;
	}

	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks++;

	spin_unlock(&sbi->stat_lock);

	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;
	unsigned int flags;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
			f2fs_show_injection_info(F2FS_M_SB(mapping),
							FAULT_PAGE_ALLOC);
			return NULL;
		}
	}

	if (!for_write)
		return grab_cache_page(mapping, index);

	flags = memalloc_nofs_save();
	page = grab_cache_page_write_begin(mapping, index);
	memalloc_nofs_restore(flags);

	return page;
}

static inline struct page *f2fs_pagecache_get_page(
				struct address_space *mapping, pgoff_t index,
				int fgp_flags, gfp_t gfp_mask)
{
	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
		return NULL;
	}

	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}
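
/*
 * Editor's illustrative sketch (hypothetical): f2fs_grab_cache_page()
 * returns a locked, referenced page, so the matching release is
 * f2fs_put_page(page, 1), which unlocks before dropping the reference.
 */
static inline void f2fs_example_touch_page(struct address_space *mapping,
						pgoff_t index)
{
	struct page *page = f2fs_grab_cache_page(mapping, index, false);

	if (!page)
		return;		/* allocation failure or injected fault */
	/* ... read or update the locked page ... */
	f2fs_put_page(page, 1);
}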

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
{
	if (nofail)
		return f2fs_kmem_cache_alloc_nofail(cachep, flags);

	if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
		return NULL;
	}

	return kmem_cache_alloc(cachep, flags);
}

static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return true;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return true;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return true;
	return false;
}

static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;

	if (is_inflight_io(sbi, type))
		return false;

	if (sbi->gc_mode == GC_URGENT_MID)
		return true;

	if (sbi->gc_mode == GC_URGENT_LOW &&
			(type == DISCARD_TIME || type == GC_TIME))
		return true;

	return f2fs_time_over(sbi, type);
}
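
/*
 * Editor's illustrative sketch (hypothetical): background work such as GC
 * or discard typically gates on is_idle(), which folds together gc_mode
 * urgency, in-flight IO and the per-type interval_time[] thresholds.
 */
static inline bool f2fs_example_should_run_bg_job(struct f2fs_sb_info *sbi)
{
	return !sb_rdonly(sbi->sb) && is_idle(sbi, GC_TIME);
}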

static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline int f2fs_has_extra_attr(struct inode *inode);
static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}

static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}
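
/*
 * Editor's note with a minimal sketch: unlike the kernel's test_bit(),
 * the f2fs_*_bit() helpers above index a byte array MSB-first to match
 * the on-disk bitmap layout, i.e. bit 0 is mask 0x80 of byte 0 and
 * bit 8 is mask 0x80 of byte 1.
 */
static inline int f2fs_example_first_bit(char *bitmap)
{
	return f2fs_test_bit(0, bitmap);	/* tests bitmap[0] & 0x80 */
}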

/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
#define F2FS_COMPR_FL			0x00000004 /* Compress file */
#define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
#define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
#define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
#define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */

/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags appropriate for inodes that are neither directories nor regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add = 1, claim = 1 should be dquot_reserve_block in pair */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline bool f2fs_is_atomic_file(struct inode *inode);

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);

	if (f2fs_is_atomic_file(inode))
		return;

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline bool f2fs_need_compress_data(struct inode *inode)
{
	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;

	if (!f2fs_compressed_file(inode))
		return false;

	if (compress_mode == COMPR_MODE_FS)
		return true;
	else if (compress_mode == COMPR_MODE_USER &&
			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
		return true;

	return false;
}

static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}
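
/*
 * Editor's illustrative sketch (hypothetical): for compressed inodes the
 * two helpers above round the per-node address count down to a whole
 * cluster, so a compression cluster never straddles two node blocks.
 */
static inline bool f2fs_example_cluster_aligned(struct inode *inode)
{
	return !f2fs_compressed_file(inode) ||
		(addrs_per_block(inode) % F2FS_I(inode)->i_cluster_size) == 0;
}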

static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}

/*
 * Note: checking the inline_data flag without holding the inode page lock
 * is unsafe.  It can be changed at any time by f2fs_convert_inline_page().
 */
static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_cow_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_COW_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	if (is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	if (!is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
			      &F2FS_I(inode)->i_crtime))
		return false;
	return true;
}

static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	return kmalloc(size, flags);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}
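
/*
 * Editor's illustrative sketch (hypothetical): all of the allocation
 * wrappers above funnel through fault injection, so every caller must
 * tolerate NULL even for tiny allocations that "cannot" fail.
 */
static inline void *f2fs_example_zalloc(struct f2fs_sb_info *sbi, size_t size)
{
	/* may return NULL due to an injected fault, not only real OOM */
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}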
static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)	\
	((offsetof(typeof(*(f2fs_inode)), field) +	\
	sizeof((f2fs_inode)->field))			\
	<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))

#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
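/*
 * A hedged sketch of the intended validation order: filter out the
 * special markers first, then range-check the real address.  Treating
 * DATA_GENERIC as the right type value here is an assumption of this
 * example; the function itself is hypothetical.
 */
#if 0
static bool example_data_blkaddr_ok(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	/* NULL_ADDR, NEW_ADDR and COMPRESS_ADDR never hit the device */
	if (!__is_valid_data_blkaddr(blkaddr))
		return false;
	return f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC);
}
#endif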
/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
			struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
			struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
					bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
		     struct inode **new_inode);

/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
			      struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
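/*
 * Lookup-side counterpart as an illustrative sketch: f2fs_find_entry()
 * pins the page holding the raw dentry through @res_page, so the caller
 * owns a page reference on success and must drop it.  The function below
 * is hypothetical; error handling is simplified.
 */
#if 0
static nid_t example_lookup_ino(struct inode *dir, const struct qstr *name)
{
	struct f2fs_dir_entry *de;
	struct page *page;
	nid_t ino = 0;

	de = f2fs_find_entry(dir, name, &page);
	if (de) {
		ino = le32_to_cpu(de->ino);
		f2fs_put_page(page, 0);
	}
	return ino;
}
#endif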
/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
			struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
int f2fs_commit_atomic_write(struct inode *inode);
void f2fs_abort_atomic_write(struct inode *inode, bool clean);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
					enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
					struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
			block_t blkaddr, unsigned int blkcnt);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

#define DEF_FRAGMENT_SIZE	4
#define MIN_FRAGMENT_SIZE	1
#define MAX_FRAGMENT_SIZE	512

static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
}
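/*
 * The fragment modes deliberately scatter allocations to age a filesystem
 * for testing.  A minimal sketch of how a chunk length could be bounded
 * by the constants above; the real allocator logic in segment.c is more
 * involved, and the random helper used here is only an assumption.
 */
#if 0
static unsigned int example_fragment_chunk(struct f2fs_sb_info *sbi)
{
	if (!f2fs_need_rand_seg(sbi))
		return 0;
	/* pseudo-random length in [MIN_FRAGMENT_SIZE, MAX_FRAGMENT_SIZE] */
	return MIN_FRAGMENT_SIZE +
		get_random_u32() % (MAX_FRAGMENT_SIZE - MIN_FRAGMENT_SIZE + 1);
}
#endif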
/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
							unsigned char reason);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
					unsigned int ra_blocks);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
								bool from_cp);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, sector_t *sector);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
					pgoff_t *next_pgofs);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
					bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
extern const struct iomap_ops f2fs_iomap_ops;
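/*
 * Hedged sketch of the f2fs_map_blocks() calling convention: zero the
 * map, describe the logical range, then test F2FS_MAP_MAPPED on return.
 * Field and flag names follow struct f2fs_map_blocks defined earlier in
 * this header; the function itself is hypothetical.
 */
#if 0
static int example_map_one_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_map_blocks map = {};
	int err;

	map.m_lblk = index;
	map.m_len = 1;

	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (err)
		return err;
	if (map.m_flags & F2FS_MAP_MAPPED)
		pr_info("block %lu -> %u\n", (unsigned long)index, map.m_pblk);
	return 0;
}
#endif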
/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode, swapfile_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}
#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_swapfile_inode(inode)					\
	(atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
#define stat_dec_swapfile_inode(inode)					\
	(atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
#define stat_inc_atomic_inode(inode)					\
	(atomic_inc(&F2FS_I_SB(inode)->atomic_files))
#define stat_dec_atomic_inode(inode)					\
	(atomic_dec(&F2FS_I_SB(inode)->atomic_files))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
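/*
 * Note: the cascade above relies on the fixed on-disk ordering of the
 * metadata areas (CP < SIT < NAT < SSA < MAIN), so any address below
 * sit_base_addr must belong to the checkpoint area, and so on.
 */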
#define stat_inc_seg_type(sbi, curseg)					\
	((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
	((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
	(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_swapfile_inode(inode)			do { } while (0)
#define stat_dec_swapfile_inode(inode)			do { } while (0)
#define stat_inc_atomic_inode(inode)			do { } while (0)
#define stat_dec_atomic_inode(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif
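/*
 * When CONFIG_F2FS_STAT_FS is disabled, every stat_*() hook above
 * compiles to an empty statement, so call sites need no #ifdefs and the
 * accounting costs nothing on production builds.
 */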
extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_sanity_check_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
						struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
					const struct f2fs_filename *fname,
					struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
				struct page *page, struct inode *dir,
				struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/*
 * sysfs.c
 */
#define MIN_RA_MUL	2
#define MAX_RA_MUL	256

int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}
/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}
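/*
 * Hedged example: paths that cannot run the post-read steps inline (for
 * instance a raw direct-I/O fast path) are expected to test this
 * predicate and fall back to buffered I/O.  Hypothetical helper, for
 * illustration only.
 */
#if 0
static bool example_can_do_raw_io(struct inode *inode)
{
	/* decryption, verity and decompression all need page-based I/O */
	return !f2fs_post_read_required(inode);
}
#endif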
/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
				block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
						int *submitted,
						struct writeback_control *wbc,
						enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task);
void f2fs_put_page_dic(struct page *page, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode)					\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		sbi->compr_new_inode++;					\
	} while (0)
#define add_compr_block_stat(inode, blocks)				\
	do {								\
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
		sbi->compr_written_block += blocks;			\
		sbi->compr_saved_block += diff;				\
	} while (0)
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression is not supported */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
				bool in_task) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
				bool failed, block_t blkaddr, bool in_task)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page, bool in_task)
{
	WARN_ON_ONCE(1);
}
static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
				block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
				struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
				struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
				nid_t ino) { }
#define inc_compr_inode_stat(inode)		do { } while (0)
static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len) { }
#endif
static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
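/*
 * Usage sketch (illustrative): the generated predicates gate feature
 * specific paths, e.g. only attaching a compression context when the
 * superblock carries the compression feature.  A real caller would also
 * check f2fs_may_compress(); the helper below is hypothetical.
 */
#if 0
static int example_enable_compression(struct inode *inode)
{
	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;
	return set_compress_context(inode);
}
#endif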
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT) ||
			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
			 !f2fs_sb_has_readonly(sbi)))
		return false;

	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&sbi->s_list))
		return false;

	return S_ISREG(inode->i_mode);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int diff = fi->i_cluster_size - blocks;

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}
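/*
 * Worked example for the accounting above: with i_cluster_size == 4 and
 * blocks == 1, diff is 3, i.e. compression saved three blocks and
 * i_compr_blocks moves by that amount in the chosen direction.
 */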
static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
								int flag)
{
	if (!f2fs_is_multi_device(sbi))
		return false;
	if (flag != F2FS_GET_BLOCK_DIO)
		return false;
	return sbi->aligned_blksize;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}

static inline void f2fs_io_schedule_timeout(long timeout)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	io_schedule_timeout(timeout);
}

static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
					enum page_type type)
{
	if (unlikely(f2fs_cp_error(sbi)))
		return;

	if (ofs == sbi->page_eio_ofs[type]) {
		if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
			set_ckpt_flags(sbi, CP_ERROR_FLAG);
	} else {
		sbi->page_eio_ofs[type] = ofs;
		sbi->page_eio_cnt[type] = 0;
	}
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */