/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* it's obsolete since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk
			 * block address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM	16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For write hints: whether to pass them down to the block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080
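
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * option and feature macros above just manipulate bitmasks, e.g.
 *
 *	set_opt(sbi, DISCARD);
 *		expands to: F2FS_OPTION(sbi).opt |= F2FS_MOUNT_DISCARD;
 *
 *	if (test_opt(sbi, INLINE_DATA))
 *		the inline_data mount option is enabled;
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT))
 *		the superblock's feature word advertises encryption;
 *
 * i.e. mount options live in mount_opt.opt in memory, while feature bits are
 * tested against the little-endian feature field of the raw superblock.
 */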
#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not to be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or
	 * if the filesystem is doing an internal operation where usr_fname is
	 * also NULL.  In all these cases we fall back to treating the name as
	 * an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
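
/*
 * Layout sketch (illustrative): make_dentry_ptr_inline() above views the
 * inline data area pointed to by t as
 *
 *	| dentry bitmap | reserved | f2fs_dir_entry array | filename slots |
 *	  bitmap_size     reserved   SIZE_OF_DIR_ENTRY      F2FS_SLOT_LEN
 *	                  _size      * entry_cnt            * entry_cnt
 *
 * which is why d->dentry starts at t + bitmap_size + reserved_size and
 * d->filename immediately follows the dentry array.
 */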
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT.  We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
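
/*
 * Worked example (illustrative): __is_extent_mergeable() treats two extents
 * as mergeable only when they are contiguous in both the file offset space
 * and the block address space.  With back = {fofs = 0, blk = 100, len = 4}
 * and front = {fofs = 4, blk = 104, len = 2}, both
 * back->fofs + back->len == front->fofs and back->blk + back->len ==
 * front->blk hold, so the two can be merged into {fofs = 0, blk = 100,
 * len = 6}.  Extents carrying a compressed length (c_len != len) are never
 * merged.
 */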
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written,
 *			waiting for the bio's completion.
 *			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		}\
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
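
/*
 * Worked example (illustrative) of the page.private layout described above:
 * set_page_private_data(page, 3) sets bit 0 (PAGE_PRIVATE_NOT_POINTER) and
 * ORs in 3 << PAGE_PRIVATE_MAX (i.e. 3 << 6), giving page.private ==
 * 0b11000001.  get_page_private_data() shifts the value back down and
 * returns 3, while the flag helpers generated above only inspect bits 1..5.
 */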
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_WATERMARK			20
#define COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belong to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belong to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belong to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;		/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfile */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic file */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	struct kobject s_feature_list_kobj;	/* /sys/fs/f2fs/<devname>/feature_list */
	struct completion s_feature_list_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For multi devices */
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	bool aligned_blksize;			/* all devices have the same logical blksize */

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

	/* For reclaimed segs statistics per each GC mode */
	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */

	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq.
files in fadvise */ 1771 1772 int max_fragment_chunk; /* max chunk size for block fragmentation mode */ 1773 int max_fragment_hole; /* max hole size for block fragmentation mode */ 1774 1775 #ifdef CONFIG_F2FS_FS_COMPRESSION 1776 struct kmem_cache *page_array_slab; /* page array entry */ 1777 unsigned int page_array_slab_size; /* default page array slab size */ 1778 1779 /* For runtime compression statistics */ 1780 u64 compr_written_block; 1781 u64 compr_saved_block; 1782 u32 compr_new_inode; 1783 1784 /* For compressed block cache */ 1785 struct inode *compress_inode; /* cache compressed blocks */ 1786 unsigned int compress_percent; /* cache page percentage */ 1787 unsigned int compress_watermark; /* cache page watermark */ 1788 atomic_t compress_page_hit; /* cache hit count */ 1789 #endif 1790 1791 #ifdef CONFIG_F2FS_IOSTAT 1792 /* For app/fs IO statistics */ 1793 spinlock_t iostat_lock; 1794 unsigned long long rw_iostat[NR_IO_TYPE]; 1795 unsigned long long prev_rw_iostat[NR_IO_TYPE]; 1796 bool iostat_enable; 1797 unsigned long iostat_next_period; 1798 unsigned int iostat_period_ms; 1799 1800 /* For io latency related statistics info in one iostat period */ 1801 spinlock_t iostat_lat_lock; 1802 struct iostat_lat_info *iostat_io_lat; 1803 #endif 1804 }; 1805 1806 struct f2fs_private_dio { 1807 struct inode *inode; 1808 void *orig_private; 1809 bio_end_io_t *orig_end_io; 1810 bool write; 1811 }; 1812 1813 #ifdef CONFIG_F2FS_FAULT_INJECTION 1814 #define f2fs_show_injection_info(sbi, type) \ 1815 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \ 1816 KERN_INFO, sbi->sb->s_id, \ 1817 f2fs_fault_name[type], \ 1818 __func__, __builtin_return_address(0)) 1819 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1820 { 1821 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1822 1823 if (!ffi->inject_rate) 1824 return false; 1825 1826 if (!IS_FAULT_SET(ffi, type)) 1827 return false; 1828 1829 atomic_inc(&ffi->inject_ops); 1830 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1831 atomic_set(&ffi->inject_ops, 0); 1832 return true; 1833 } 1834 return false; 1835 } 1836 #else 1837 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1838 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1839 { 1840 return false; 1841 } 1842 #endif 1843 1844 /* 1845 * Test if the mounted volume is a multi-device volume. 1846 * - For a single regular disk volume, sbi->s_ndevs is 0. 1847 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1848 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 
1849 */ 1850 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1851 { 1852 return sbi->s_ndevs > 1; 1853 } 1854 1855 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1856 { 1857 unsigned long now = jiffies; 1858 1859 sbi->last_time[type] = now; 1860 1861 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1862 if (type == REQ_TIME) { 1863 sbi->last_time[DISCARD_TIME] = now; 1864 sbi->last_time[GC_TIME] = now; 1865 } 1866 } 1867 1868 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1869 { 1870 unsigned long interval = sbi->interval_time[type] * HZ; 1871 1872 return time_after(jiffies, sbi->last_time[type] + interval); 1873 } 1874 1875 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1876 int type) 1877 { 1878 unsigned long interval = sbi->interval_time[type] * HZ; 1879 unsigned int wait_ms = 0; 1880 long delta; 1881 1882 delta = (sbi->last_time[type] + interval) - jiffies; 1883 if (delta > 0) 1884 wait_ms = jiffies_to_msecs(delta); 1885 1886 return wait_ms; 1887 } 1888 1889 /* 1890 * Inline functions 1891 */ 1892 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1893 const void *address, unsigned int length) 1894 { 1895 struct { 1896 struct shash_desc shash; 1897 char ctx[4]; 1898 } desc; 1899 int err; 1900 1901 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1902 1903 desc.shash.tfm = sbi->s_chksum_driver; 1904 *(u32 *)desc.ctx = crc; 1905 1906 err = crypto_shash_update(&desc.shash, address, length); 1907 BUG_ON(err); 1908 1909 return *(u32 *)desc.ctx; 1910 } 1911 1912 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1913 unsigned int length) 1914 { 1915 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1916 } 1917 1918 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1919 void *buf, size_t buf_size) 1920 { 1921 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1922 } 1923 1924 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1925 const void *address, unsigned int length) 1926 { 1927 return __f2fs_crc32(sbi, crc, address, length); 1928 } 1929 1930 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1931 { 1932 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1933 } 1934 1935 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1936 { 1937 return sb->s_fs_info; 1938 } 1939 1940 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1941 { 1942 return F2FS_SB(inode->i_sb); 1943 } 1944 1945 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1946 { 1947 return F2FS_I_SB(mapping->host); 1948 } 1949 1950 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1951 { 1952 return F2FS_M_SB(page_file_mapping(page)); 1953 } 1954 1955 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1956 { 1957 return (struct f2fs_super_block *)(sbi->raw_super); 1958 } 1959 1960 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1961 { 1962 return (struct f2fs_checkpoint *)(sbi->ckpt); 1963 } 1964 1965 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1966 { 1967 return (struct f2fs_node *)page_address(page); 1968 } 1969 1970 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1971 { 1972 return &((struct f2fs_node *)page_address(page))->i; 1973 } 1974 1975 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 1976 { 1977 return (struct 
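/*
 * A minimal usage sketch for the timestamp helpers above (the caller is
 * hypothetical; only f2fs_time_over(), f2fs_update_time() and the CP_TIME
 * slot come from this header). A background thread can cheaply rate-limit
 * periodic work with:
 *
 *	if (f2fs_time_over(sbi, CP_TIME)) {
 *		f2fs_update_time(sbi, CP_TIME);
 *		(kick a checkpoint or other deferred work here)
 *	}
 *
 * f2fs_time_to_wait() returns the same interval as a millisecond delay for
 * threads that prefer to sleep instead of polling.
 */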
f2fs_nm_info *)(sbi->nm_info); 1978 } 1979 1980 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1981 { 1982 return (struct f2fs_sm_info *)(sbi->sm_info); 1983 } 1984 1985 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1986 { 1987 return (struct sit_info *)(SM_I(sbi)->sit_info); 1988 } 1989 1990 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1991 { 1992 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1993 } 1994 1995 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1996 { 1997 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 1998 } 1999 2000 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 2001 { 2002 return sbi->meta_inode->i_mapping; 2003 } 2004 2005 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 2006 { 2007 return sbi->node_inode->i_mapping; 2008 } 2009 2010 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 2011 { 2012 return test_bit(type, &sbi->s_flag); 2013 } 2014 2015 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2016 { 2017 set_bit(type, &sbi->s_flag); 2018 } 2019 2020 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2021 { 2022 clear_bit(type, &sbi->s_flag); 2023 } 2024 2025 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 2026 { 2027 return le64_to_cpu(cp->checkpoint_ver); 2028 } 2029 2030 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 2031 { 2032 if (type < F2FS_MAX_QUOTAS) 2033 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 2034 return 0; 2035 } 2036 2037 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 2038 { 2039 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 2040 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 2041 } 2042 2043 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2044 { 2045 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2046 2047 return ckpt_flags & f; 2048 } 2049 2050 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2051 { 2052 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 2053 } 2054 2055 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2056 { 2057 unsigned int ckpt_flags; 2058 2059 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2060 ckpt_flags |= f; 2061 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2062 } 2063 2064 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2065 { 2066 unsigned long flags; 2067 2068 spin_lock_irqsave(&sbi->cp_lock, flags); 2069 __set_ckpt_flags(F2FS_CKPT(sbi), f); 2070 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2071 } 2072 2073 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2074 { 2075 unsigned int ckpt_flags; 2076 2077 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2078 ckpt_flags &= (~f); 2079 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2080 } 2081 2082 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2083 { 2084 unsigned long flags; 2085 2086 spin_lock_irqsave(&sbi->cp_lock, flags); 2087 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 2088 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2089 } 2090 2091 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 2092 { 2093 down_read(&sbi->cp_rwsem); 2094 } 2095 2096 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 2097 { 2098 return 
down_read_trylock(&sbi->cp_rwsem); 2099 } 2100 2101 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 2102 { 2103 up_read(&sbi->cp_rwsem); 2104 } 2105 2106 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 2107 { 2108 down_write(&sbi->cp_rwsem); 2109 } 2110 2111 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 2112 { 2113 up_write(&sbi->cp_rwsem); 2114 } 2115 2116 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 2117 { 2118 int reason = CP_SYNC; 2119 2120 if (test_opt(sbi, FASTBOOT)) 2121 reason = CP_FASTBOOT; 2122 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 2123 reason = CP_UMOUNT; 2124 return reason; 2125 } 2126 2127 static inline bool __remain_node_summaries(int reason) 2128 { 2129 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 2130 } 2131 2132 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 2133 { 2134 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 2135 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 2136 } 2137 2138 /* 2139 * Check whether the inode has blocks or not 2140 */ 2141 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 2142 { 2143 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 2144 2145 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2146 } 2147 2148 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2149 { 2150 return ofs == XATTR_NODE_OFFSET; 2151 } 2152 2153 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2154 struct inode *inode, bool cap) 2155 { 2156 if (!inode) 2157 return true; 2158 if (!test_opt(sbi, RESERVE_ROOT)) 2159 return false; 2160 if (IS_NOQUOTA(inode)) 2161 return true; 2162 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2163 return true; 2164 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2165 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2166 return true; 2167 if (cap && capable(CAP_SYS_RESOURCE)) 2168 return true; 2169 return false; 2170 } 2171 2172 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2173 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2174 struct inode *inode, blkcnt_t *count) 2175 { 2176 blkcnt_t diff = 0, release = 0; 2177 block_t avail_user_block_count; 2178 int ret; 2179 2180 ret = dquot_reserve_block(inode, *count); 2181 if (ret) 2182 return ret; 2183 2184 if (time_to_inject(sbi, FAULT_BLOCK)) { 2185 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2186 release = *count; 2187 goto release_quota; 2188 } 2189 2190 /* 2191 * let's increase this in prior to actual block count change in order 2192 * for f2fs_sync_file to avoid data races when deciding checkpoint. 
2193 */ 2194 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2195 2196 spin_lock(&sbi->stat_lock); 2197 sbi->total_valid_block_count += (block_t)(*count); 2198 avail_user_block_count = sbi->user_block_count - 2199 sbi->current_reserved_blocks; 2200 2201 if (!__allow_reserved_blocks(sbi, inode, true)) 2202 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2203 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2204 if (avail_user_block_count > sbi->unusable_block_count) 2205 avail_user_block_count -= sbi->unusable_block_count; 2206 else 2207 avail_user_block_count = 0; 2208 } 2209 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2210 diff = sbi->total_valid_block_count - avail_user_block_count; 2211 if (diff > *count) 2212 diff = *count; 2213 *count -= diff; 2214 release = diff; 2215 sbi->total_valid_block_count -= diff; 2216 if (!*count) { 2217 spin_unlock(&sbi->stat_lock); 2218 goto enospc; 2219 } 2220 } 2221 spin_unlock(&sbi->stat_lock); 2222 2223 if (unlikely(release)) { 2224 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2225 dquot_release_reservation_block(inode, release); 2226 } 2227 f2fs_i_blocks_write(inode, *count, true, true); 2228 return 0; 2229 2230 enospc: 2231 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2232 release_quota: 2233 dquot_release_reservation_block(inode, release); 2234 return -ENOSPC; 2235 } 2236 2237 __printf(2, 3) 2238 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2239 2240 #define f2fs_err(sbi, fmt, ...) \ 2241 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2242 #define f2fs_warn(sbi, fmt, ...) \ 2243 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2244 #define f2fs_notice(sbi, fmt, ...) \ 2245 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2246 #define f2fs_info(sbi, fmt, ...) \ 2247 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2248 #define f2fs_debug(sbi, fmt, ...) \ 2249 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2250 2251 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2252 struct inode *inode, 2253 block_t count) 2254 { 2255 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2256 2257 spin_lock(&sbi->stat_lock); 2258 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2259 sbi->total_valid_block_count -= (block_t)count; 2260 if (sbi->reserved_blocks && 2261 sbi->current_reserved_blocks < sbi->reserved_blocks) 2262 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2263 sbi->current_reserved_blocks + count); 2264 spin_unlock(&sbi->stat_lock); 2265 if (unlikely(inode->i_blocks < sectors)) { 2266 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2267 inode->i_ino, 2268 (unsigned long long)inode->i_blocks, 2269 (unsigned long long)sectors); 2270 set_sbi_flag(sbi, SBI_NEED_FSCK); 2271 return; 2272 } 2273 f2fs_i_blocks_write(inode, count, false, true); 2274 } 2275 2276 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2277 { 2278 atomic_inc(&sbi->nr_pages[count_type]); 2279 2280 if (count_type == F2FS_DIRTY_DENTS || 2281 count_type == F2FS_DIRTY_NODES || 2282 count_type == F2FS_DIRTY_META || 2283 count_type == F2FS_DIRTY_QDATA || 2284 count_type == F2FS_DIRTY_IMETA) 2285 set_sbi_flag(sbi, SBI_IS_DIRTY); 2286 } 2287 2288 static inline void inode_inc_dirty_pages(struct inode *inode) 2289 { 2290 atomic_inc(&F2FS_I(inode)->dirty_pages); 2291 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
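/*
 * Sketch of the leveled logging wrappers defined above; the message text
 * and the blkaddr variable are made up for illustration:
 *
 *	f2fs_warn(sbi, "unexpected block address %u", blkaddr);
 *
 * Each wrapper forwards to f2fs_printk() with the log level embedded in
 * the format string, so callers never pass KERN_* constants themselves.
 */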
2292 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2293 if (IS_NOQUOTA(inode)) 2294 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2295 } 2296 2297 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2298 { 2299 atomic_dec(&sbi->nr_pages[count_type]); 2300 } 2301 2302 static inline void inode_dec_dirty_pages(struct inode *inode) 2303 { 2304 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2305 !S_ISLNK(inode->i_mode)) 2306 return; 2307 2308 atomic_dec(&F2FS_I(inode)->dirty_pages); 2309 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2310 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2311 if (IS_NOQUOTA(inode)) 2312 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2313 } 2314 2315 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2316 { 2317 return atomic_read(&sbi->nr_pages[count_type]); 2318 } 2319 2320 static inline int get_dirty_pages(struct inode *inode) 2321 { 2322 return atomic_read(&F2FS_I(inode)->dirty_pages); 2323 } 2324 2325 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2326 { 2327 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2328 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2329 sbi->log_blocks_per_seg; 2330 2331 return segs / sbi->segs_per_sec; 2332 } 2333 2334 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2335 { 2336 return sbi->total_valid_block_count; 2337 } 2338 2339 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2340 { 2341 return sbi->discard_blks; 2342 } 2343 2344 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2345 { 2346 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2347 2348 /* return NAT or SIT bitmap */ 2349 if (flag == NAT_BITMAP) 2350 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2351 else if (flag == SIT_BITMAP) 2352 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2353 2354 return 0; 2355 } 2356 2357 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2358 { 2359 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2360 } 2361 2362 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2363 { 2364 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2365 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; 2366 int offset; 2367 2368 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2369 offset = (flag == SIT_BITMAP) ? 2370 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2371 /* 2372 * if large_nat_bitmap feature is enabled, leave checksum 2373 * protection for all nat/sit bitmaps. 2374 */ 2375 return tmp_ptr + offset + sizeof(__le32); 2376 } 2377 2378 if (__cp_payload(sbi) > 0) { 2379 if (flag == NAT_BITMAP) 2380 return &ckpt->sit_nat_version_bitmap; 2381 else 2382 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2383 } else { 2384 offset = (flag == NAT_BITMAP) ? 
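/*
 * Sketch of how __bitmap_size() and __bitmap_ptr() are normally combined;
 * the destination buffer below is hypothetical:
 *
 *	unsigned long bytes = __bitmap_size(sbi, NAT_BITMAP);
 *	void *src = __bitmap_ptr(sbi, NAT_BITMAP);
 *
 *	memcpy(nat_bitmap_copy, src, bytes);
 *
 * When CP_LARGE_NAT_BITMAP_FLAG is set, both bitmaps sit behind an extra
 * __le32 checksum field, which is why the pointer is advanced by
 * sizeof(__le32) in that branch.
 */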
2385 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2386 return tmp_ptr + offset; 2387 } 2388 } 2389 2390 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2391 { 2392 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2393 2394 if (sbi->cur_cp_pack == 2) 2395 start_addr += sbi->blocks_per_seg; 2396 return start_addr; 2397 } 2398 2399 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2400 { 2401 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2402 2403 if (sbi->cur_cp_pack == 1) 2404 start_addr += sbi->blocks_per_seg; 2405 return start_addr; 2406 } 2407 2408 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2409 { 2410 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; 2411 } 2412 2413 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2414 { 2415 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2416 } 2417 2418 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2419 struct inode *inode, bool is_inode) 2420 { 2421 block_t valid_block_count; 2422 unsigned int valid_node_count, user_block_count; 2423 int err; 2424 2425 if (is_inode) { 2426 if (inode) { 2427 err = dquot_alloc_inode(inode); 2428 if (err) 2429 return err; 2430 } 2431 } else { 2432 err = dquot_reserve_block(inode, 1); 2433 if (err) 2434 return err; 2435 } 2436 2437 if (time_to_inject(sbi, FAULT_BLOCK)) { 2438 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2439 goto enospc; 2440 } 2441 2442 spin_lock(&sbi->stat_lock); 2443 2444 valid_block_count = sbi->total_valid_block_count + 2445 sbi->current_reserved_blocks + 1; 2446 2447 if (!__allow_reserved_blocks(sbi, inode, false)) 2448 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2449 user_block_count = sbi->user_block_count; 2450 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2451 user_block_count -= sbi->unusable_block_count; 2452 2453 if (unlikely(valid_block_count > user_block_count)) { 2454 spin_unlock(&sbi->stat_lock); 2455 goto enospc; 2456 } 2457 2458 valid_node_count = sbi->total_valid_node_count + 1; 2459 if (unlikely(valid_node_count > sbi->total_node_count)) { 2460 spin_unlock(&sbi->stat_lock); 2461 goto enospc; 2462 } 2463 2464 sbi->total_valid_node_count++; 2465 sbi->total_valid_block_count++; 2466 spin_unlock(&sbi->stat_lock); 2467 2468 if (inode) { 2469 if (is_inode) 2470 f2fs_mark_inode_dirty_sync(inode, true); 2471 else 2472 f2fs_i_blocks_write(inode, 1, true, true); 2473 } 2474 2475 percpu_counter_inc(&sbi->alloc_valid_block_count); 2476 return 0; 2477 2478 enospc: 2479 if (is_inode) { 2480 if (inode) 2481 dquot_free_inode(inode); 2482 } else { 2483 dquot_release_reservation_block(inode, 1); 2484 } 2485 return -ENOSPC; 2486 } 2487 2488 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2489 struct inode *inode, bool is_inode) 2490 { 2491 spin_lock(&sbi->stat_lock); 2492 2493 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2494 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2495 2496 sbi->total_valid_node_count--; 2497 sbi->total_valid_block_count--; 2498 if (sbi->reserved_blocks && 2499 sbi->current_reserved_blocks < sbi->reserved_blocks) 2500 sbi->current_reserved_blocks++; 2501 2502 spin_unlock(&sbi->stat_lock); 2503 2504 if (is_inode) { 2505 dquot_free_inode(inode); 2506 } else { 2507 if (unlikely(inode->i_blocks == 0)) { 2508 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2509 inode->i_ino, 2510 (unsigned long long)inode->i_blocks); 2511 set_sbi_flag(sbi, SBI_NEED_FSCK); 2512 
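/*
 * Minimal pairing sketch for inc_valid_node_count()/dec_valid_node_count();
 * the caller and its error label are hypothetical:
 *
 *	err = inc_valid_node_count(sbi, inode, false);
 *	if (err)
 *		goto out;
 *	(allocate and link the new node block here)
 *	dec_valid_node_count(sbi, inode, false);
 *
 * inc_valid_node_count() reserves quota and bumps the global node and block
 * counters; dec_valid_node_count() reverses both, so the two calls must
 * stay balanced on every path.
 */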
return; 2513 } 2514 f2fs_i_blocks_write(inode, 1, false, true); 2515 } 2516 } 2517 2518 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2519 { 2520 return sbi->total_valid_node_count; 2521 } 2522 2523 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2524 { 2525 percpu_counter_inc(&sbi->total_valid_inode_count); 2526 } 2527 2528 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2529 { 2530 percpu_counter_dec(&sbi->total_valid_inode_count); 2531 } 2532 2533 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2534 { 2535 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2536 } 2537 2538 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2539 pgoff_t index, bool for_write) 2540 { 2541 struct page *page; 2542 2543 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2544 if (!for_write) 2545 page = find_get_page_flags(mapping, index, 2546 FGP_LOCK | FGP_ACCESSED); 2547 else 2548 page = find_lock_page(mapping, index); 2549 if (page) 2550 return page; 2551 2552 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2553 f2fs_show_injection_info(F2FS_M_SB(mapping), 2554 FAULT_PAGE_ALLOC); 2555 return NULL; 2556 } 2557 } 2558 2559 if (!for_write) 2560 return grab_cache_page(mapping, index); 2561 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2562 } 2563 2564 static inline struct page *f2fs_pagecache_get_page( 2565 struct address_space *mapping, pgoff_t index, 2566 int fgp_flags, gfp_t gfp_mask) 2567 { 2568 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2569 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2570 return NULL; 2571 } 2572 2573 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2574 } 2575 2576 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2577 { 2578 char *src_kaddr = kmap(src); 2579 char *dst_kaddr = kmap(dst); 2580 2581 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2582 kunmap(dst); 2583 kunmap(src); 2584 } 2585 2586 static inline void f2fs_put_page(struct page *page, int unlock) 2587 { 2588 if (!page) 2589 return; 2590 2591 if (unlock) { 2592 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2593 unlock_page(page); 2594 } 2595 put_page(page); 2596 } 2597 2598 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2599 { 2600 if (dn->node_page) 2601 f2fs_put_page(dn->node_page, 1); 2602 if (dn->inode_page && dn->node_page != dn->inode_page) 2603 f2fs_put_page(dn->inode_page, 0); 2604 dn->node_page = NULL; 2605 dn->inode_page = NULL; 2606 } 2607 2608 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2609 size_t size) 2610 { 2611 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2612 } 2613 2614 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, 2615 gfp_t flags) 2616 { 2617 void *entry; 2618 2619 entry = kmem_cache_alloc(cachep, flags); 2620 if (!entry) 2621 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2622 return entry; 2623 } 2624 2625 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2626 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) 2627 { 2628 if (nofail) 2629 return f2fs_kmem_cache_alloc_nofail(cachep, flags); 2630 2631 if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) { 2632 f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC); 2633 return NULL; 2634 } 2635 2636 return kmem_cache_alloc(cachep, flags); 2637 } 2638 2639 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2640 { 2641 
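/*
 * Rough summary of the check below: pending reads (data, node and meta),
 * data and checkpoint writeback, and in-flight direct I/O all count as
 * activity; queued discard and flush commands count as well, except that
 * the discard queue is ignored when the caller asks about DISCARD_TIME
 * itself. is_idle() further down combines this with gc_mode and the
 * per-type time thresholds, e.g. (hypothetical caller):
 *
 *	if (is_idle(sbi, GC_TIME))
 *		(quiet enough to start background GC)
 */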
if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2642 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2643 get_pages(sbi, F2FS_WB_CP_DATA) || 2644 get_pages(sbi, F2FS_DIO_READ) || 2645 get_pages(sbi, F2FS_DIO_WRITE)) 2646 return true; 2647 2648 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2649 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2650 return true; 2651 2652 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2653 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2654 return true; 2655 return false; 2656 } 2657 2658 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2659 { 2660 if (sbi->gc_mode == GC_URGENT_HIGH) 2661 return true; 2662 2663 if (is_inflight_io(sbi, type)) 2664 return false; 2665 2666 if (sbi->gc_mode == GC_URGENT_LOW && 2667 (type == DISCARD_TIME || type == GC_TIME)) 2668 return true; 2669 2670 return f2fs_time_over(sbi, type); 2671 } 2672 2673 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2674 unsigned long index, void *item) 2675 { 2676 while (radix_tree_insert(root, index, item)) 2677 cond_resched(); 2678 } 2679 2680 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2681 2682 static inline bool IS_INODE(struct page *page) 2683 { 2684 struct f2fs_node *p = F2FS_NODE(page); 2685 2686 return RAW_IS_INODE(p); 2687 } 2688 2689 static inline int offset_in_addr(struct f2fs_inode *i) 2690 { 2691 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2692 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2693 } 2694 2695 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2696 { 2697 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2698 } 2699 2700 static inline int f2fs_has_extra_attr(struct inode *inode); 2701 static inline block_t data_blkaddr(struct inode *inode, 2702 struct page *node_page, unsigned int offset) 2703 { 2704 struct f2fs_node *raw_node; 2705 __le32 *addr_array; 2706 int base = 0; 2707 bool is_inode = IS_INODE(node_page); 2708 2709 raw_node = F2FS_NODE(node_page); 2710 2711 if (is_inode) { 2712 if (!inode) 2713 /* from GC path only */ 2714 base = offset_in_addr(&raw_node->i); 2715 else if (f2fs_has_extra_attr(inode)) 2716 base = get_extra_isize(inode); 2717 } 2718 2719 addr_array = blkaddr_in_node(raw_node); 2720 return le32_to_cpu(addr_array[base + offset]); 2721 } 2722 2723 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2724 { 2725 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2726 } 2727 2728 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2729 { 2730 int mask; 2731 2732 addr += (nr >> 3); 2733 mask = 1 << (7 - (nr & 0x07)); 2734 return mask & *addr; 2735 } 2736 2737 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2738 { 2739 int mask; 2740 2741 addr += (nr >> 3); 2742 mask = 1 << (7 - (nr & 0x07)); 2743 *addr |= mask; 2744 } 2745 2746 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2747 { 2748 int mask; 2749 2750 addr += (nr >> 3); 2751 mask = 1 << (7 - (nr & 0x07)); 2752 *addr &= ~mask; 2753 } 2754 2755 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2756 { 2757 int mask; 2758 int ret; 2759 2760 addr += (nr >> 3); 2761 mask = 1 << (7 - (nr & 0x07)); 2762 ret = mask & *addr; 2763 *addr |= mask; 2764 return ret; 2765 } 2766 2767 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2768 { 2769 int mask; 2770 int ret; 2771 2772 addr += (nr >> 3); 2773 mask = 1 << (7 - (nr & 0x07)); 2774 ret = mask & *addr; 2775 *addr &= ~mask; 2776 
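/*
 * The byte-wise helpers in this block number bits from the most significant
 * bit of each byte, matching the on-disk bitmap layout. A small worked
 * example (values shown for illustration):
 *
 *	char map[2] = { 0, 0 };
 *
 *	f2fs_set_bit(0, map);		(sets 0x80 in map[0])
 *	f2fs_set_bit(9, map);		(sets 0x40 in map[1])
 *	f2fs_test_bit(9, map);		(returns non-zero)
 *	f2fs_clear_bit(9, map);		(map[1] is 0 again)
 */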
return ret; 2777 } 2778 2779 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2780 { 2781 int mask; 2782 2783 addr += (nr >> 3); 2784 mask = 1 << (7 - (nr & 0x07)); 2785 *addr ^= mask; 2786 } 2787 2788 /* 2789 * On-disk inode flags (f2fs_inode::i_flags) 2790 */ 2791 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2792 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2793 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2794 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2795 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2796 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2797 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2798 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2799 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2800 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2801 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2802 2803 /* Flags that should be inherited by new inodes from their parent. */ 2804 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2805 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2806 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2807 2808 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2809 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2810 F2FS_CASEFOLD_FL)) 2811 2812 /* Flags that are appropriate for non-directories/regular files. */ 2813 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2814 2815 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2816 { 2817 if (S_ISDIR(mode)) 2818 return flags; 2819 else if (S_ISREG(mode)) 2820 return flags & F2FS_REG_FLMASK; 2821 else 2822 return flags & F2FS_OTHER_FLMASK; 2823 } 2824 2825 static inline void __mark_inode_dirty_flag(struct inode *inode, 2826 int flag, bool set) 2827 { 2828 switch (flag) { 2829 case FI_INLINE_XATTR: 2830 case FI_INLINE_DATA: 2831 case FI_INLINE_DENTRY: 2832 case FI_NEW_INODE: 2833 if (set) 2834 return; 2835 fallthrough; 2836 case FI_DATA_EXIST: 2837 case FI_INLINE_DOTS: 2838 case FI_PIN_FILE: 2839 case FI_COMPRESS_RELEASED: 2840 f2fs_mark_inode_dirty_sync(inode, true); 2841 } 2842 } 2843 2844 static inline void set_inode_flag(struct inode *inode, int flag) 2845 { 2846 set_bit(flag, F2FS_I(inode)->flags); 2847 __mark_inode_dirty_flag(inode, flag, true); 2848 } 2849 2850 static inline int is_inode_flag_set(struct inode *inode, int flag) 2851 { 2852 return test_bit(flag, F2FS_I(inode)->flags); 2853 } 2854 2855 static inline void clear_inode_flag(struct inode *inode, int flag) 2856 { 2857 clear_bit(flag, F2FS_I(inode)->flags); 2858 __mark_inode_dirty_flag(inode, flag, false); 2859 } 2860 2861 static inline bool f2fs_verity_in_progress(struct inode *inode) 2862 { 2863 return IS_ENABLED(CONFIG_FS_VERITY) && 2864 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2865 } 2866 2867 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2868 { 2869 F2FS_I(inode)->i_acl_mode = mode; 2870 set_inode_flag(inode, FI_ACL_MODE); 2871 f2fs_mark_inode_dirty_sync(inode, false); 2872 } 2873 2874 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2875 { 2876 if (inc) 2877 inc_nlink(inode); 2878 else 2879 drop_nlink(inode); 2880 f2fs_mark_inode_dirty_sync(inode, true); 2881 } 2882 2883 static inline void f2fs_i_blocks_write(struct inode *inode, 2884 block_t diff, bool add, bool claim) 2885 { 2886 bool 
clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2887 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2888 2889 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2890 if (add) { 2891 if (claim) 2892 dquot_claim_block(inode, diff); 2893 else 2894 dquot_alloc_block_nofail(inode, diff); 2895 } else { 2896 dquot_free_block(inode, diff); 2897 } 2898 2899 f2fs_mark_inode_dirty_sync(inode, true); 2900 if (clean || recover) 2901 set_inode_flag(inode, FI_AUTO_RECOVER); 2902 } 2903 2904 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2905 { 2906 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2907 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2908 2909 if (i_size_read(inode) == i_size) 2910 return; 2911 2912 i_size_write(inode, i_size); 2913 f2fs_mark_inode_dirty_sync(inode, true); 2914 if (clean || recover) 2915 set_inode_flag(inode, FI_AUTO_RECOVER); 2916 } 2917 2918 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2919 { 2920 F2FS_I(inode)->i_current_depth = depth; 2921 f2fs_mark_inode_dirty_sync(inode, true); 2922 } 2923 2924 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2925 unsigned int count) 2926 { 2927 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2928 f2fs_mark_inode_dirty_sync(inode, true); 2929 } 2930 2931 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2932 { 2933 F2FS_I(inode)->i_xattr_nid = xnid; 2934 f2fs_mark_inode_dirty_sync(inode, true); 2935 } 2936 2937 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2938 { 2939 F2FS_I(inode)->i_pino = pino; 2940 f2fs_mark_inode_dirty_sync(inode, true); 2941 } 2942 2943 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2944 { 2945 struct f2fs_inode_info *fi = F2FS_I(inode); 2946 2947 if (ri->i_inline & F2FS_INLINE_XATTR) 2948 set_bit(FI_INLINE_XATTR, fi->flags); 2949 if (ri->i_inline & F2FS_INLINE_DATA) 2950 set_bit(FI_INLINE_DATA, fi->flags); 2951 if (ri->i_inline & F2FS_INLINE_DENTRY) 2952 set_bit(FI_INLINE_DENTRY, fi->flags); 2953 if (ri->i_inline & F2FS_DATA_EXIST) 2954 set_bit(FI_DATA_EXIST, fi->flags); 2955 if (ri->i_inline & F2FS_INLINE_DOTS) 2956 set_bit(FI_INLINE_DOTS, fi->flags); 2957 if (ri->i_inline & F2FS_EXTRA_ATTR) 2958 set_bit(FI_EXTRA_ATTR, fi->flags); 2959 if (ri->i_inline & F2FS_PIN_FILE) 2960 set_bit(FI_PIN_FILE, fi->flags); 2961 if (ri->i_inline & F2FS_COMPRESS_RELEASED) 2962 set_bit(FI_COMPRESS_RELEASED, fi->flags); 2963 } 2964 2965 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2966 { 2967 ri->i_inline = 0; 2968 2969 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2970 ri->i_inline |= F2FS_INLINE_XATTR; 2971 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2972 ri->i_inline |= F2FS_INLINE_DATA; 2973 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2974 ri->i_inline |= F2FS_INLINE_DENTRY; 2975 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2976 ri->i_inline |= F2FS_DATA_EXIST; 2977 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2978 ri->i_inline |= F2FS_INLINE_DOTS; 2979 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2980 ri->i_inline |= F2FS_EXTRA_ATTR; 2981 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2982 ri->i_inline |= F2FS_PIN_FILE; 2983 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) 2984 ri->i_inline |= F2FS_COMPRESS_RELEASED; 2985 } 2986 2987 static inline int f2fs_has_extra_attr(struct inode *inode) 2988 { 2989 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2990 } 2991 2992 static inline int 
f2fs_has_inline_xattr(struct inode *inode) 2993 { 2994 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2995 } 2996 2997 static inline int f2fs_compressed_file(struct inode *inode) 2998 { 2999 return S_ISREG(inode->i_mode) && 3000 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 3001 } 3002 3003 static inline bool f2fs_need_compress_data(struct inode *inode) 3004 { 3005 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 3006 3007 if (!f2fs_compressed_file(inode)) 3008 return false; 3009 3010 if (compress_mode == COMPR_MODE_FS) 3011 return true; 3012 else if (compress_mode == COMPR_MODE_USER && 3013 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 3014 return true; 3015 3016 return false; 3017 } 3018 3019 static inline unsigned int addrs_per_inode(struct inode *inode) 3020 { 3021 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 3022 get_inline_xattr_addrs(inode); 3023 3024 if (!f2fs_compressed_file(inode)) 3025 return addrs; 3026 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 3027 } 3028 3029 static inline unsigned int addrs_per_block(struct inode *inode) 3030 { 3031 if (!f2fs_compressed_file(inode)) 3032 return DEF_ADDRS_PER_BLOCK; 3033 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 3034 } 3035 3036 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 3037 { 3038 struct f2fs_inode *ri = F2FS_INODE(page); 3039 3040 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 3041 get_inline_xattr_addrs(inode)]); 3042 } 3043 3044 static inline int inline_xattr_size(struct inode *inode) 3045 { 3046 if (f2fs_has_inline_xattr(inode)) 3047 return get_inline_xattr_addrs(inode) * sizeof(__le32); 3048 return 0; 3049 } 3050 3051 static inline int f2fs_has_inline_data(struct inode *inode) 3052 { 3053 return is_inode_flag_set(inode, FI_INLINE_DATA); 3054 } 3055 3056 static inline int f2fs_exist_data(struct inode *inode) 3057 { 3058 return is_inode_flag_set(inode, FI_DATA_EXIST); 3059 } 3060 3061 static inline int f2fs_has_inline_dots(struct inode *inode) 3062 { 3063 return is_inode_flag_set(inode, FI_INLINE_DOTS); 3064 } 3065 3066 static inline int f2fs_is_mmap_file(struct inode *inode) 3067 { 3068 return is_inode_flag_set(inode, FI_MMAP_FILE); 3069 } 3070 3071 static inline bool f2fs_is_pinned_file(struct inode *inode) 3072 { 3073 return is_inode_flag_set(inode, FI_PIN_FILE); 3074 } 3075 3076 static inline bool f2fs_is_atomic_file(struct inode *inode) 3077 { 3078 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 3079 } 3080 3081 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 3082 { 3083 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 3084 } 3085 3086 static inline bool f2fs_is_volatile_file(struct inode *inode) 3087 { 3088 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 3089 } 3090 3091 static inline bool f2fs_is_first_block_written(struct inode *inode) 3092 { 3093 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 3094 } 3095 3096 static inline bool f2fs_is_drop_cache(struct inode *inode) 3097 { 3098 return is_inode_flag_set(inode, FI_DROP_CACHE); 3099 } 3100 3101 static inline void *inline_data_addr(struct inode *inode, struct page *page) 3102 { 3103 struct f2fs_inode *ri = F2FS_INODE(page); 3104 int extra_size = get_extra_isize(inode); 3105 3106 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 3107 } 3108 3109 static inline int f2fs_has_inline_dentry(struct inode *inode) 3110 { 3111 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 3112 } 3113 3114 static inline int is_file(struct inode 
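/*
 * Worked example for the address-slot helpers above, assuming the default
 * 4KB block and a compression cluster of 4 blocks (figures illustrative):
 *
 *	addrs_per_block(inode) is DEF_ADDRS_PER_BLOCK (1018) for a regular
 *	file, but ALIGN_DOWN(1018, 4) = 1016 for a compressed one.
 *
 * Rounding down to the cluster size keeps every compression cluster fully
 * addressed within a single node block instead of straddling two of them.
 */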
*inode, int type) 3115 { 3116 return F2FS_I(inode)->i_advise & type; 3117 } 3118 3119 static inline void set_file(struct inode *inode, int type) 3120 { 3121 F2FS_I(inode)->i_advise |= type; 3122 f2fs_mark_inode_dirty_sync(inode, true); 3123 } 3124 3125 static inline void clear_file(struct inode *inode, int type) 3126 { 3127 F2FS_I(inode)->i_advise &= ~type; 3128 f2fs_mark_inode_dirty_sync(inode, true); 3129 } 3130 3131 static inline bool f2fs_is_time_consistent(struct inode *inode) 3132 { 3133 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 3134 return false; 3135 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 3136 return false; 3137 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 3138 return false; 3139 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 3140 &F2FS_I(inode)->i_crtime)) 3141 return false; 3142 return true; 3143 } 3144 3145 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 3146 { 3147 bool ret; 3148 3149 if (dsync) { 3150 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3151 3152 spin_lock(&sbi->inode_lock[DIRTY_META]); 3153 ret = list_empty(&F2FS_I(inode)->gdirty_list); 3154 spin_unlock(&sbi->inode_lock[DIRTY_META]); 3155 return ret; 3156 } 3157 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 3158 file_keep_isize(inode) || 3159 i_size_read(inode) & ~PAGE_MASK) 3160 return false; 3161 3162 if (!f2fs_is_time_consistent(inode)) 3163 return false; 3164 3165 spin_lock(&F2FS_I(inode)->i_size_lock); 3166 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 3167 spin_unlock(&F2FS_I(inode)->i_size_lock); 3168 3169 return ret; 3170 } 3171 3172 static inline bool f2fs_readonly(struct super_block *sb) 3173 { 3174 return sb_rdonly(sb); 3175 } 3176 3177 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 3178 { 3179 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3180 } 3181 3182 static inline bool is_dot_dotdot(const u8 *name, size_t len) 3183 { 3184 if (len == 1 && name[0] == '.') 3185 return true; 3186 3187 if (len == 2 && name[0] == '.' && name[1] == '.') 3188 return true; 3189 3190 return false; 3191 } 3192 3193 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3194 size_t size, gfp_t flags) 3195 { 3196 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3197 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3198 return NULL; 3199 } 3200 3201 return kmalloc(size, flags); 3202 } 3203 3204 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3205 size_t size, gfp_t flags) 3206 { 3207 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3208 } 3209 3210 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3211 size_t size, gfp_t flags) 3212 { 3213 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3214 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3215 return NULL; 3216 } 3217 3218 return kvmalloc(size, flags); 3219 } 3220 3221 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3222 size_t size, gfp_t flags) 3223 { 3224 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3225 } 3226 3227 static inline int get_extra_isize(struct inode *inode) 3228 { 3229 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3230 } 3231 3232 static inline int get_inline_xattr_addrs(struct inode *inode) 3233 { 3234 return F2FS_I(inode)->i_inline_xattr_size; 3235 } 3236 3237 #define f2fs_get_inode_mode(i) \ 3238 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3239 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3240 3241 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3242 (offsetof(struct f2fs_inode, i_extra_end) - \ 3243 offsetof(struct f2fs_inode, i_extra_isize)) \ 3244 3245 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3246 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3247 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3248 sizeof((f2fs_inode)->field)) \ 3249 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3250 3251 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3252 3253 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3254 3255 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3256 block_t blkaddr, int type); 3257 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3258 block_t blkaddr, int type) 3259 { 3260 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3261 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3262 blkaddr, type); 3263 f2fs_bug_on(sbi, 1); 3264 } 3265 } 3266 3267 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3268 { 3269 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3270 blkaddr == COMPRESS_ADDR) 3271 return false; 3272 return true; 3273 } 3274 3275 /* 3276 * file.c 3277 */ 3278 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3279 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3280 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3281 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3282 int f2fs_truncate(struct inode *inode); 3283 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path, 3284 struct kstat *stat, u32 request_mask, unsigned int flags); 3285 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, 3286 struct iattr *attr); 3287 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3288 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3289 int f2fs_precache_extents(struct inode *inode); 3290 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); 3291 int f2fs_fileattr_set(struct user_namespace *mnt_userns, 3292 struct dentry *dentry, struct fileattr *fa); 3293 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3294 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3295 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3296 int f2fs_pin_file_control(struct inode *inode, bool inc); 3297 3298 /* 3299 * inode.c 3300 */ 3301 void f2fs_set_inode_flags(struct inode *inode); 3302 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3303 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3304 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3305 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3306 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3307 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3308 void f2fs_update_inode_page(struct inode *inode); 3309 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3310 void f2fs_evict_inode(struct inode *inode); 3311 void f2fs_handle_failed_inode(struct inode *inode); 3312 3313 /* 3314 * namei.c 3315 */ 3316 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3317 bool hot, bool set); 3318 struct dentry *f2fs_get_parent(struct dentry *child); 3319 3320 /* 
3321 * dir.c 3322 */ 3323 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3324 int f2fs_init_casefolded_name(const struct inode *dir, 3325 struct f2fs_filename *fname); 3326 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3327 int lookup, struct f2fs_filename *fname); 3328 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3329 struct f2fs_filename *fname); 3330 void f2fs_free_filename(struct f2fs_filename *fname); 3331 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3332 const struct f2fs_filename *fname, int *max_slots); 3333 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3334 unsigned int start_pos, struct fscrypt_str *fstr); 3335 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3336 struct f2fs_dentry_ptr *d); 3337 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3338 const struct f2fs_filename *fname, struct page *dpage); 3339 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3340 unsigned int current_depth); 3341 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3342 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3343 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3344 const struct f2fs_filename *fname, 3345 struct page **res_page); 3346 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3347 const struct qstr *child, struct page **res_page); 3348 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3349 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3350 struct page **page); 3351 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3352 struct page *page, struct inode *inode); 3353 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3354 const struct f2fs_filename *fname); 3355 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3356 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3357 unsigned int bit_pos); 3358 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3359 struct inode *inode, nid_t ino, umode_t mode); 3360 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, 3361 struct inode *inode, nid_t ino, umode_t mode); 3362 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3363 struct inode *inode, nid_t ino, umode_t mode); 3364 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3365 struct inode *dir, struct inode *inode); 3366 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3367 bool f2fs_empty_dir(struct inode *dir); 3368 3369 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3370 { 3371 if (fscrypt_is_nokey_name(dentry)) 3372 return -ENOKEY; 3373 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3374 inode, inode->i_ino, inode->i_mode); 3375 } 3376 3377 /* 3378 * super.c 3379 */ 3380 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3381 void f2fs_inode_synced(struct inode *inode); 3382 int f2fs_dquot_initialize(struct inode *inode); 3383 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3384 int f2fs_quota_sync(struct super_block *sb, int type); 3385 loff_t max_file_blocks(struct inode *inode); 3386 void f2fs_quota_off_umount(struct super_block *sb); 3387 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3388 int f2fs_sync_fs(struct super_block *sb, int sync); 
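/*
 * Minimal sketch of how the inline f2fs_add_link() above is meant to be
 * called from a namei operation (locking and inode setup are omitted; the
 * caller shown is hypothetical):
 *
 *	err = f2fs_add_link(dentry, inode);
 *	if (err)
 *		return err;
 *
 * Encrypted names whose key is absent are rejected with -ENOKEY before
 * f2fs_do_add_link() ever touches the directory, so no dentry block is
 * modified for a name that cannot be resolved.
 */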
3389 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3390 3391 /* 3392 * hash.c 3393 */ 3394 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3395 3396 /* 3397 * node.c 3398 */ 3399 struct node_info; 3400 3401 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3402 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3403 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3404 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3405 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3406 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3407 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3408 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3409 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3410 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3411 struct node_info *ni); 3412 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3413 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3414 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3415 int f2fs_truncate_xattr_node(struct inode *inode); 3416 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3417 unsigned int seq_id); 3418 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); 3419 int f2fs_remove_inode_page(struct inode *inode); 3420 struct page *f2fs_new_inode_page(struct inode *inode); 3421 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3422 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3423 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3424 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3425 int f2fs_move_node_page(struct page *node_page, int gc_type); 3426 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3427 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3428 struct writeback_control *wbc, bool atomic, 3429 unsigned int *seq_id); 3430 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3431 struct writeback_control *wbc, 3432 bool do_balance, enum iostat_type io_type); 3433 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3434 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3435 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 3436 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3437 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3438 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3439 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3440 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3441 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3442 unsigned int segno, struct f2fs_summary_block *sum); 3443 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); 3444 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3445 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3446 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3447 int __init f2fs_create_node_manager_caches(void); 3448 void f2fs_destroy_node_manager_caches(void); 3449 3450 /* 3451 * segment.c 3452 */ 3453 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3454 void f2fs_register_inmem_page(struct inode *inode, struct page *page); 3455 void f2fs_drop_inmem_pages_all(struct 
f2fs_sb_info *sbi, bool gc_failure); 3456 void f2fs_drop_inmem_pages(struct inode *inode); 3457 void f2fs_drop_inmem_page(struct inode *inode, struct page *page); 3458 int f2fs_commit_inmem_pages(struct inode *inode); 3459 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3460 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3461 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3462 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3463 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3464 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3465 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3466 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3467 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); 3468 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3469 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3470 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3471 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3472 struct cp_control *cpc); 3473 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3474 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3475 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3476 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3477 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3478 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); 3479 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3480 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3481 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3482 void f2fs_get_new_segment(struct f2fs_sb_info *sbi, 3483 unsigned int *newseg, bool new_sec, int dir); 3484 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3485 unsigned int start, unsigned int end); 3486 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); 3487 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3488 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3489 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3490 struct cp_control *cpc); 3491 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3492 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3493 block_t blk_addr); 3494 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3495 enum iostat_type io_type); 3496 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3497 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3498 struct f2fs_io_info *fio); 3499 int f2fs_inplace_write_data(struct f2fs_io_info *fio); 3500 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3501 block_t old_blkaddr, block_t new_blkaddr, 3502 bool recover_curseg, bool recover_newaddr, 3503 bool from_gc); 3504 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3505 block_t old_addr, block_t new_addr, 3506 unsigned char version, bool recover_curseg, 3507 bool recover_newaddr); 3508 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3509 block_t old_blkaddr, block_t *new_blkaddr, 3510 struct f2fs_summary *sum, int type, 3511 struct f2fs_io_info *fio); 3512 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3513 block_t blkaddr, unsigned int blkcnt); 3514 void f2fs_wait_on_page_writeback(struct page 
*page, 3515 enum page_type type, bool ordered, bool locked); 3516 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); 3517 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3518 block_t len); 3519 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3520 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3521 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 3522 unsigned int val, int alloc); 3523 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3524 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); 3525 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); 3526 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); 3527 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); 3528 int __init f2fs_create_segment_manager_caches(void); 3529 void f2fs_destroy_segment_manager_caches(void); 3530 int f2fs_rw_hint_to_seg_type(enum rw_hint hint); 3531 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, 3532 enum page_type type, enum temp_type temp); 3533 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 3534 unsigned int segno); 3535 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 3536 unsigned int segno); 3537 3538 #define DEF_FRAGMENT_SIZE 4 3539 #define MIN_FRAGMENT_SIZE 1 3540 #define MAX_FRAGMENT_SIZE 512 3541 3542 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) 3543 { 3544 return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || 3545 F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; 3546 } 3547 3548 /* 3549 * checkpoint.c 3550 */ 3551 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); 3552 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3553 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3554 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); 3555 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 3556 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3557 block_t blkaddr, int type); 3558 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 3559 int type, bool sync); 3560 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index); 3561 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 3562 long nr_to_write, enum iostat_type io_type); 3563 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3564 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3565 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); 3566 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 3567 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3568 unsigned int devidx, int type); 3569 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3570 unsigned int devidx, int type); 3571 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); 3572 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); 3573 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); 3574 void f2fs_add_orphan_inode(struct inode *inode); 3575 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 3576 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); 3577 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3578 void f2fs_update_dirty_page(struct inode *inode, struct page *page); 3579 void f2fs_remove_dirty_inode(struct inode 
*inode); 3580 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); 3581 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); 3582 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); 3583 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3584 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); 3585 int __init f2fs_create_checkpoint_caches(void); 3586 void f2fs_destroy_checkpoint_caches(void); 3587 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); 3588 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); 3589 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); 3590 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); 3591 3592 /* 3593 * data.c 3594 */ 3595 int __init f2fs_init_bioset(void); 3596 void f2fs_destroy_bioset(void); 3597 int f2fs_init_bio_entry_cache(void); 3598 void f2fs_destroy_bio_entry_cache(void); 3599 void f2fs_submit_bio(struct f2fs_sb_info *sbi, 3600 struct bio *bio, enum page_type type); 3601 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); 3602 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 3603 struct inode *inode, struct page *page, 3604 nid_t ino, enum page_type type); 3605 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, 3606 struct bio **bio, struct page *page); 3607 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); 3608 int f2fs_submit_page_bio(struct f2fs_io_info *fio); 3609 int f2fs_merge_page_bio(struct f2fs_io_info *fio); 3610 void f2fs_submit_page_write(struct f2fs_io_info *fio); 3611 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 3612 block_t blk_addr, struct bio *bio); 3613 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); 3614 void f2fs_set_data_blkaddr(struct dnode_of_data *dn); 3615 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 3616 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); 3617 int f2fs_reserve_new_block(struct dnode_of_data *dn); 3618 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); 3619 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from); 3620 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); 3621 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, 3622 int op_flags, bool for_write); 3623 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index); 3624 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, 3625 bool for_write); 3626 struct page *f2fs_get_new_data_page(struct inode *inode, 3627 struct page *ipage, pgoff_t index, bool new_i_size); 3628 int f2fs_do_write_data_page(struct f2fs_io_info *fio); 3629 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); 3630 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, 3631 int create, int flag); 3632 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3633 u64 start, u64 len); 3634 int f2fs_encrypt_one_page(struct f2fs_io_info *fio); 3635 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); 3636 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); 3637 int f2fs_write_single_data_page(struct page *page, int *submitted, 3638 struct bio **bio, sector_t *last_block, 3639 struct writeback_control *wbc, 3640 enum iostat_type io_type, 3641 int compr_blocks, bool allow_balance); 3642 void f2fs_invalidate_page(struct page *page, unsigned int offset, 3643 unsigned int 
length); 3644 int f2fs_release_page(struct page *page, gfp_t wait); 3645 #ifdef CONFIG_MIGRATION 3646 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, 3647 struct page *page, enum migrate_mode mode); 3648 #endif 3649 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); 3650 void f2fs_clear_page_cache_dirty_tag(struct page *page); 3651 int f2fs_init_post_read_processing(void); 3652 void f2fs_destroy_post_read_processing(void); 3653 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); 3654 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); 3655 3656 /* 3657 * gc.c 3658 */ 3659 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); 3660 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); 3661 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); 3662 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force, 3663 unsigned int segno); 3664 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); 3665 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count); 3666 int __init f2fs_create_garbage_collection_cache(void); 3667 void f2fs_destroy_garbage_collection_cache(void); 3668 3669 /* 3670 * recovery.c 3671 */ 3672 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); 3673 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); 3674 int __init f2fs_create_recovery_cache(void); 3675 void f2fs_destroy_recovery_cache(void); 3676 3677 /* 3678 * debug.c 3679 */ 3680 #ifdef CONFIG_F2FS_STAT_FS 3681 struct f2fs_stat_info { 3682 struct list_head stat_list; 3683 struct f2fs_sb_info *sbi; 3684 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; 3685 int main_area_segs, main_area_sections, main_area_zones; 3686 unsigned long long hit_largest, hit_cached, hit_rbtree; 3687 unsigned long long hit_total, total_ext; 3688 int ext_tree, zombie_tree, ext_node; 3689 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; 3690 int ndirty_data, ndirty_qdata; 3691 int inmem_pages; 3692 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; 3693 int nats, dirty_nats, sits, dirty_sits; 3694 int free_nids, avail_nids, alloc_nids; 3695 int total_count, utilization; 3696 int bg_gc, nr_wb_cp_data, nr_wb_data; 3697 int nr_rd_data, nr_rd_node, nr_rd_meta; 3698 int nr_dio_read, nr_dio_write; 3699 unsigned int io_skip_bggc, other_skip_bggc; 3700 int nr_flushing, nr_flushed, flush_list_empty; 3701 int nr_discarding, nr_discarded; 3702 int nr_discard_cmd; 3703 unsigned int undiscard_blks; 3704 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; 3705 unsigned int cur_ckpt_time, peak_ckpt_time; 3706 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 3707 int compr_inode; 3708 unsigned long long compr_blocks; 3709 int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt; 3710 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; 3711 unsigned int bimodal, avg_vblocks; 3712 int util_free, util_valid, util_invalid; 3713 int rsvd_segs, overp_segs; 3714 int dirty_count, node_pages, meta_pages, compress_pages; 3715 int compress_page_hit; 3716 int prefree_count, call_count, cp_count, bg_cp_count; 3717 int tot_segs, node_segs, data_segs, free_segs, free_secs; 3718 int bg_node_segs, bg_data_segs; 3719 int tot_blks, data_blks, node_blks; 3720 int bg_data_blks, bg_node_blks; 3721 unsigned long long skipped_atomic_files[2]; 3722 int curseg[NR_CURSEG_TYPE]; 3723 int cursec[NR_CURSEG_TYPE]; 3724 int curzone[NR_CURSEG_TYPE]; 3725 unsigned int dirty_seg[NR_CURSEG_TYPE]; 3726 unsigned 
int full_seg[NR_CURSEG_TYPE]; 3727 unsigned int valid_blks[NR_CURSEG_TYPE]; 3728 3729 unsigned int meta_count[META_MAX]; 3730 unsigned int segment_count[2]; 3731 unsigned int block_count[2]; 3732 unsigned int inplace_count; 3733 unsigned long long base_mem, cache_mem, page_mem; 3734 }; 3735 3736 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) 3737 { 3738 return (struct f2fs_stat_info *)sbi->stat_info; 3739 } 3740 3741 #define stat_inc_cp_count(si) ((si)->cp_count++) 3742 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++) 3743 #define stat_inc_call_count(si) ((si)->call_count++) 3744 #define stat_inc_bggc_count(si) ((si)->bg_gc++) 3745 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++) 3746 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) 3747 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) 3748 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) 3749 #define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext)) 3750 #define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree)) 3751 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) 3752 #define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached)) 3753 #define stat_inc_inline_xattr(inode) \ 3754 do { \ 3755 if (f2fs_has_inline_xattr(inode)) \ 3756 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ 3757 } while (0) 3758 #define stat_dec_inline_xattr(inode) \ 3759 do { \ 3760 if (f2fs_has_inline_xattr(inode)) \ 3761 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ 3762 } while (0) 3763 #define stat_inc_inline_inode(inode) \ 3764 do { \ 3765 if (f2fs_has_inline_data(inode)) \ 3766 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ 3767 } while (0) 3768 #define stat_dec_inline_inode(inode) \ 3769 do { \ 3770 if (f2fs_has_inline_data(inode)) \ 3771 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ 3772 } while (0) 3773 #define stat_inc_inline_dir(inode) \ 3774 do { \ 3775 if (f2fs_has_inline_dentry(inode)) \ 3776 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ 3777 } while (0) 3778 #define stat_dec_inline_dir(inode) \ 3779 do { \ 3780 if (f2fs_has_inline_dentry(inode)) \ 3781 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ 3782 } while (0) 3783 #define stat_inc_compr_inode(inode) \ 3784 do { \ 3785 if (f2fs_compressed_file(inode)) \ 3786 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \ 3787 } while (0) 3788 #define stat_dec_compr_inode(inode) \ 3789 do { \ 3790 if (f2fs_compressed_file(inode)) \ 3791 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ 3792 } while (0) 3793 #define stat_add_compr_blocks(inode, blocks) \ 3794 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3795 #define stat_sub_compr_blocks(inode, blocks) \ 3796 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3797 #define stat_inc_meta_count(sbi, blkaddr) \ 3798 do { \ 3799 if (blkaddr < SIT_I(sbi)->sit_base_addr) \ 3800 atomic_inc(&(sbi)->meta_count[META_CP]); \ 3801 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \ 3802 atomic_inc(&(sbi)->meta_count[META_SIT]); \ 3803 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \ 3804 atomic_inc(&(sbi)->meta_count[META_NAT]); \ 3805 else if (blkaddr < SM_I(sbi)->main_blkaddr) \ 3806 atomic_inc(&(sbi)->meta_count[META_SSA]); \ 3807 } while (0) 3808 #define stat_inc_seg_type(sbi, curseg) \ 3809 ((sbi)->segment_count[(curseg)->alloc_type]++) 3810 #define stat_inc_block_count(sbi, curseg) \ 3811 ((sbi)->block_count[(curseg)->alloc_type]++) 3812 #define stat_inc_inplace_blocks(sbi) \ 3813 
(atomic_inc(&(sbi)->inplace_count)) 3814 #define stat_update_max_atomic_write(inode) \ 3815 do { \ 3816 int cur = F2FS_I_SB(inode)->atomic_files; \ 3817 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 3818 if (cur > max) \ 3819 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ 3820 } while (0) 3821 #define stat_inc_volatile_write(inode) \ 3822 (atomic_inc(&F2FS_I_SB(inode)->vw_cnt)) 3823 #define stat_dec_volatile_write(inode) \ 3824 (atomic_dec(&F2FS_I_SB(inode)->vw_cnt)) 3825 #define stat_update_max_volatile_write(inode) \ 3826 do { \ 3827 int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \ 3828 int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \ 3829 if (cur > max) \ 3830 atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \ 3831 } while (0) 3832 #define stat_inc_seg_count(sbi, type, gc_type) \ 3833 do { \ 3834 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3835 si->tot_segs++; \ 3836 if ((type) == SUM_TYPE_DATA) { \ 3837 si->data_segs++; \ 3838 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \ 3839 } else { \ 3840 si->node_segs++; \ 3841 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \ 3842 } \ 3843 } while (0) 3844 3845 #define stat_inc_tot_blk_count(si, blks) \ 3846 ((si)->tot_blks += (blks)) 3847 3848 #define stat_inc_data_blk_count(sbi, blks, gc_type) \ 3849 do { \ 3850 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3851 stat_inc_tot_blk_count(si, blks); \ 3852 si->data_blks += (blks); \ 3853 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 3854 } while (0) 3855 3856 #define stat_inc_node_blk_count(sbi, blks, gc_type) \ 3857 do { \ 3858 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3859 stat_inc_tot_blk_count(si, blks); \ 3860 si->node_blks += (blks); \ 3861 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 3862 } while (0) 3863 3864 int f2fs_build_stats(struct f2fs_sb_info *sbi); 3865 void f2fs_destroy_stats(struct f2fs_sb_info *sbi); 3866 void __init f2fs_create_root_stats(void); 3867 void f2fs_destroy_root_stats(void); 3868 void f2fs_update_sit_info(struct f2fs_sb_info *sbi); 3869 #else 3870 #define stat_inc_cp_count(si) do { } while (0) 3871 #define stat_inc_bg_cp_count(si) do { } while (0) 3872 #define stat_inc_call_count(si) do { } while (0) 3873 #define stat_inc_bggc_count(si) do { } while (0) 3874 #define stat_io_skip_bggc_count(sbi) do { } while (0) 3875 #define stat_other_skip_bggc_count(sbi) do { } while (0) 3876 #define stat_inc_dirty_inode(sbi, type) do { } while (0) 3877 #define stat_dec_dirty_inode(sbi, type) do { } while (0) 3878 #define stat_inc_total_hit(sbi) do { } while (0) 3879 #define stat_inc_rbtree_node_hit(sbi) do { } while (0) 3880 #define stat_inc_largest_node_hit(sbi) do { } while (0) 3881 #define stat_inc_cached_node_hit(sbi) do { } while (0) 3882 #define stat_inc_inline_xattr(inode) do { } while (0) 3883 #define stat_dec_inline_xattr(inode) do { } while (0) 3884 #define stat_inc_inline_inode(inode) do { } while (0) 3885 #define stat_dec_inline_inode(inode) do { } while (0) 3886 #define stat_inc_inline_dir(inode) do { } while (0) 3887 #define stat_dec_inline_dir(inode) do { } while (0) 3888 #define stat_inc_compr_inode(inode) do { } while (0) 3889 #define stat_dec_compr_inode(inode) do { } while (0) 3890 #define stat_add_compr_blocks(inode, blocks) do { } while (0) 3891 #define stat_sub_compr_blocks(inode, blocks) do { } while (0) 3892 #define stat_update_max_atomic_write(inode) do { } while (0) 3893 #define stat_inc_volatile_write(inode) do { } while (0) 3894 #define stat_dec_volatile_write(inode) do { } while (0) 3895 #define 
stat_update_max_volatile_write(inode) do { } while (0) 3896 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) 3897 #define stat_inc_seg_type(sbi, curseg) do { } while (0) 3898 #define stat_inc_block_count(sbi, curseg) do { } while (0) 3899 #define stat_inc_inplace_blocks(sbi) do { } while (0) 3900 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0) 3901 #define stat_inc_tot_blk_count(si, blks) do { } while (0) 3902 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) 3903 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) 3904 3905 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 3906 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 3907 static inline void __init f2fs_create_root_stats(void) { } 3908 static inline void f2fs_destroy_root_stats(void) { } 3909 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} 3910 #endif 3911 3912 extern const struct file_operations f2fs_dir_operations; 3913 extern const struct file_operations f2fs_file_operations; 3914 extern const struct inode_operations f2fs_file_inode_operations; 3915 extern const struct address_space_operations f2fs_dblock_aops; 3916 extern const struct address_space_operations f2fs_node_aops; 3917 extern const struct address_space_operations f2fs_meta_aops; 3918 extern const struct inode_operations f2fs_dir_inode_operations; 3919 extern const struct inode_operations f2fs_symlink_inode_operations; 3920 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; 3921 extern const struct inode_operations f2fs_special_inode_operations; 3922 extern struct kmem_cache *f2fs_inode_entry_slab; 3923 3924 /* 3925 * inline.c 3926 */ 3927 bool f2fs_may_inline_data(struct inode *inode); 3928 bool f2fs_may_inline_dentry(struct inode *inode); 3929 void f2fs_do_read_inline_data(struct page *page, struct page *ipage); 3930 void f2fs_truncate_inline_inode(struct inode *inode, 3931 struct page *ipage, u64 from); 3932 int f2fs_read_inline_data(struct inode *inode, struct page *page); 3933 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); 3934 int f2fs_convert_inline_inode(struct inode *inode); 3935 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); 3936 int f2fs_write_inline_data(struct inode *inode, struct page *page); 3937 int f2fs_recover_inline_data(struct inode *inode, struct page *npage); 3938 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, 3939 const struct f2fs_filename *fname, 3940 struct page **res_page); 3941 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, 3942 struct page *ipage); 3943 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, 3944 struct inode *inode, nid_t ino, umode_t mode); 3945 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, 3946 struct page *page, struct inode *dir, 3947 struct inode *inode); 3948 bool f2fs_empty_inline_dir(struct inode *dir); 3949 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, 3950 struct fscrypt_str *fstr); 3951 int f2fs_inline_data_fiemap(struct inode *inode, 3952 struct fiemap_extent_info *fieinfo, 3953 __u64 start, __u64 len); 3954 3955 /* 3956 * shrinker.c 3957 */ 3958 unsigned long f2fs_shrink_count(struct shrinker *shrink, 3959 struct shrink_control *sc); 3960 unsigned long f2fs_shrink_scan(struct shrinker *shrink, 3961 struct shrink_control *sc); 3962 void f2fs_join_shrinker(struct f2fs_sb_info *sbi); 3963 void 
f2fs_leave_shrinker(struct f2fs_sb_info *sbi); 3964 3965 /* 3966 * extent_cache.c 3967 */ 3968 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, 3969 struct rb_entry *cached_re, unsigned int ofs); 3970 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, 3971 struct rb_root_cached *root, 3972 struct rb_node **parent, 3973 unsigned long long key, bool *left_most); 3974 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, 3975 struct rb_root_cached *root, 3976 struct rb_node **parent, 3977 unsigned int ofs, bool *leftmost); 3978 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, 3979 struct rb_entry *cached_re, unsigned int ofs, 3980 struct rb_entry **prev_entry, struct rb_entry **next_entry, 3981 struct rb_node ***insert_p, struct rb_node **insert_parent, 3982 bool force, bool *leftmost); 3983 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, 3984 struct rb_root_cached *root, bool check_key); 3985 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); 3986 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage); 3987 void f2fs_drop_extent_tree(struct inode *inode); 3988 unsigned int f2fs_destroy_extent_node(struct inode *inode); 3989 void f2fs_destroy_extent_tree(struct inode *inode); 3990 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, 3991 struct extent_info *ei); 3992 void f2fs_update_extent_cache(struct dnode_of_data *dn); 3993 void f2fs_update_extent_cache_range(struct dnode_of_data *dn, 3994 pgoff_t fofs, block_t blkaddr, unsigned int len); 3995 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); 3996 int __init f2fs_create_extent_cache(void); 3997 void f2fs_destroy_extent_cache(void); 3998 3999 /* 4000 * sysfs.c 4001 */ 4002 #define MIN_RA_MUL 2 4003 #define MAX_RA_MUL 256 4004 4005 int __init f2fs_init_sysfs(void); 4006 void f2fs_exit_sysfs(void); 4007 int f2fs_register_sysfs(struct f2fs_sb_info *sbi); 4008 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); 4009 4010 /* verity.c */ 4011 extern const struct fsverity_operations f2fs_verityops; 4012 4013 /* 4014 * crypto support 4015 */ 4016 static inline bool f2fs_encrypted_file(struct inode *inode) 4017 { 4018 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); 4019 } 4020 4021 static inline void f2fs_set_encrypted_inode(struct inode *inode) 4022 { 4023 #ifdef CONFIG_FS_ENCRYPTION 4024 file_set_encrypt(inode); 4025 f2fs_set_inode_flags(inode); 4026 #endif 4027 } 4028 4029 /* 4030 * Returns true if the reads of the inode's data need to undergo some 4031 * postprocessing step, like decryption or authenticity verification. 
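 *
 * Compressed inodes are included as well: their data pages go through the
 * post-read decompression path before the page cache can use them.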
4032 */ 4033 static inline bool f2fs_post_read_required(struct inode *inode) 4034 { 4035 return f2fs_encrypted_file(inode) || fsverity_active(inode) || 4036 f2fs_compressed_file(inode); 4037 } 4038 4039 /* 4040 * compress.c 4041 */ 4042 #ifdef CONFIG_F2FS_FS_COMPRESSION 4043 bool f2fs_is_compressed_page(struct page *page); 4044 struct page *f2fs_compress_control_page(struct page *page); 4045 int f2fs_prepare_compress_overwrite(struct inode *inode, 4046 struct page **pagep, pgoff_t index, void **fsdata); 4047 bool f2fs_compress_write_end(struct inode *inode, void *fsdata, 4048 pgoff_t index, unsigned copied); 4049 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); 4050 void f2fs_compress_write_end_io(struct bio *bio, struct page *page); 4051 bool f2fs_is_compress_backend_ready(struct inode *inode); 4052 int f2fs_init_compress_mempool(void); 4053 void f2fs_destroy_compress_mempool(void); 4054 void f2fs_decompress_cluster(struct decompress_io_ctx *dic); 4055 void f2fs_end_read_compressed_page(struct page *page, bool failed, 4056 block_t blkaddr); 4057 bool f2fs_cluster_is_empty(struct compress_ctx *cc); 4058 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); 4059 bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec, 4060 int index, int nr_pages); 4061 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn); 4062 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); 4063 int f2fs_write_multi_pages(struct compress_ctx *cc, 4064 int *submitted, 4065 struct writeback_control *wbc, 4066 enum iostat_type io_type); 4067 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); 4068 void f2fs_update_extent_tree_range_compressed(struct inode *inode, 4069 pgoff_t fofs, block_t blkaddr, unsigned int llen, 4070 unsigned int c_len); 4071 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, 4072 unsigned nr_pages, sector_t *last_block_in_bio, 4073 bool is_readahead, bool for_write); 4074 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); 4075 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed); 4076 void f2fs_put_page_dic(struct page *page); 4077 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn); 4078 int f2fs_init_compress_ctx(struct compress_ctx *cc); 4079 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); 4080 void f2fs_init_compress_info(struct f2fs_sb_info *sbi); 4081 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); 4082 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi); 4083 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); 4084 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); 4085 int __init f2fs_init_compress_cache(void); 4086 void f2fs_destroy_compress_cache(void); 4087 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); 4088 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); 4089 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4090 nid_t ino, block_t blkaddr); 4091 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4092 block_t blkaddr); 4093 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); 4094 #define inc_compr_inode_stat(inode) \ 4095 do { \ 4096 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4097 sbi->compr_new_inode++; \ 4098 } while (0) 4099 #define add_compr_block_stat(inode, blocks) \ 4100 do { \ 4101 struct f2fs_sb_info *sbi = 
F2FS_I_SB(inode); \ 4102 int diff = F2FS_I(inode)->i_cluster_size - blocks; \ 4103 sbi->compr_written_block += blocks; \ 4104 sbi->compr_saved_block += diff; \ 4105 } while (0) 4106 #else 4107 static inline bool f2fs_is_compressed_page(struct page *page) { return false; } 4108 static inline bool f2fs_is_compress_backend_ready(struct inode *inode) 4109 { 4110 if (!f2fs_compressed_file(inode)) 4111 return true; 4112 /* not support compression */ 4113 return false; 4114 } 4115 static inline struct page *f2fs_compress_control_page(struct page *page) 4116 { 4117 WARN_ON_ONCE(1); 4118 return ERR_PTR(-EINVAL); 4119 } 4120 static inline int f2fs_init_compress_mempool(void) { return 0; } 4121 static inline void f2fs_destroy_compress_mempool(void) { } 4122 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { } 4123 static inline void f2fs_end_read_compressed_page(struct page *page, 4124 bool failed, block_t blkaddr) 4125 { 4126 WARN_ON_ONCE(1); 4127 } 4128 static inline void f2fs_put_page_dic(struct page *page) 4129 { 4130 WARN_ON_ONCE(1); 4131 } 4132 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; } 4133 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; } 4134 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } 4135 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { } 4136 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } 4137 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } 4138 static inline int __init f2fs_init_compress_cache(void) { return 0; } 4139 static inline void f2fs_destroy_compress_cache(void) { } 4140 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, 4141 block_t blkaddr) { } 4142 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, 4143 struct page *page, nid_t ino, block_t blkaddr) { } 4144 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, 4145 struct page *page, block_t blkaddr) { return false; } 4146 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, 4147 nid_t ino) { } 4148 #define inc_compr_inode_stat(inode) do { } while (0) 4149 static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode, 4150 pgoff_t fofs, block_t blkaddr, unsigned int llen, 4151 unsigned int c_len) { } 4152 #endif 4153 4154 static inline void set_compress_context(struct inode *inode) 4155 { 4156 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4157 4158 F2FS_I(inode)->i_compress_algorithm = 4159 F2FS_OPTION(sbi).compress_algorithm; 4160 F2FS_I(inode)->i_log_cluster_size = 4161 F2FS_OPTION(sbi).compress_log_size; 4162 F2FS_I(inode)->i_compress_flag = 4163 F2FS_OPTION(sbi).compress_chksum ? 
4164 1 << COMPRESS_CHKSUM : 0; 4165 F2FS_I(inode)->i_cluster_size = 4166 1 << F2FS_I(inode)->i_log_cluster_size; 4167 if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 || 4168 F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) && 4169 F2FS_OPTION(sbi).compress_level) 4170 F2FS_I(inode)->i_compress_flag |= 4171 F2FS_OPTION(sbi).compress_level << 4172 COMPRESS_LEVEL_OFFSET; 4173 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; 4174 set_inode_flag(inode, FI_COMPRESSED_FILE); 4175 stat_inc_compr_inode(inode); 4176 inc_compr_inode_stat(inode); 4177 f2fs_mark_inode_dirty_sync(inode, true); 4178 } 4179 4180 static inline bool f2fs_disable_compressed_file(struct inode *inode) 4181 { 4182 struct f2fs_inode_info *fi = F2FS_I(inode); 4183 4184 if (!f2fs_compressed_file(inode)) 4185 return true; 4186 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode)) 4187 return false; 4188 4189 fi->i_flags &= ~F2FS_COMPR_FL; 4190 stat_dec_compr_inode(inode); 4191 clear_inode_flag(inode, FI_COMPRESSED_FILE); 4192 f2fs_mark_inode_dirty_sync(inode, true); 4193 return true; 4194 } 4195 4196 #define F2FS_FEATURE_FUNCS(name, flagname) \ 4197 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \ 4198 { \ 4199 return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \ 4200 } 4201 4202 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); 4203 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); 4204 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); 4205 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); 4206 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); 4207 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); 4208 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); 4209 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); 4210 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); 4211 F2FS_FEATURE_FUNCS(verity, VERITY); 4212 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); 4213 F2FS_FEATURE_FUNCS(casefold, CASEFOLD); 4214 F2FS_FEATURE_FUNCS(compression, COMPRESSION); 4215 F2FS_FEATURE_FUNCS(readonly, RO); 4216 4217 static inline bool f2fs_may_extent_tree(struct inode *inode) 4218 { 4219 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4220 4221 if (!test_opt(sbi, EXTENT_CACHE) || 4222 is_inode_flag_set(inode, FI_NO_EXTENT) || 4223 (is_inode_flag_set(inode, FI_COMPRESSED_FILE) && 4224 !f2fs_sb_has_readonly(sbi))) 4225 return false; 4226 4227 /* 4228 * for recovered files during mount do not create extents 4229 * if shrinker is not registered. 
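 * (f2fs_join_shrinker() links sbi->s_list into the global shrinker list;
 * until that happens the list head stays empty, hence the check below.)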
4230 */ 4231 if (list_empty(&sbi->s_list)) 4232 return false; 4233 4234 return S_ISREG(inode->i_mode); 4235 } 4236 4237 #ifdef CONFIG_BLK_DEV_ZONED 4238 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, 4239 block_t blkaddr) 4240 { 4241 unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz; 4242 4243 return test_bit(zno, FDEV(devi).blkz_seq); 4244 } 4245 #endif 4246 4247 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi) 4248 { 4249 return f2fs_sb_has_blkzoned(sbi); 4250 } 4251 4252 static inline bool f2fs_bdev_support_discard(struct block_device *bdev) 4253 { 4254 return blk_queue_discard(bdev_get_queue(bdev)) || 4255 bdev_is_zoned(bdev); 4256 } 4257 4258 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi) 4259 { 4260 int i; 4261 4262 if (!f2fs_is_multi_device(sbi)) 4263 return f2fs_bdev_support_discard(sbi->sb->s_bdev); 4264 4265 for (i = 0; i < sbi->s_ndevs; i++) 4266 if (f2fs_bdev_support_discard(FDEV(i).bdev)) 4267 return true; 4268 return false; 4269 } 4270 4271 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi) 4272 { 4273 return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) || 4274 f2fs_hw_should_discard(sbi); 4275 } 4276 4277 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) 4278 { 4279 int i; 4280 4281 if (!f2fs_is_multi_device(sbi)) 4282 return bdev_read_only(sbi->sb->s_bdev); 4283 4284 for (i = 0; i < sbi->s_ndevs; i++) 4285 if (bdev_read_only(FDEV(i).bdev)) 4286 return true; 4287 return false; 4288 } 4289 4290 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) 4291 { 4292 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; 4293 } 4294 4295 static inline bool f2fs_may_compress(struct inode *inode) 4296 { 4297 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) || 4298 f2fs_is_atomic_file(inode) || 4299 f2fs_is_volatile_file(inode)) 4300 return false; 4301 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); 4302 } 4303 4304 static inline void f2fs_i_compr_blocks_update(struct inode *inode, 4305 u64 blocks, bool add) 4306 { 4307 int diff = F2FS_I(inode)->i_cluster_size - blocks; 4308 struct f2fs_inode_info *fi = F2FS_I(inode); 4309 4310 /* don't update i_compr_blocks if saved blocks were released */ 4311 if (!add && !atomic_read(&fi->i_compr_blocks)) 4312 return; 4313 4314 if (add) { 4315 atomic_add(diff, &fi->i_compr_blocks); 4316 stat_add_compr_blocks(inode, diff); 4317 } else { 4318 atomic_sub(diff, &fi->i_compr_blocks); 4319 stat_sub_compr_blocks(inode, diff); 4320 } 4321 f2fs_mark_inode_dirty_sync(inode, true); 4322 } 4323 4324 static inline int block_unaligned_IO(struct inode *inode, 4325 struct kiocb *iocb, struct iov_iter *iter) 4326 { 4327 unsigned int i_blkbits = READ_ONCE(inode->i_blkbits); 4328 unsigned int blocksize_mask = (1 << i_blkbits) - 1; 4329 loff_t offset = iocb->ki_pos; 4330 unsigned long align = offset | iov_iter_alignment(iter); 4331 4332 return align & blocksize_mask; 4333 } 4334 4335 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, 4336 int flag) 4337 { 4338 if (!f2fs_is_multi_device(sbi)) 4339 return false; 4340 if (flag != F2FS_GET_BLOCK_DIO) 4341 return false; 4342 return sbi->aligned_blksize; 4343 } 4344 4345 static inline bool f2fs_force_buffered_io(struct inode *inode, 4346 struct kiocb *iocb, struct iov_iter *iter) 4347 { 4348 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4349 int rw = iov_iter_rw(iter); 4350 4351 if (f2fs_post_read_required(inode)) 4352 return true; 4353 4354 /* disallow direct IO if any of 
devices has unaligned blksize */ 4355 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) 4356 return true; 4357 /* 4358 * for blkzoned device, fallback direct IO to buffered IO, so 4359 * all IOs can be serialized by log-structured write. 4360 */ 4361 if (f2fs_sb_has_blkzoned(sbi)) 4362 return true; 4363 if (f2fs_lfs_mode(sbi) && (rw == WRITE)) { 4364 if (block_unaligned_IO(inode, iocb, iter)) 4365 return true; 4366 if (F2FS_IO_ALIGNED(sbi)) 4367 return true; 4368 } 4369 if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) 4370 return true; 4371 4372 return false; 4373 } 4374 4375 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) 4376 { 4377 return fsverity_active(inode) && 4378 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 4379 } 4380 4381 #ifdef CONFIG_F2FS_FAULT_INJECTION 4382 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, 4383 unsigned int type); 4384 #else 4385 #define f2fs_build_fault_attr(sbi, rate, type) do { } while (0) 4386 #endif 4387 4388 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) 4389 { 4390 #ifdef CONFIG_QUOTA 4391 if (f2fs_sb_has_quota_ino(sbi)) 4392 return true; 4393 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 4394 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || 4395 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) 4396 return true; 4397 #endif 4398 return false; 4399 } 4400 4401 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi) 4402 { 4403 return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; 4404 } 4405 4406 #define EFSBADCRC EBADMSG /* Bad CRC detected */ 4407 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ 4408 4409 #endif /* _LINUX_F2FS_H */ 4410