/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete: bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
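/*
 * Illustrative sketch (not part of the original header): the helpers above
 * give bit-mask style access to F2FS_OPTION(sbi).opt.  A hypothetical caller
 * could toggle the discard option like this:
 *
 *	if (!test_opt(sbi, DISCARD))
 *		set_opt(sbi, DISCARD);
 *	...
 *	clear_opt(sbi, DISCARD);
 */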
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080
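/*
 * Illustrative sketch (not part of the original header): the CP_* reason
 * flags above are carried in struct cp_control (defined below) when a
 * checkpoint is requested.  A hypothetical fstrim-style caller might do:
 *
 *	struct cp_control cpc = {
 *		.reason = CP_DISCARD,
 *		.trim_start = start_segno,
 *		.trim_end = end_segno,
 *		.trim_minlen = minlen_in_blocks,
 *	};
 *	err = f2fs_write_checkpoint(sbi, &cpc);
 */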
#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
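/*
 * Illustrative note (not part of the original header): plist_idx() maps a
 * discard length in blocks to a pending-list index, so pend_list[0] holds
 * 1-block discards and pend_list[MAX_PLIST_NUM - 1] holds everything of
 * MAX_PLIST_NUM blocks or more.  For example:
 *
 *	plist_idx(1)   == 0
 *	plist_idx(16)  == 15
 *	plist_idx(512) == 511	(any larger value also maps to 511)
 */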
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity of discard that is not aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
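/*
 * Illustrative sketch (not part of the original header): a caller that wants
 * to append one NAT entry to the in-place journal would typically check the
 * remaining space first and use the value returned by update_nats_in_cursum()
 * as the slot index to fill, e.g.:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL)) {
 *		int offset = update_nats_in_cursum(journal, 1);
 *
 *		nid_in_journal(journal, offset) = cpu_to_le32(nid);
 *		nat_in_journal(journal, offset) = raw_nat;
 *	}
 *
 * (nid and raw_nat are hypothetical locals used only for illustration.)
 */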
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or
	 * if the filesystem is doing an internal operation where usr_fname is
	 * also NULL.  In all these cases we fall back to treating the name as
	 * an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
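/*
 * Illustrative note (not part of the original header): make_dentry_ptr_inline()
 * above implies the following layout of the inline dentry area at 't':
 *
 *	+------------------+  t
 *	| dentry bitmap    |  INLINE_DENTRY_BITMAP_SIZE(inode) bytes
 *	+------------------+
 *	| reserved         |  INLINE_RESERVED_SIZE(inode) bytes
 *	+------------------+
 *	| dir entries      |  SIZE_OF_DIR_ENTRY * NR_INLINE_DENTRY(inode) bytes
 *	+------------------+
 *	| filename slots   |  F2FS_SLOT_LEN * NR_INLINE_DENTRY(inode) bytes
 *	+------------------+
 */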
/*
 * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
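/*
 * Illustrative sketch (not part of the original header): a read-only mapping
 * query fills an f2fs_map_blocks request and passes one of the flags above,
 * roughly like the fiemap/bmap paths do (the exact field and call usage here
 * is a simplified assumption, not a verbatim copy of those paths):
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = start_blk,
 *		.m_len = nr_blks,
 *		.m_next_pgofs = &next_pgofs,
 *		.m_seg_type = NO_CHECK_TYPE,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		;	// blocks [start_blk, start_blk + m_len) map to m_pblk
 */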
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST,	/* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}
static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
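/*
 * Illustrative note (not part of the original header): two extents are
 * mergeable only when they are contiguous both in file offset and in block
 * address.  For example, with back = {fofs = 0, len = 4, blk = 100} and
 * front = {fofs = 4, len = 2, blk = 104}, __is_extent_mergeable(back, front)
 * is true and the merged extent would be {fofs = 0, len = 6, blk = 100};
 * if front started at blk = 200 instead, the two would not be merged.
 */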
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
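/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for resolving the on-disk block behind a page index is to initialize a
 * dnode_of_data, look the node up, read dn.data_blkaddr, and release the
 * pages again (error handling trimmed for brevity):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */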
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};
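/*
 * Illustrative sketch (not part of the original header): I/O issued on the
 * various paths is accounted against the types above, typically through the
 * iostat helper; the exact call site below is an assumption for illustration:
 *
 *	f2fs_update_iostat(sbi, APP_BUFFERED_IO, PAGE_SIZE);
 */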
struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
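/*
 * Illustrative note (not part of the original header): in Layout A the flag
 * bits and the private data share one word.  For example, a page carrying
 * private data 3 with PAGE_PRIVATE_REF_RESOURCE set would have
 *
 *	page_private = (3 << 6) | (1 << 5) | (1 << 0) = 0xe1
 *
 * i.e. the data shifted past the six flag bits, the REF_RESOURCE bit, and the
 * NOT_POINTER marker in bit 0.
 */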
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		}\
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
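/*
 * Illustrative note (not part of the original header): each instantiation
 * above generates one small helper; e.g. PAGE_PRIVATE_GET_FUNC(gcing,
 * ONGOING_MIGRATION) expands to page_private_gcing(page), which returns true
 * only when the page has private data in Layout A *and* the
 * PAGE_PRIVATE_ONGOING_MIGRATION bit is set.  The data helpers keep the flag
 * bits intact, so on a freshly tagged page set_page_private_data(page, 5)
 * followed by get_page_private_data(page) returns 5.
 */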
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_WATERMARK			20
#define COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	unsigned int valid_nr_cpages;	/* valid page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ 1622 struct mutex flush_lock; /* for flush exclusion */ 1623 1624 /* for extent tree cache */ 1625 struct radix_tree_root extent_tree_root;/* cache extent cache entries */ 1626 struct mutex extent_tree_lock; /* locking extent radix tree */ 1627 struct list_head extent_list; /* lru list for shrinker */ 1628 spinlock_t extent_lock; /* locking extent lru list */ 1629 atomic_t total_ext_tree; /* extent tree count */ 1630 struct list_head zombie_list; /* extent zombie tree list */ 1631 atomic_t total_zombie_tree; /* extent zombie tree count */ 1632 atomic_t total_ext_node; /* extent info count */ 1633 1634 /* basic filesystem units */ 1635 unsigned int log_sectors_per_block; /* log2 sectors per block */ 1636 unsigned int log_blocksize; /* log2 block size */ 1637 unsigned int blocksize; /* block size */ 1638 unsigned int root_ino_num; /* root inode number*/ 1639 unsigned int node_ino_num; /* node inode number*/ 1640 unsigned int meta_ino_num; /* meta inode number*/ 1641 unsigned int log_blocks_per_seg; /* log2 blocks per segment */ 1642 unsigned int blocks_per_seg; /* blocks per segment */ 1643 unsigned int segs_per_sec; /* segments per section */ 1644 unsigned int secs_per_zone; /* sections per zone */ 1645 unsigned int total_sections; /* total section count */ 1646 unsigned int total_node_count; /* total node block count */ 1647 unsigned int total_valid_node_count; /* valid node block count */ 1648 int dir_level; /* directory level */ 1649 int readdir_ra; /* readahead inode in readdir */ 1650 u64 max_io_bytes; /* max io bytes to merge IOs */ 1651 1652 block_t user_block_count; /* # of user blocks */ 1653 block_t total_valid_block_count; /* # of valid blocks */ 1654 block_t discard_blks; /* discard command candidats */ 1655 block_t last_valid_block_count; /* for recovery */ 1656 block_t reserved_blocks; /* configurable reserved blocks */ 1657 block_t current_reserved_blocks; /* current reserved blocks */ 1658 1659 /* Additional tracking for no checkpoint mode */ 1660 block_t unusable_block_count; /* # of blocks saved by last cp */ 1661 1662 unsigned int nquota_files; /* # of quota sysfile */ 1663 struct rw_semaphore quota_sem; /* blocking cp for flags */ 1664 1665 /* # of pages, see count_type */ 1666 atomic_t nr_pages[NR_COUNT_TYPE]; 1667 /* # of allocated blocks */ 1668 struct percpu_counter alloc_valid_block_count; 1669 1670 /* writeback control */ 1671 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ 1672 1673 /* valid inode count */ 1674 struct percpu_counter total_valid_inode_count; 1675 1676 struct f2fs_mount_info mount_opt; /* mount options */ 1677 1678 /* for cleaning operations */ 1679 struct rw_semaphore gc_lock; /* 1680 * semaphore for GC, avoid 1681 * race between GC and GC or CP 1682 */ 1683 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 1684 struct atgc_management am; /* atgc management */ 1685 unsigned int cur_victim_sec; /* current victim section num */ 1686 unsigned int gc_mode; /* current GC state */ 1687 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1688 spinlock_t gc_urgent_high_lock; 1689 bool gc_urgent_high_limited; /* indicates having limited trial count */ 1690 unsigned int gc_urgent_high_remaining; /* remaining trial count for GC_URGENT_HIGH */ 1691 1692 /* for skip statistic */ 1693 unsigned int atomic_files; /* # of opened atomic file */ 1694 unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ 1695 unsigned long long skipped_gc_rwsem; /* FG_GC 
only */
1696
1697 /* threshold for gc trials on pinned files */
1698 u64 gc_pin_file_threshold;
1699 struct rw_semaphore pin_sem;
1700
1701 /* maximum # of trials to find a victim segment for SSR and GC */
1702 unsigned int max_victim_search;
1703 /* migration granularity of garbage collection, unit: segment */
1704 unsigned int migration_granularity;
1705
1706 /*
1707 * for stat information.
1708 * one is for the LFS mode, and the other is for the SSR mode.
1709 */
1710 #ifdef CONFIG_F2FS_STAT_FS
1711 struct f2fs_stat_info *stat_info; /* FS status information */
1712 atomic_t meta_count[META_MAX]; /* # of meta blocks */
1713 unsigned int segment_count[2]; /* # of allocated segments */
1714 unsigned int block_count[2]; /* # of allocated blocks */
1715 atomic_t inplace_count; /* # of inplace update */
1716 atomic64_t total_hit_ext; /* # of lookup extent cache */
1717 atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */
1718 atomic64_t read_hit_largest; /* # of hit largest extent node */
1719 atomic64_t read_hit_cached; /* # of hit cached extent node */
1720 atomic_t inline_xattr; /* # of inline_xattr inodes */
1721 atomic_t inline_inode; /* # of inline_data inodes */
1722 atomic_t inline_dir; /* # of inline_dentry inodes */
1723 atomic_t compr_inode; /* # of compressed inodes */
1724 atomic64_t compr_blocks; /* # of compressed blocks */
1725 atomic_t vw_cnt; /* # of volatile writes */
1726 atomic_t max_aw_cnt; /* max # of atomic writes */
1727 atomic_t max_vw_cnt; /* max # of volatile writes */
1728 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
1729 unsigned int other_skip_bggc; /* skip background gc for other reasons */
1730 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
1731 #endif
1732 spinlock_t stat_lock; /* lock for stat operations */
1733
1734 /* to attach REQ_META|REQ_FUA flags */
1735 unsigned int data_io_flag;
1736 unsigned int node_io_flag;
1737
1738 /* For sysfs support */
1739 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
1740 struct completion s_kobj_unregister;
1741
1742 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
1743 struct completion s_stat_kobj_unregister;
1744
1745 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */
1746 struct completion s_feature_list_kobj_unregister;
1747
1748 /* For shrinker support */
1749 struct list_head s_list;
1750 struct mutex umount_mutex;
1751 unsigned int shrinker_run_no;
1752
1753 /* For multi devices */
1754 int s_ndevs; /* number of devices */
1755 struct f2fs_dev_info *devs; /* for device list */
1756 unsigned int dirty_device; /* for checkpoint data flush */
1757 spinlock_t dev_lock; /* protect dirty_device */
1758 bool aligned_blksize; /* all devices have the same logical blksize */
1759
1760 /* For write statistics */
1761 u64 sectors_written_start;
1762 u64 kbytes_written;
1763
1764 /* Reference to checksum algorithm driver via cryptoapi */
1765 struct crypto_shash *s_chksum_driver;
1766
1767 /* Precomputed FS UUID checksum for seeding other checksums */
1768 __u32 s_chksum_seed;
1769
1770 struct workqueue_struct *post_read_wq; /* post read workqueue */
1771
1772 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
1773 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
1774
1775 /* For reclaimed segs statistics per each GC mode */
1776 unsigned int gc_segment_mode; /* GC state for reclaimed segments */
1777 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */
1778
1779
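/*
 * Illustrative note (an assumption, not original header text): the write
 * statistics members above (sectors_written_start, kbytes_written) are the
 * inputs to the exported lifetime-write figure, usually derived as
 *
 *	lifetime_write_kbytes = sbi->kbytes_written +
 *		((u64)part_stat_read(sbi->sb->s_bdev, sectors[STAT_WRITE]) -
 *		 sbi->sectors_written_start) / 2;	// 512-byte sectors -> KiB
 */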
unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */ 1780 1781 int max_fragment_chunk; /* max chunk size for block fragmentation mode */ 1782 int max_fragment_hole; /* max hole size for block fragmentation mode */ 1783 1784 #ifdef CONFIG_F2FS_FS_COMPRESSION 1785 struct kmem_cache *page_array_slab; /* page array entry */ 1786 unsigned int page_array_slab_size; /* default page array slab size */ 1787 1788 /* For runtime compression statistics */ 1789 u64 compr_written_block; 1790 u64 compr_saved_block; 1791 u32 compr_new_inode; 1792 1793 /* For compressed block cache */ 1794 struct inode *compress_inode; /* cache compressed blocks */ 1795 unsigned int compress_percent; /* cache page percentage */ 1796 unsigned int compress_watermark; /* cache page watermark */ 1797 atomic_t compress_page_hit; /* cache hit count */ 1798 #endif 1799 1800 #ifdef CONFIG_F2FS_IOSTAT 1801 /* For app/fs IO statistics */ 1802 spinlock_t iostat_lock; 1803 unsigned long long rw_iostat[NR_IO_TYPE]; 1804 unsigned long long prev_rw_iostat[NR_IO_TYPE]; 1805 bool iostat_enable; 1806 unsigned long iostat_next_period; 1807 unsigned int iostat_period_ms; 1808 1809 /* For io latency related statistics info in one iostat period */ 1810 spinlock_t iostat_lat_lock; 1811 struct iostat_lat_info *iostat_io_lat; 1812 #endif 1813 }; 1814 1815 #ifdef CONFIG_F2FS_FAULT_INJECTION 1816 #define f2fs_show_injection_info(sbi, type) \ 1817 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \ 1818 KERN_INFO, sbi->sb->s_id, \ 1819 f2fs_fault_name[type], \ 1820 __func__, __builtin_return_address(0)) 1821 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1822 { 1823 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1824 1825 if (!ffi->inject_rate) 1826 return false; 1827 1828 if (!IS_FAULT_SET(ffi, type)) 1829 return false; 1830 1831 atomic_inc(&ffi->inject_ops); 1832 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1833 atomic_set(&ffi->inject_ops, 0); 1834 return true; 1835 } 1836 return false; 1837 } 1838 #else 1839 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1840 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1841 { 1842 return false; 1843 } 1844 #endif 1845 1846 /* 1847 * Test if the mounted volume is a multi-device volume. 1848 * - For a single regular disk volume, sbi->s_ndevs is 0. 1849 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1850 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 
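 *
 * Illustrative example (not original text): per-device work is normally
 * gated on this helper, e.g.
 *
 *	if (f2fs_is_multi_device(sbi))
 *		for (i = 0; i < sbi->s_ndevs; i++)
 *			flush_one_device(&sbi->devs[i]);	// hypothetical helper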
1851 */ 1852 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1853 { 1854 return sbi->s_ndevs > 1; 1855 } 1856 1857 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1858 { 1859 unsigned long now = jiffies; 1860 1861 sbi->last_time[type] = now; 1862 1863 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1864 if (type == REQ_TIME) { 1865 sbi->last_time[DISCARD_TIME] = now; 1866 sbi->last_time[GC_TIME] = now; 1867 } 1868 } 1869 1870 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1871 { 1872 unsigned long interval = sbi->interval_time[type] * HZ; 1873 1874 return time_after(jiffies, sbi->last_time[type] + interval); 1875 } 1876 1877 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1878 int type) 1879 { 1880 unsigned long interval = sbi->interval_time[type] * HZ; 1881 unsigned int wait_ms = 0; 1882 long delta; 1883 1884 delta = (sbi->last_time[type] + interval) - jiffies; 1885 if (delta > 0) 1886 wait_ms = jiffies_to_msecs(delta); 1887 1888 return wait_ms; 1889 } 1890 1891 /* 1892 * Inline functions 1893 */ 1894 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1895 const void *address, unsigned int length) 1896 { 1897 struct { 1898 struct shash_desc shash; 1899 char ctx[4]; 1900 } desc; 1901 int err; 1902 1903 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1904 1905 desc.shash.tfm = sbi->s_chksum_driver; 1906 *(u32 *)desc.ctx = crc; 1907 1908 err = crypto_shash_update(&desc.shash, address, length); 1909 BUG_ON(err); 1910 1911 return *(u32 *)desc.ctx; 1912 } 1913 1914 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1915 unsigned int length) 1916 { 1917 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1918 } 1919 1920 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1921 void *buf, size_t buf_size) 1922 { 1923 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1924 } 1925 1926 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1927 const void *address, unsigned int length) 1928 { 1929 return __f2fs_crc32(sbi, crc, address, length); 1930 } 1931 1932 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1933 { 1934 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1935 } 1936 1937 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1938 { 1939 return sb->s_fs_info; 1940 } 1941 1942 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1943 { 1944 return F2FS_SB(inode->i_sb); 1945 } 1946 1947 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1948 { 1949 return F2FS_I_SB(mapping->host); 1950 } 1951 1952 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1953 { 1954 return F2FS_M_SB(page_file_mapping(page)); 1955 } 1956 1957 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1958 { 1959 return (struct f2fs_super_block *)(sbi->raw_super); 1960 } 1961 1962 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1963 { 1964 return (struct f2fs_checkpoint *)(sbi->ckpt); 1965 } 1966 1967 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1968 { 1969 return (struct f2fs_node *)page_address(page); 1970 } 1971 1972 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1973 { 1974 return &((struct f2fs_node *)page_address(page))->i; 1975 } 1976 1977 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 1978 { 1979 return (struct 
f2fs_nm_info *)(sbi->nm_info); 1980 } 1981 1982 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1983 { 1984 return (struct f2fs_sm_info *)(sbi->sm_info); 1985 } 1986 1987 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1988 { 1989 return (struct sit_info *)(SM_I(sbi)->sit_info); 1990 } 1991 1992 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1993 { 1994 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1995 } 1996 1997 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1998 { 1999 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 2000 } 2001 2002 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 2003 { 2004 return sbi->meta_inode->i_mapping; 2005 } 2006 2007 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 2008 { 2009 return sbi->node_inode->i_mapping; 2010 } 2011 2012 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 2013 { 2014 return test_bit(type, &sbi->s_flag); 2015 } 2016 2017 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2018 { 2019 set_bit(type, &sbi->s_flag); 2020 } 2021 2022 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2023 { 2024 clear_bit(type, &sbi->s_flag); 2025 } 2026 2027 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 2028 { 2029 return le64_to_cpu(cp->checkpoint_ver); 2030 } 2031 2032 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 2033 { 2034 if (type < F2FS_MAX_QUOTAS) 2035 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 2036 return 0; 2037 } 2038 2039 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 2040 { 2041 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 2042 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 2043 } 2044 2045 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2046 { 2047 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2048 2049 return ckpt_flags & f; 2050 } 2051 2052 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2053 { 2054 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 2055 } 2056 2057 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2058 { 2059 unsigned int ckpt_flags; 2060 2061 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2062 ckpt_flags |= f; 2063 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2064 } 2065 2066 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2067 { 2068 unsigned long flags; 2069 2070 spin_lock_irqsave(&sbi->cp_lock, flags); 2071 __set_ckpt_flags(F2FS_CKPT(sbi), f); 2072 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2073 } 2074 2075 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2076 { 2077 unsigned int ckpt_flags; 2078 2079 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2080 ckpt_flags &= (~f); 2081 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2082 } 2083 2084 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2085 { 2086 unsigned long flags; 2087 2088 spin_lock_irqsave(&sbi->cp_lock, flags); 2089 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 2090 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2091 } 2092 2093 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 2094 { 2095 down_read(&sbi->cp_rwsem); 2096 } 2097 2098 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 2099 { 2100 if (time_to_inject(sbi, 
FAULT_LOCK_OP)) { 2101 f2fs_show_injection_info(sbi, FAULT_LOCK_OP); 2102 return 0; 2103 } 2104 return down_read_trylock(&sbi->cp_rwsem); 2105 } 2106 2107 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 2108 { 2109 up_read(&sbi->cp_rwsem); 2110 } 2111 2112 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 2113 { 2114 down_write(&sbi->cp_rwsem); 2115 } 2116 2117 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 2118 { 2119 up_write(&sbi->cp_rwsem); 2120 } 2121 2122 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 2123 { 2124 int reason = CP_SYNC; 2125 2126 if (test_opt(sbi, FASTBOOT)) 2127 reason = CP_FASTBOOT; 2128 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 2129 reason = CP_UMOUNT; 2130 return reason; 2131 } 2132 2133 static inline bool __remain_node_summaries(int reason) 2134 { 2135 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 2136 } 2137 2138 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 2139 { 2140 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 2141 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 2142 } 2143 2144 /* 2145 * Check whether the inode has blocks or not 2146 */ 2147 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 2148 { 2149 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 2150 2151 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2152 } 2153 2154 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2155 { 2156 return ofs == XATTR_NODE_OFFSET; 2157 } 2158 2159 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2160 struct inode *inode, bool cap) 2161 { 2162 if (!inode) 2163 return true; 2164 if (!test_opt(sbi, RESERVE_ROOT)) 2165 return false; 2166 if (IS_NOQUOTA(inode)) 2167 return true; 2168 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2169 return true; 2170 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2171 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2172 return true; 2173 if (cap && capable(CAP_SYS_RESOURCE)) 2174 return true; 2175 return false; 2176 } 2177 2178 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2179 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2180 struct inode *inode, blkcnt_t *count) 2181 { 2182 blkcnt_t diff = 0, release = 0; 2183 block_t avail_user_block_count; 2184 int ret; 2185 2186 ret = dquot_reserve_block(inode, *count); 2187 if (ret) 2188 return ret; 2189 2190 if (time_to_inject(sbi, FAULT_BLOCK)) { 2191 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2192 release = *count; 2193 goto release_quota; 2194 } 2195 2196 /* 2197 * let's increase this in prior to actual block count change in order 2198 * for f2fs_sync_file to avoid data races when deciding checkpoint. 
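 *
 * Illustrative summary of the flow below (not original text):
 *
 *	dquot_reserve_block(inode, *count);		// quota first
 *	percpu_counter_add(&sbi->alloc_valid_block_count, *count);
 *	spin_lock(&sbi->stat_lock);
 *	...check against avail_user_block_count, trim *count if short...
 *	spin_unlock(&sbi->stat_lock);
 *	// any shortfall is rolled back via percpu_counter_sub() and
 *	// dquot_release_reservation_block()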
2199 */ 2200 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2201 2202 spin_lock(&sbi->stat_lock); 2203 sbi->total_valid_block_count += (block_t)(*count); 2204 avail_user_block_count = sbi->user_block_count - 2205 sbi->current_reserved_blocks; 2206 2207 if (!__allow_reserved_blocks(sbi, inode, true)) 2208 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2209 2210 if (F2FS_IO_ALIGNED(sbi)) 2211 avail_user_block_count -= sbi->blocks_per_seg * 2212 SM_I(sbi)->additional_reserved_segments; 2213 2214 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2215 if (avail_user_block_count > sbi->unusable_block_count) 2216 avail_user_block_count -= sbi->unusable_block_count; 2217 else 2218 avail_user_block_count = 0; 2219 } 2220 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2221 diff = sbi->total_valid_block_count - avail_user_block_count; 2222 if (diff > *count) 2223 diff = *count; 2224 *count -= diff; 2225 release = diff; 2226 sbi->total_valid_block_count -= diff; 2227 if (!*count) { 2228 spin_unlock(&sbi->stat_lock); 2229 goto enospc; 2230 } 2231 } 2232 spin_unlock(&sbi->stat_lock); 2233 2234 if (unlikely(release)) { 2235 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2236 dquot_release_reservation_block(inode, release); 2237 } 2238 f2fs_i_blocks_write(inode, *count, true, true); 2239 return 0; 2240 2241 enospc: 2242 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2243 release_quota: 2244 dquot_release_reservation_block(inode, release); 2245 return -ENOSPC; 2246 } 2247 2248 __printf(2, 3) 2249 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2250 2251 #define f2fs_err(sbi, fmt, ...) \ 2252 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2253 #define f2fs_warn(sbi, fmt, ...) \ 2254 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2255 #define f2fs_notice(sbi, fmt, ...) \ 2256 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2257 #define f2fs_info(sbi, fmt, ...) \ 2258 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2259 #define f2fs_debug(sbi, fmt, ...) 
\ 2260 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2261 2262 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2263 struct inode *inode, 2264 block_t count) 2265 { 2266 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2267 2268 spin_lock(&sbi->stat_lock); 2269 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2270 sbi->total_valid_block_count -= (block_t)count; 2271 if (sbi->reserved_blocks && 2272 sbi->current_reserved_blocks < sbi->reserved_blocks) 2273 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2274 sbi->current_reserved_blocks + count); 2275 spin_unlock(&sbi->stat_lock); 2276 if (unlikely(inode->i_blocks < sectors)) { 2277 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2278 inode->i_ino, 2279 (unsigned long long)inode->i_blocks, 2280 (unsigned long long)sectors); 2281 set_sbi_flag(sbi, SBI_NEED_FSCK); 2282 return; 2283 } 2284 f2fs_i_blocks_write(inode, count, false, true); 2285 } 2286 2287 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2288 { 2289 atomic_inc(&sbi->nr_pages[count_type]); 2290 2291 if (count_type == F2FS_DIRTY_DENTS || 2292 count_type == F2FS_DIRTY_NODES || 2293 count_type == F2FS_DIRTY_META || 2294 count_type == F2FS_DIRTY_QDATA || 2295 count_type == F2FS_DIRTY_IMETA) 2296 set_sbi_flag(sbi, SBI_IS_DIRTY); 2297 } 2298 2299 static inline void inode_inc_dirty_pages(struct inode *inode) 2300 { 2301 atomic_inc(&F2FS_I(inode)->dirty_pages); 2302 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2303 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2304 if (IS_NOQUOTA(inode)) 2305 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2306 } 2307 2308 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2309 { 2310 atomic_dec(&sbi->nr_pages[count_type]); 2311 } 2312 2313 static inline void inode_dec_dirty_pages(struct inode *inode) 2314 { 2315 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2316 !S_ISLNK(inode->i_mode)) 2317 return; 2318 2319 atomic_dec(&F2FS_I(inode)->dirty_pages); 2320 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2321 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2322 if (IS_NOQUOTA(inode)) 2323 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2324 } 2325 2326 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2327 { 2328 return atomic_read(&sbi->nr_pages[count_type]); 2329 } 2330 2331 static inline int get_dirty_pages(struct inode *inode) 2332 { 2333 return atomic_read(&F2FS_I(inode)->dirty_pages); 2334 } 2335 2336 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2337 { 2338 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2339 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2340 sbi->log_blocks_per_seg; 2341 2342 return segs / sbi->segs_per_sec; 2343 } 2344 2345 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2346 { 2347 return sbi->total_valid_block_count; 2348 } 2349 2350 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2351 { 2352 return sbi->discard_blks; 2353 } 2354 2355 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2356 { 2357 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2358 2359 /* return NAT or SIT bitmap */ 2360 if (flag == NAT_BITMAP) 2361 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2362 else if (flag == SIT_BITMAP) 2363 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2364 2365 return 0; 2366 } 2367 2368 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2369 { 2370 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2371 } 2372 2373 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2374 { 2375 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2376 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; 2377 int offset; 2378 2379 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2380 offset = (flag == SIT_BITMAP) ? 2381 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2382 /* 2383 * if large_nat_bitmap feature is enabled, leave checksum 2384 * protection for all nat/sit bitmaps. 2385 */ 2386 return tmp_ptr + offset + sizeof(__le32); 2387 } 2388 2389 if (__cp_payload(sbi) > 0) { 2390 if (flag == NAT_BITMAP) 2391 return &ckpt->sit_nat_version_bitmap; 2392 else 2393 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2394 } else { 2395 offset = (flag == NAT_BITMAP) ? 2396 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2397 return tmp_ptr + offset; 2398 } 2399 } 2400 2401 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2402 { 2403 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2404 2405 if (sbi->cur_cp_pack == 2) 2406 start_addr += sbi->blocks_per_seg; 2407 return start_addr; 2408 } 2409 2410 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2411 { 2412 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2413 2414 if (sbi->cur_cp_pack == 1) 2415 start_addr += sbi->blocks_per_seg; 2416 return start_addr; 2417 } 2418 2419 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2420 { 2421 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 
2 : 1; 2422 } 2423 2424 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2425 { 2426 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2427 } 2428 2429 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2430 struct inode *inode, bool is_inode) 2431 { 2432 block_t valid_block_count; 2433 unsigned int valid_node_count, user_block_count; 2434 int err; 2435 2436 if (is_inode) { 2437 if (inode) { 2438 err = dquot_alloc_inode(inode); 2439 if (err) 2440 return err; 2441 } 2442 } else { 2443 err = dquot_reserve_block(inode, 1); 2444 if (err) 2445 return err; 2446 } 2447 2448 if (time_to_inject(sbi, FAULT_BLOCK)) { 2449 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2450 goto enospc; 2451 } 2452 2453 spin_lock(&sbi->stat_lock); 2454 2455 valid_block_count = sbi->total_valid_block_count + 2456 sbi->current_reserved_blocks + 1; 2457 2458 if (!__allow_reserved_blocks(sbi, inode, false)) 2459 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2460 2461 if (F2FS_IO_ALIGNED(sbi)) 2462 valid_block_count += sbi->blocks_per_seg * 2463 SM_I(sbi)->additional_reserved_segments; 2464 2465 user_block_count = sbi->user_block_count; 2466 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2467 user_block_count -= sbi->unusable_block_count; 2468 2469 if (unlikely(valid_block_count > user_block_count)) { 2470 spin_unlock(&sbi->stat_lock); 2471 goto enospc; 2472 } 2473 2474 valid_node_count = sbi->total_valid_node_count + 1; 2475 if (unlikely(valid_node_count > sbi->total_node_count)) { 2476 spin_unlock(&sbi->stat_lock); 2477 goto enospc; 2478 } 2479 2480 sbi->total_valid_node_count++; 2481 sbi->total_valid_block_count++; 2482 spin_unlock(&sbi->stat_lock); 2483 2484 if (inode) { 2485 if (is_inode) 2486 f2fs_mark_inode_dirty_sync(inode, true); 2487 else 2488 f2fs_i_blocks_write(inode, 1, true, true); 2489 } 2490 2491 percpu_counter_inc(&sbi->alloc_valid_block_count); 2492 return 0; 2493 2494 enospc: 2495 if (is_inode) { 2496 if (inode) 2497 dquot_free_inode(inode); 2498 } else { 2499 dquot_release_reservation_block(inode, 1); 2500 } 2501 return -ENOSPC; 2502 } 2503 2504 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2505 struct inode *inode, bool is_inode) 2506 { 2507 spin_lock(&sbi->stat_lock); 2508 2509 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2510 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2511 2512 sbi->total_valid_node_count--; 2513 sbi->total_valid_block_count--; 2514 if (sbi->reserved_blocks && 2515 sbi->current_reserved_blocks < sbi->reserved_blocks) 2516 sbi->current_reserved_blocks++; 2517 2518 spin_unlock(&sbi->stat_lock); 2519 2520 if (is_inode) { 2521 dquot_free_inode(inode); 2522 } else { 2523 if (unlikely(inode->i_blocks == 0)) { 2524 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2525 inode->i_ino, 2526 (unsigned long long)inode->i_blocks); 2527 set_sbi_flag(sbi, SBI_NEED_FSCK); 2528 return; 2529 } 2530 f2fs_i_blocks_write(inode, 1, false, true); 2531 } 2532 } 2533 2534 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2535 { 2536 return sbi->total_valid_node_count; 2537 } 2538 2539 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2540 { 2541 percpu_counter_inc(&sbi->total_valid_inode_count); 2542 } 2543 2544 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2545 { 2546 percpu_counter_dec(&sbi->total_valid_inode_count); 2547 } 2548 2549 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2550 { 2551 return 
percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2552 } 2553 2554 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2555 pgoff_t index, bool for_write) 2556 { 2557 struct page *page; 2558 2559 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2560 if (!for_write) 2561 page = find_get_page_flags(mapping, index, 2562 FGP_LOCK | FGP_ACCESSED); 2563 else 2564 page = find_lock_page(mapping, index); 2565 if (page) 2566 return page; 2567 2568 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2569 f2fs_show_injection_info(F2FS_M_SB(mapping), 2570 FAULT_PAGE_ALLOC); 2571 return NULL; 2572 } 2573 } 2574 2575 if (!for_write) 2576 return grab_cache_page(mapping, index); 2577 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2578 } 2579 2580 static inline struct page *f2fs_pagecache_get_page( 2581 struct address_space *mapping, pgoff_t index, 2582 int fgp_flags, gfp_t gfp_mask) 2583 { 2584 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2585 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2586 return NULL; 2587 } 2588 2589 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2590 } 2591 2592 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2593 { 2594 char *src_kaddr = kmap(src); 2595 char *dst_kaddr = kmap(dst); 2596 2597 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2598 kunmap(dst); 2599 kunmap(src); 2600 } 2601 2602 static inline void f2fs_put_page(struct page *page, int unlock) 2603 { 2604 if (!page) 2605 return; 2606 2607 if (unlock) { 2608 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2609 unlock_page(page); 2610 } 2611 put_page(page); 2612 } 2613 2614 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2615 { 2616 if (dn->node_page) 2617 f2fs_put_page(dn->node_page, 1); 2618 if (dn->inode_page && dn->node_page != dn->inode_page) 2619 f2fs_put_page(dn->inode_page, 0); 2620 dn->node_page = NULL; 2621 dn->inode_page = NULL; 2622 } 2623 2624 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2625 size_t size) 2626 { 2627 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2628 } 2629 2630 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, 2631 gfp_t flags) 2632 { 2633 void *entry; 2634 2635 entry = kmem_cache_alloc(cachep, flags); 2636 if (!entry) 2637 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2638 return entry; 2639 } 2640 2641 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2642 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) 2643 { 2644 if (nofail) 2645 return f2fs_kmem_cache_alloc_nofail(cachep, flags); 2646 2647 if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) { 2648 f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC); 2649 return NULL; 2650 } 2651 2652 return kmem_cache_alloc(cachep, flags); 2653 } 2654 2655 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2656 { 2657 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2658 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2659 get_pages(sbi, F2FS_WB_CP_DATA) || 2660 get_pages(sbi, F2FS_DIO_READ) || 2661 get_pages(sbi, F2FS_DIO_WRITE)) 2662 return true; 2663 2664 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2665 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2666 return true; 2667 2668 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2669 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2670 return true; 2671 return false; 2672 } 2673 2674 static inline bool 
is_idle(struct f2fs_sb_info *sbi, int type) 2675 { 2676 if (sbi->gc_mode == GC_URGENT_HIGH) 2677 return true; 2678 2679 if (is_inflight_io(sbi, type)) 2680 return false; 2681 2682 if (sbi->gc_mode == GC_URGENT_LOW && 2683 (type == DISCARD_TIME || type == GC_TIME)) 2684 return true; 2685 2686 return f2fs_time_over(sbi, type); 2687 } 2688 2689 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2690 unsigned long index, void *item) 2691 { 2692 while (radix_tree_insert(root, index, item)) 2693 cond_resched(); 2694 } 2695 2696 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2697 2698 static inline bool IS_INODE(struct page *page) 2699 { 2700 struct f2fs_node *p = F2FS_NODE(page); 2701 2702 return RAW_IS_INODE(p); 2703 } 2704 2705 static inline int offset_in_addr(struct f2fs_inode *i) 2706 { 2707 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2708 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2709 } 2710 2711 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2712 { 2713 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2714 } 2715 2716 static inline int f2fs_has_extra_attr(struct inode *inode); 2717 static inline block_t data_blkaddr(struct inode *inode, 2718 struct page *node_page, unsigned int offset) 2719 { 2720 struct f2fs_node *raw_node; 2721 __le32 *addr_array; 2722 int base = 0; 2723 bool is_inode = IS_INODE(node_page); 2724 2725 raw_node = F2FS_NODE(node_page); 2726 2727 if (is_inode) { 2728 if (!inode) 2729 /* from GC path only */ 2730 base = offset_in_addr(&raw_node->i); 2731 else if (f2fs_has_extra_attr(inode)) 2732 base = get_extra_isize(inode); 2733 } 2734 2735 addr_array = blkaddr_in_node(raw_node); 2736 return le32_to_cpu(addr_array[base + offset]); 2737 } 2738 2739 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2740 { 2741 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2742 } 2743 2744 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2745 { 2746 int mask; 2747 2748 addr += (nr >> 3); 2749 mask = 1 << (7 - (nr & 0x07)); 2750 return mask & *addr; 2751 } 2752 2753 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2754 { 2755 int mask; 2756 2757 addr += (nr >> 3); 2758 mask = 1 << (7 - (nr & 0x07)); 2759 *addr |= mask; 2760 } 2761 2762 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2763 { 2764 int mask; 2765 2766 addr += (nr >> 3); 2767 mask = 1 << (7 - (nr & 0x07)); 2768 *addr &= ~mask; 2769 } 2770 2771 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2772 { 2773 int mask; 2774 int ret; 2775 2776 addr += (nr >> 3); 2777 mask = 1 << (7 - (nr & 0x07)); 2778 ret = mask & *addr; 2779 *addr |= mask; 2780 return ret; 2781 } 2782 2783 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2784 { 2785 int mask; 2786 int ret; 2787 2788 addr += (nr >> 3); 2789 mask = 1 << (7 - (nr & 0x07)); 2790 ret = mask & *addr; 2791 *addr &= ~mask; 2792 return ret; 2793 } 2794 2795 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2796 { 2797 int mask; 2798 2799 addr += (nr >> 3); 2800 mask = 1 << (7 - (nr & 0x07)); 2801 *addr ^= mask; 2802 } 2803 2804 /* 2805 * On-disk inode flags (f2fs_inode::i_flags) 2806 */ 2807 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2808 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2809 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2810 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2811 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file 
*/ 2812 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2813 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2814 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2815 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2816 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2817 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2818 2819 /* Flags that should be inherited by new inodes from their parent. */ 2820 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2821 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2822 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2823 2824 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2825 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2826 F2FS_CASEFOLD_FL)) 2827 2828 /* Flags that are appropriate for non-directories/regular files. */ 2829 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2830 2831 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2832 { 2833 if (S_ISDIR(mode)) 2834 return flags; 2835 else if (S_ISREG(mode)) 2836 return flags & F2FS_REG_FLMASK; 2837 else 2838 return flags & F2FS_OTHER_FLMASK; 2839 } 2840 2841 static inline void __mark_inode_dirty_flag(struct inode *inode, 2842 int flag, bool set) 2843 { 2844 switch (flag) { 2845 case FI_INLINE_XATTR: 2846 case FI_INLINE_DATA: 2847 case FI_INLINE_DENTRY: 2848 case FI_NEW_INODE: 2849 if (set) 2850 return; 2851 fallthrough; 2852 case FI_DATA_EXIST: 2853 case FI_INLINE_DOTS: 2854 case FI_PIN_FILE: 2855 case FI_COMPRESS_RELEASED: 2856 f2fs_mark_inode_dirty_sync(inode, true); 2857 } 2858 } 2859 2860 static inline void set_inode_flag(struct inode *inode, int flag) 2861 { 2862 set_bit(flag, F2FS_I(inode)->flags); 2863 __mark_inode_dirty_flag(inode, flag, true); 2864 } 2865 2866 static inline int is_inode_flag_set(struct inode *inode, int flag) 2867 { 2868 return test_bit(flag, F2FS_I(inode)->flags); 2869 } 2870 2871 static inline void clear_inode_flag(struct inode *inode, int flag) 2872 { 2873 clear_bit(flag, F2FS_I(inode)->flags); 2874 __mark_inode_dirty_flag(inode, flag, false); 2875 } 2876 2877 static inline bool f2fs_verity_in_progress(struct inode *inode) 2878 { 2879 return IS_ENABLED(CONFIG_FS_VERITY) && 2880 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2881 } 2882 2883 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2884 { 2885 F2FS_I(inode)->i_acl_mode = mode; 2886 set_inode_flag(inode, FI_ACL_MODE); 2887 f2fs_mark_inode_dirty_sync(inode, false); 2888 } 2889 2890 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2891 { 2892 if (inc) 2893 inc_nlink(inode); 2894 else 2895 drop_nlink(inode); 2896 f2fs_mark_inode_dirty_sync(inode, true); 2897 } 2898 2899 static inline void f2fs_i_blocks_write(struct inode *inode, 2900 block_t diff, bool add, bool claim) 2901 { 2902 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2903 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2904 2905 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2906 if (add) { 2907 if (claim) 2908 dquot_claim_block(inode, diff); 2909 else 2910 dquot_alloc_block_nofail(inode, diff); 2911 } else { 2912 dquot_free_block(inode, diff); 2913 } 2914 2915 f2fs_mark_inode_dirty_sync(inode, true); 2916 if (clean || recover) 2917 set_inode_flag(inode, FI_AUTO_RECOVER); 2918 } 2919 2920 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2921 { 2922 bool 
clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2923 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2924 2925 if (i_size_read(inode) == i_size) 2926 return; 2927 2928 i_size_write(inode, i_size); 2929 f2fs_mark_inode_dirty_sync(inode, true); 2930 if (clean || recover) 2931 set_inode_flag(inode, FI_AUTO_RECOVER); 2932 } 2933 2934 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2935 { 2936 F2FS_I(inode)->i_current_depth = depth; 2937 f2fs_mark_inode_dirty_sync(inode, true); 2938 } 2939 2940 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2941 unsigned int count) 2942 { 2943 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2944 f2fs_mark_inode_dirty_sync(inode, true); 2945 } 2946 2947 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2948 { 2949 F2FS_I(inode)->i_xattr_nid = xnid; 2950 f2fs_mark_inode_dirty_sync(inode, true); 2951 } 2952 2953 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2954 { 2955 F2FS_I(inode)->i_pino = pino; 2956 f2fs_mark_inode_dirty_sync(inode, true); 2957 } 2958 2959 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2960 { 2961 struct f2fs_inode_info *fi = F2FS_I(inode); 2962 2963 if (ri->i_inline & F2FS_INLINE_XATTR) 2964 set_bit(FI_INLINE_XATTR, fi->flags); 2965 if (ri->i_inline & F2FS_INLINE_DATA) 2966 set_bit(FI_INLINE_DATA, fi->flags); 2967 if (ri->i_inline & F2FS_INLINE_DENTRY) 2968 set_bit(FI_INLINE_DENTRY, fi->flags); 2969 if (ri->i_inline & F2FS_DATA_EXIST) 2970 set_bit(FI_DATA_EXIST, fi->flags); 2971 if (ri->i_inline & F2FS_INLINE_DOTS) 2972 set_bit(FI_INLINE_DOTS, fi->flags); 2973 if (ri->i_inline & F2FS_EXTRA_ATTR) 2974 set_bit(FI_EXTRA_ATTR, fi->flags); 2975 if (ri->i_inline & F2FS_PIN_FILE) 2976 set_bit(FI_PIN_FILE, fi->flags); 2977 if (ri->i_inline & F2FS_COMPRESS_RELEASED) 2978 set_bit(FI_COMPRESS_RELEASED, fi->flags); 2979 } 2980 2981 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2982 { 2983 ri->i_inline = 0; 2984 2985 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2986 ri->i_inline |= F2FS_INLINE_XATTR; 2987 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2988 ri->i_inline |= F2FS_INLINE_DATA; 2989 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2990 ri->i_inline |= F2FS_INLINE_DENTRY; 2991 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2992 ri->i_inline |= F2FS_DATA_EXIST; 2993 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2994 ri->i_inline |= F2FS_INLINE_DOTS; 2995 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2996 ri->i_inline |= F2FS_EXTRA_ATTR; 2997 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2998 ri->i_inline |= F2FS_PIN_FILE; 2999 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) 3000 ri->i_inline |= F2FS_COMPRESS_RELEASED; 3001 } 3002 3003 static inline int f2fs_has_extra_attr(struct inode *inode) 3004 { 3005 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 3006 } 3007 3008 static inline int f2fs_has_inline_xattr(struct inode *inode) 3009 { 3010 return is_inode_flag_set(inode, FI_INLINE_XATTR); 3011 } 3012 3013 static inline int f2fs_compressed_file(struct inode *inode) 3014 { 3015 return S_ISREG(inode->i_mode) && 3016 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 3017 } 3018 3019 static inline bool f2fs_need_compress_data(struct inode *inode) 3020 { 3021 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 3022 3023 if (!f2fs_compressed_file(inode)) 3024 return false; 3025 3026 if (compress_mode == COMPR_MODE_FS) 3027 return true; 3028 else if (compress_mode == 
COMPR_MODE_USER && 3029 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 3030 return true; 3031 3032 return false; 3033 } 3034 3035 static inline unsigned int addrs_per_inode(struct inode *inode) 3036 { 3037 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 3038 get_inline_xattr_addrs(inode); 3039 3040 if (!f2fs_compressed_file(inode)) 3041 return addrs; 3042 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 3043 } 3044 3045 static inline unsigned int addrs_per_block(struct inode *inode) 3046 { 3047 if (!f2fs_compressed_file(inode)) 3048 return DEF_ADDRS_PER_BLOCK; 3049 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 3050 } 3051 3052 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 3053 { 3054 struct f2fs_inode *ri = F2FS_INODE(page); 3055 3056 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 3057 get_inline_xattr_addrs(inode)]); 3058 } 3059 3060 static inline int inline_xattr_size(struct inode *inode) 3061 { 3062 if (f2fs_has_inline_xattr(inode)) 3063 return get_inline_xattr_addrs(inode) * sizeof(__le32); 3064 return 0; 3065 } 3066 3067 static inline int f2fs_has_inline_data(struct inode *inode) 3068 { 3069 return is_inode_flag_set(inode, FI_INLINE_DATA); 3070 } 3071 3072 static inline int f2fs_exist_data(struct inode *inode) 3073 { 3074 return is_inode_flag_set(inode, FI_DATA_EXIST); 3075 } 3076 3077 static inline int f2fs_has_inline_dots(struct inode *inode) 3078 { 3079 return is_inode_flag_set(inode, FI_INLINE_DOTS); 3080 } 3081 3082 static inline int f2fs_is_mmap_file(struct inode *inode) 3083 { 3084 return is_inode_flag_set(inode, FI_MMAP_FILE); 3085 } 3086 3087 static inline bool f2fs_is_pinned_file(struct inode *inode) 3088 { 3089 return is_inode_flag_set(inode, FI_PIN_FILE); 3090 } 3091 3092 static inline bool f2fs_is_atomic_file(struct inode *inode) 3093 { 3094 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 3095 } 3096 3097 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 3098 { 3099 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 3100 } 3101 3102 static inline bool f2fs_is_volatile_file(struct inode *inode) 3103 { 3104 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 3105 } 3106 3107 static inline bool f2fs_is_first_block_written(struct inode *inode) 3108 { 3109 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 3110 } 3111 3112 static inline bool f2fs_is_drop_cache(struct inode *inode) 3113 { 3114 return is_inode_flag_set(inode, FI_DROP_CACHE); 3115 } 3116 3117 static inline void *inline_data_addr(struct inode *inode, struct page *page) 3118 { 3119 struct f2fs_inode *ri = F2FS_INODE(page); 3120 int extra_size = get_extra_isize(inode); 3121 3122 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 3123 } 3124 3125 static inline int f2fs_has_inline_dentry(struct inode *inode) 3126 { 3127 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 3128 } 3129 3130 static inline int is_file(struct inode *inode, int type) 3131 { 3132 return F2FS_I(inode)->i_advise & type; 3133 } 3134 3135 static inline void set_file(struct inode *inode, int type) 3136 { 3137 if (is_file(inode, type)) 3138 return; 3139 F2FS_I(inode)->i_advise |= type; 3140 f2fs_mark_inode_dirty_sync(inode, true); 3141 } 3142 3143 static inline void clear_file(struct inode *inode, int type) 3144 { 3145 if (!is_file(inode, type)) 3146 return; 3147 F2FS_I(inode)->i_advise &= ~type; 3148 f2fs_mark_inode_dirty_sync(inode, true); 3149 } 3150 3151 static inline bool f2fs_is_time_consistent(struct inode *inode) 3152 { 3153 if 
(!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 3154 return false; 3155 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 3156 return false; 3157 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 3158 return false; 3159 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 3160 &F2FS_I(inode)->i_crtime)) 3161 return false; 3162 return true; 3163 } 3164 3165 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 3166 { 3167 bool ret; 3168 3169 if (dsync) { 3170 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3171 3172 spin_lock(&sbi->inode_lock[DIRTY_META]); 3173 ret = list_empty(&F2FS_I(inode)->gdirty_list); 3174 spin_unlock(&sbi->inode_lock[DIRTY_META]); 3175 return ret; 3176 } 3177 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 3178 file_keep_isize(inode) || 3179 i_size_read(inode) & ~PAGE_MASK) 3180 return false; 3181 3182 if (!f2fs_is_time_consistent(inode)) 3183 return false; 3184 3185 spin_lock(&F2FS_I(inode)->i_size_lock); 3186 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 3187 spin_unlock(&F2FS_I(inode)->i_size_lock); 3188 3189 return ret; 3190 } 3191 3192 static inline bool f2fs_readonly(struct super_block *sb) 3193 { 3194 return sb_rdonly(sb); 3195 } 3196 3197 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 3198 { 3199 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3200 } 3201 3202 static inline bool is_dot_dotdot(const u8 *name, size_t len) 3203 { 3204 if (len == 1 && name[0] == '.') 3205 return true; 3206 3207 if (len == 2 && name[0] == '.' && name[1] == '.') 3208 return true; 3209 3210 return false; 3211 } 3212 3213 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3214 size_t size, gfp_t flags) 3215 { 3216 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3217 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3218 return NULL; 3219 } 3220 3221 return kmalloc(size, flags); 3222 } 3223 3224 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3225 size_t size, gfp_t flags) 3226 { 3227 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3228 } 3229 3230 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3231 size_t size, gfp_t flags) 3232 { 3233 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3234 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3235 return NULL; 3236 } 3237 3238 return kvmalloc(size, flags); 3239 } 3240 3241 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3242 size_t size, gfp_t flags) 3243 { 3244 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3245 } 3246 3247 static inline int get_extra_isize(struct inode *inode) 3248 { 3249 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3250 } 3251 3252 static inline int get_inline_xattr_addrs(struct inode *inode) 3253 { 3254 return F2FS_I(inode)->i_inline_xattr_size; 3255 } 3256 3257 #define f2fs_get_inode_mode(i) \ 3258 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3259 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3260 3261 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3262 (offsetof(struct f2fs_inode, i_extra_end) - \ 3263 offsetof(struct f2fs_inode, i_extra_isize)) \ 3264 3265 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3266 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3267 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3268 sizeof((f2fs_inode)->field)) \ 3269 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3270 3271 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3272 3273 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3274 3275 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3276 block_t blkaddr, int type); 3277 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3278 block_t blkaddr, int type) 3279 { 3280 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3281 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3282 blkaddr, type); 3283 f2fs_bug_on(sbi, 1); 3284 } 3285 } 3286 3287 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3288 { 3289 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3290 blkaddr == COMPRESS_ADDR) 3291 return false; 3292 return true; 3293 } 3294 3295 /* 3296 * file.c 3297 */ 3298 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3299 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3300 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3301 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3302 int f2fs_truncate(struct inode *inode); 3303 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path, 3304 struct kstat *stat, u32 request_mask, unsigned int flags); 3305 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, 3306 struct iattr *attr); 3307 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3308 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3309 int f2fs_precache_extents(struct inode *inode); 3310 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); 3311 int f2fs_fileattr_set(struct user_namespace *mnt_userns, 3312 struct dentry *dentry, struct fileattr *fa); 3313 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3314 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3315 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3316 int f2fs_pin_file_control(struct inode *inode, bool inc); 3317 3318 /* 3319 * inode.c 3320 */ 3321 void f2fs_set_inode_flags(struct inode *inode); 3322 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3323 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3324 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3325 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3326 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3327 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3328 void f2fs_update_inode_page(struct inode *inode); 3329 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3330 void f2fs_evict_inode(struct inode *inode); 3331 void f2fs_handle_failed_inode(struct inode *inode); 3332 3333 /* 3334 * namei.c 3335 */ 3336 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3337 bool hot, bool set); 3338 struct dentry *f2fs_get_parent(struct dentry *child); 3339 3340 /* 
3341 * dir.c 3342 */ 3343 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3344 int f2fs_init_casefolded_name(const struct inode *dir, 3345 struct f2fs_filename *fname); 3346 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3347 int lookup, struct f2fs_filename *fname); 3348 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3349 struct f2fs_filename *fname); 3350 void f2fs_free_filename(struct f2fs_filename *fname); 3351 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3352 const struct f2fs_filename *fname, int *max_slots); 3353 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3354 unsigned int start_pos, struct fscrypt_str *fstr); 3355 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3356 struct f2fs_dentry_ptr *d); 3357 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3358 const struct f2fs_filename *fname, struct page *dpage); 3359 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3360 unsigned int current_depth); 3361 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3362 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3363 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3364 const struct f2fs_filename *fname, 3365 struct page **res_page); 3366 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3367 const struct qstr *child, struct page **res_page); 3368 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3369 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3370 struct page **page); 3371 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3372 struct page *page, struct inode *inode); 3373 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3374 const struct f2fs_filename *fname); 3375 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3376 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3377 unsigned int bit_pos); 3378 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3379 struct inode *inode, nid_t ino, umode_t mode); 3380 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, 3381 struct inode *inode, nid_t ino, umode_t mode); 3382 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3383 struct inode *inode, nid_t ino, umode_t mode); 3384 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3385 struct inode *dir, struct inode *inode); 3386 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3387 bool f2fs_empty_dir(struct inode *dir); 3388 3389 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3390 { 3391 if (fscrypt_is_nokey_name(dentry)) 3392 return -ENOKEY; 3393 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3394 inode, inode->i_ino, inode->i_mode); 3395 } 3396 3397 /* 3398 * super.c 3399 */ 3400 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3401 void f2fs_inode_synced(struct inode *inode); 3402 int f2fs_dquot_initialize(struct inode *inode); 3403 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3404 int f2fs_quota_sync(struct super_block *sb, int type); 3405 loff_t max_file_blocks(struct inode *inode); 3406 void f2fs_quota_off_umount(struct super_block *sb); 3407 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3408 int f2fs_sync_fs(struct super_block *sb, int sync); 
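/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * typical metadata update brackets the work with f2fs_lock_op() and
 * f2fs_unlock_op() defined above so that a checkpoint cannot run in the
 * middle of it. The helper below exists only as an example and is not
 * referenced elsewhere.
 */
static inline void f2fs_example_set_size_locked(struct f2fs_sb_info *sbi,
					struct inode *inode, loff_t size)
{
	f2fs_lock_op(sbi);		/* take cp_rwsem shared: checkpoint waits */
	f2fs_i_size_write(inode, size);	/* update i_size and mark inode dirty */
	f2fs_unlock_op(sbi);
}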
3409 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3410 3411 /* 3412 * hash.c 3413 */ 3414 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3415 3416 /* 3417 * node.c 3418 */ 3419 struct node_info; 3420 3421 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3422 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3423 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3424 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3425 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3426 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3427 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3428 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3429 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3430 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3431 struct node_info *ni, bool checkpoint_context); 3432 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3433 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3434 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3435 int f2fs_truncate_xattr_node(struct inode *inode); 3436 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3437 unsigned int seq_id); 3438 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); 3439 int f2fs_remove_inode_page(struct inode *inode); 3440 struct page *f2fs_new_inode_page(struct inode *inode); 3441 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3442 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3443 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3444 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3445 int f2fs_move_node_page(struct page *node_page, int gc_type); 3446 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3447 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3448 struct writeback_control *wbc, bool atomic, 3449 unsigned int *seq_id); 3450 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3451 struct writeback_control *wbc, 3452 bool do_balance, enum iostat_type io_type); 3453 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3454 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3455 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 3456 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3457 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3458 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3459 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3460 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3461 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3462 unsigned int segno, struct f2fs_summary_block *sum); 3463 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); 3464 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3465 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3466 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3467 int __init f2fs_create_node_manager_caches(void); 3468 void f2fs_destroy_node_manager_caches(void); 3469 3470 /* 3471 * segment.c 3472 */ 3473 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3474 void f2fs_register_inmem_page(struct inode *inode, struct page *page); 3475 void 
f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure); 3476 void f2fs_drop_inmem_pages(struct inode *inode); 3477 void f2fs_drop_inmem_page(struct inode *inode, struct page *page); 3478 int f2fs_commit_inmem_pages(struct inode *inode); 3479 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3480 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3481 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3482 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3483 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3484 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3485 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3486 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3487 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); 3488 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3489 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3490 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3491 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3492 struct cp_control *cpc); 3493 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3494 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3495 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3496 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3497 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3498 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); 3499 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3500 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3501 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3502 void f2fs_get_new_segment(struct f2fs_sb_info *sbi, 3503 unsigned int *newseg, bool new_sec, int dir); 3504 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3505 unsigned int start, unsigned int end); 3506 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); 3507 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3508 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3509 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3510 struct cp_control *cpc); 3511 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3512 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3513 block_t blk_addr); 3514 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3515 enum iostat_type io_type); 3516 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3517 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3518 struct f2fs_io_info *fio); 3519 int f2fs_inplace_write_data(struct f2fs_io_info *fio); 3520 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3521 block_t old_blkaddr, block_t new_blkaddr, 3522 bool recover_curseg, bool recover_newaddr, 3523 bool from_gc); 3524 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3525 block_t old_addr, block_t new_addr, 3526 unsigned char version, bool recover_curseg, 3527 bool recover_newaddr); 3528 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3529 block_t old_blkaddr, block_t *new_blkaddr, 3530 struct f2fs_summary *sum, int type, 3531 struct f2fs_io_info *fio); 3532 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3533 block_t blkaddr, unsigned int blkcnt); 3534 void 
f2fs_wait_on_page_writeback(struct page *page, 3535 enum page_type type, bool ordered, bool locked); 3536 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); 3537 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3538 block_t len); 3539 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3540 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3541 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 3542 unsigned int val, int alloc); 3543 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3544 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); 3545 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); 3546 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); 3547 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); 3548 int __init f2fs_create_segment_manager_caches(void); 3549 void f2fs_destroy_segment_manager_caches(void); 3550 int f2fs_rw_hint_to_seg_type(enum rw_hint hint); 3551 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, 3552 enum page_type type, enum temp_type temp); 3553 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 3554 unsigned int segno); 3555 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 3556 unsigned int segno); 3557 3558 #define DEF_FRAGMENT_SIZE 4 3559 #define MIN_FRAGMENT_SIZE 1 3560 #define MAX_FRAGMENT_SIZE 512 3561 3562 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) 3563 { 3564 return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || 3565 F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; 3566 } 3567 3568 /* 3569 * checkpoint.c 3570 */ 3571 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); 3572 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3573 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3574 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); 3575 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 3576 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3577 block_t blkaddr, int type); 3578 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 3579 int type, bool sync); 3580 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index); 3581 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 3582 long nr_to_write, enum iostat_type io_type); 3583 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3584 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3585 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); 3586 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 3587 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3588 unsigned int devidx, int type); 3589 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3590 unsigned int devidx, int type); 3591 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); 3592 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); 3593 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); 3594 void f2fs_add_orphan_inode(struct inode *inode); 3595 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 3596 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); 3597 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3598 void f2fs_update_dirty_page(struct inode *inode, struct page *page); 3599 
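/*
 * Example (illustrative sketch, simplified): the orphan-inode helpers above
 * are used as a reserve/commit pair.  A hypothetical unlink-style path
 * reserves an orphan slot before the change becomes irreversible and records
 * the inode only if its link count actually drops to zero:
 *
 *	err = f2fs_acquire_orphan_inode(sbi);
 *	if (err)
 *		return err;
 *	... remove the directory entry ...
 *	if (inode->i_nlink == 0)
 *		f2fs_add_orphan_inode(inode);
 *	else
 *		f2fs_release_orphan_inode(sbi);
 *
 * The entry is cleared with f2fs_remove_orphan_inode() once the inode itself
 * is freed, and f2fs_recover_orphan_inodes() replays any entries left behind
 * by a crash.
 */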
void f2fs_remove_dirty_inode(struct inode *inode); 3600 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); 3601 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); 3602 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); 3603 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3604 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); 3605 int __init f2fs_create_checkpoint_caches(void); 3606 void f2fs_destroy_checkpoint_caches(void); 3607 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); 3608 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); 3609 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); 3610 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); 3611 3612 /* 3613 * data.c 3614 */ 3615 int __init f2fs_init_bioset(void); 3616 void f2fs_destroy_bioset(void); 3617 int f2fs_init_bio_entry_cache(void); 3618 void f2fs_destroy_bio_entry_cache(void); 3619 void f2fs_submit_bio(struct f2fs_sb_info *sbi, 3620 struct bio *bio, enum page_type type); 3621 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); 3622 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 3623 struct inode *inode, struct page *page, 3624 nid_t ino, enum page_type type); 3625 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, 3626 struct bio **bio, struct page *page); 3627 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); 3628 int f2fs_submit_page_bio(struct f2fs_io_info *fio); 3629 int f2fs_merge_page_bio(struct f2fs_io_info *fio); 3630 void f2fs_submit_page_write(struct f2fs_io_info *fio); 3631 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 3632 block_t blk_addr, struct bio *bio); 3633 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); 3634 void f2fs_set_data_blkaddr(struct dnode_of_data *dn); 3635 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 3636 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); 3637 int f2fs_reserve_new_block(struct dnode_of_data *dn); 3638 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); 3639 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); 3640 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, 3641 int op_flags, bool for_write); 3642 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index); 3643 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, 3644 bool for_write); 3645 struct page *f2fs_get_new_data_page(struct inode *inode, 3646 struct page *ipage, pgoff_t index, bool new_i_size); 3647 int f2fs_do_write_data_page(struct f2fs_io_info *fio); 3648 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); 3649 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, 3650 int create, int flag); 3651 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3652 u64 start, u64 len); 3653 int f2fs_encrypt_one_page(struct f2fs_io_info *fio); 3654 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); 3655 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); 3656 int f2fs_write_single_data_page(struct page *page, int *submitted, 3657 struct bio **bio, sector_t *last_block, 3658 struct writeback_control *wbc, 3659 enum iostat_type io_type, 3660 int compr_blocks, bool allow_balance); 3661 void f2fs_write_failed(struct inode *inode, loff_t to); 3662 void f2fs_invalidate_page(struct page *page, unsigned int 
offset, 3663 unsigned int length); 3664 int f2fs_release_page(struct page *page, gfp_t wait); 3665 #ifdef CONFIG_MIGRATION 3666 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, 3667 struct page *page, enum migrate_mode mode); 3668 #endif 3669 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); 3670 void f2fs_clear_page_cache_dirty_tag(struct page *page); 3671 int f2fs_init_post_read_processing(void); 3672 void f2fs_destroy_post_read_processing(void); 3673 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); 3674 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); 3675 extern const struct iomap_ops f2fs_iomap_ops; 3676 3677 /* 3678 * gc.c 3679 */ 3680 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); 3681 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); 3682 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); 3683 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force, 3684 unsigned int segno); 3685 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); 3686 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count); 3687 int __init f2fs_create_garbage_collection_cache(void); 3688 void f2fs_destroy_garbage_collection_cache(void); 3689 3690 /* 3691 * recovery.c 3692 */ 3693 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); 3694 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); 3695 int __init f2fs_create_recovery_cache(void); 3696 void f2fs_destroy_recovery_cache(void); 3697 3698 /* 3699 * debug.c 3700 */ 3701 #ifdef CONFIG_F2FS_STAT_FS 3702 struct f2fs_stat_info { 3703 struct list_head stat_list; 3704 struct f2fs_sb_info *sbi; 3705 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; 3706 int main_area_segs, main_area_sections, main_area_zones; 3707 unsigned long long hit_largest, hit_cached, hit_rbtree; 3708 unsigned long long hit_total, total_ext; 3709 int ext_tree, zombie_tree, ext_node; 3710 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; 3711 int ndirty_data, ndirty_qdata; 3712 int inmem_pages; 3713 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; 3714 int nats, dirty_nats, sits, dirty_sits; 3715 int free_nids, avail_nids, alloc_nids; 3716 int total_count, utilization; 3717 int bg_gc, nr_wb_cp_data, nr_wb_data; 3718 int nr_rd_data, nr_rd_node, nr_rd_meta; 3719 int nr_dio_read, nr_dio_write; 3720 unsigned int io_skip_bggc, other_skip_bggc; 3721 int nr_flushing, nr_flushed, flush_list_empty; 3722 int nr_discarding, nr_discarded; 3723 int nr_discard_cmd; 3724 unsigned int undiscard_blks; 3725 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; 3726 unsigned int cur_ckpt_time, peak_ckpt_time; 3727 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 3728 int compr_inode; 3729 unsigned long long compr_blocks; 3730 int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt; 3731 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; 3732 unsigned int bimodal, avg_vblocks; 3733 int util_free, util_valid, util_invalid; 3734 int rsvd_segs, overp_segs; 3735 int dirty_count, node_pages, meta_pages, compress_pages; 3736 int compress_page_hit; 3737 int prefree_count, call_count, cp_count, bg_cp_count; 3738 int tot_segs, node_segs, data_segs, free_segs, free_secs; 3739 int bg_node_segs, bg_data_segs; 3740 int tot_blks, data_blks, node_blks; 3741 int bg_data_blks, bg_node_blks; 3742 unsigned long long skipped_atomic_files[2]; 3743 int curseg[NR_CURSEG_TYPE]; 3744 int cursec[NR_CURSEG_TYPE]; 3745 int 
curzone[NR_CURSEG_TYPE]; 3746 unsigned int dirty_seg[NR_CURSEG_TYPE]; 3747 unsigned int full_seg[NR_CURSEG_TYPE]; 3748 unsigned int valid_blks[NR_CURSEG_TYPE]; 3749 3750 unsigned int meta_count[META_MAX]; 3751 unsigned int segment_count[2]; 3752 unsigned int block_count[2]; 3753 unsigned int inplace_count; 3754 unsigned long long base_mem, cache_mem, page_mem; 3755 }; 3756 3757 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) 3758 { 3759 return (struct f2fs_stat_info *)sbi->stat_info; 3760 } 3761 3762 #define stat_inc_cp_count(si) ((si)->cp_count++) 3763 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++) 3764 #define stat_inc_call_count(si) ((si)->call_count++) 3765 #define stat_inc_bggc_count(si) ((si)->bg_gc++) 3766 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++) 3767 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) 3768 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) 3769 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) 3770 #define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext)) 3771 #define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree)) 3772 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) 3773 #define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached)) 3774 #define stat_inc_inline_xattr(inode) \ 3775 do { \ 3776 if (f2fs_has_inline_xattr(inode)) \ 3777 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ 3778 } while (0) 3779 #define stat_dec_inline_xattr(inode) \ 3780 do { \ 3781 if (f2fs_has_inline_xattr(inode)) \ 3782 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ 3783 } while (0) 3784 #define stat_inc_inline_inode(inode) \ 3785 do { \ 3786 if (f2fs_has_inline_data(inode)) \ 3787 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ 3788 } while (0) 3789 #define stat_dec_inline_inode(inode) \ 3790 do { \ 3791 if (f2fs_has_inline_data(inode)) \ 3792 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ 3793 } while (0) 3794 #define stat_inc_inline_dir(inode) \ 3795 do { \ 3796 if (f2fs_has_inline_dentry(inode)) \ 3797 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ 3798 } while (0) 3799 #define stat_dec_inline_dir(inode) \ 3800 do { \ 3801 if (f2fs_has_inline_dentry(inode)) \ 3802 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ 3803 } while (0) 3804 #define stat_inc_compr_inode(inode) \ 3805 do { \ 3806 if (f2fs_compressed_file(inode)) \ 3807 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \ 3808 } while (0) 3809 #define stat_dec_compr_inode(inode) \ 3810 do { \ 3811 if (f2fs_compressed_file(inode)) \ 3812 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ 3813 } while (0) 3814 #define stat_add_compr_blocks(inode, blocks) \ 3815 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3816 #define stat_sub_compr_blocks(inode, blocks) \ 3817 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3818 #define stat_inc_meta_count(sbi, blkaddr) \ 3819 do { \ 3820 if (blkaddr < SIT_I(sbi)->sit_base_addr) \ 3821 atomic_inc(&(sbi)->meta_count[META_CP]); \ 3822 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \ 3823 atomic_inc(&(sbi)->meta_count[META_SIT]); \ 3824 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \ 3825 atomic_inc(&(sbi)->meta_count[META_NAT]); \ 3826 else if (blkaddr < SM_I(sbi)->main_blkaddr) \ 3827 atomic_inc(&(sbi)->meta_count[META_SSA]); \ 3828 } while (0) 3829 #define stat_inc_seg_type(sbi, curseg) \ 3830 ((sbi)->segment_count[(curseg)->alloc_type]++) 3831 #define stat_inc_block_count(sbi, curseg) \ 3832 
((sbi)->block_count[(curseg)->alloc_type]++) 3833 #define stat_inc_inplace_blocks(sbi) \ 3834 (atomic_inc(&(sbi)->inplace_count)) 3835 #define stat_update_max_atomic_write(inode) \ 3836 do { \ 3837 int cur = F2FS_I_SB(inode)->atomic_files; \ 3838 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 3839 if (cur > max) \ 3840 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ 3841 } while (0) 3842 #define stat_inc_volatile_write(inode) \ 3843 (atomic_inc(&F2FS_I_SB(inode)->vw_cnt)) 3844 #define stat_dec_volatile_write(inode) \ 3845 (atomic_dec(&F2FS_I_SB(inode)->vw_cnt)) 3846 #define stat_update_max_volatile_write(inode) \ 3847 do { \ 3848 int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \ 3849 int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \ 3850 if (cur > max) \ 3851 atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \ 3852 } while (0) 3853 #define stat_inc_seg_count(sbi, type, gc_type) \ 3854 do { \ 3855 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3856 si->tot_segs++; \ 3857 if ((type) == SUM_TYPE_DATA) { \ 3858 si->data_segs++; \ 3859 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \ 3860 } else { \ 3861 si->node_segs++; \ 3862 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \ 3863 } \ 3864 } while (0) 3865 3866 #define stat_inc_tot_blk_count(si, blks) \ 3867 ((si)->tot_blks += (blks)) 3868 3869 #define stat_inc_data_blk_count(sbi, blks, gc_type) \ 3870 do { \ 3871 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3872 stat_inc_tot_blk_count(si, blks); \ 3873 si->data_blks += (blks); \ 3874 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 3875 } while (0) 3876 3877 #define stat_inc_node_blk_count(sbi, blks, gc_type) \ 3878 do { \ 3879 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3880 stat_inc_tot_blk_count(si, blks); \ 3881 si->node_blks += (blks); \ 3882 si->bg_node_blks += ((gc_type) == BG_GC) ? 
(blks) : 0; \ 3883 } while (0) 3884 3885 int f2fs_build_stats(struct f2fs_sb_info *sbi); 3886 void f2fs_destroy_stats(struct f2fs_sb_info *sbi); 3887 void __init f2fs_create_root_stats(void); 3888 void f2fs_destroy_root_stats(void); 3889 void f2fs_update_sit_info(struct f2fs_sb_info *sbi); 3890 #else 3891 #define stat_inc_cp_count(si) do { } while (0) 3892 #define stat_inc_bg_cp_count(si) do { } while (0) 3893 #define stat_inc_call_count(si) do { } while (0) 3894 #define stat_inc_bggc_count(si) do { } while (0) 3895 #define stat_io_skip_bggc_count(sbi) do { } while (0) 3896 #define stat_other_skip_bggc_count(sbi) do { } while (0) 3897 #define stat_inc_dirty_inode(sbi, type) do { } while (0) 3898 #define stat_dec_dirty_inode(sbi, type) do { } while (0) 3899 #define stat_inc_total_hit(sbi) do { } while (0) 3900 #define stat_inc_rbtree_node_hit(sbi) do { } while (0) 3901 #define stat_inc_largest_node_hit(sbi) do { } while (0) 3902 #define stat_inc_cached_node_hit(sbi) do { } while (0) 3903 #define stat_inc_inline_xattr(inode) do { } while (0) 3904 #define stat_dec_inline_xattr(inode) do { } while (0) 3905 #define stat_inc_inline_inode(inode) do { } while (0) 3906 #define stat_dec_inline_inode(inode) do { } while (0) 3907 #define stat_inc_inline_dir(inode) do { } while (0) 3908 #define stat_dec_inline_dir(inode) do { } while (0) 3909 #define stat_inc_compr_inode(inode) do { } while (0) 3910 #define stat_dec_compr_inode(inode) do { } while (0) 3911 #define stat_add_compr_blocks(inode, blocks) do { } while (0) 3912 #define stat_sub_compr_blocks(inode, blocks) do { } while (0) 3913 #define stat_update_max_atomic_write(inode) do { } while (0) 3914 #define stat_inc_volatile_write(inode) do { } while (0) 3915 #define stat_dec_volatile_write(inode) do { } while (0) 3916 #define stat_update_max_volatile_write(inode) do { } while (0) 3917 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) 3918 #define stat_inc_seg_type(sbi, curseg) do { } while (0) 3919 #define stat_inc_block_count(sbi, curseg) do { } while (0) 3920 #define stat_inc_inplace_blocks(sbi) do { } while (0) 3921 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0) 3922 #define stat_inc_tot_blk_count(si, blks) do { } while (0) 3923 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) 3924 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) 3925 3926 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 3927 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 3928 static inline void __init f2fs_create_root_stats(void) { } 3929 static inline void f2fs_destroy_root_stats(void) { } 3930 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} 3931 #endif 3932 3933 extern const struct file_operations f2fs_dir_operations; 3934 extern const struct file_operations f2fs_file_operations; 3935 extern const struct inode_operations f2fs_file_inode_operations; 3936 extern const struct address_space_operations f2fs_dblock_aops; 3937 extern const struct address_space_operations f2fs_node_aops; 3938 extern const struct address_space_operations f2fs_meta_aops; 3939 extern const struct inode_operations f2fs_dir_inode_operations; 3940 extern const struct inode_operations f2fs_symlink_inode_operations; 3941 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; 3942 extern const struct inode_operations f2fs_special_inode_operations; 3943 extern struct kmem_cache *f2fs_inode_entry_slab; 3944 3945 /* 3946 * inline.c 3947 */ 3948 bool 
f2fs_may_inline_data(struct inode *inode); 3949 bool f2fs_may_inline_dentry(struct inode *inode); 3950 void f2fs_do_read_inline_data(struct page *page, struct page *ipage); 3951 void f2fs_truncate_inline_inode(struct inode *inode, 3952 struct page *ipage, u64 from); 3953 int f2fs_read_inline_data(struct inode *inode, struct page *page); 3954 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); 3955 int f2fs_convert_inline_inode(struct inode *inode); 3956 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); 3957 int f2fs_write_inline_data(struct inode *inode, struct page *page); 3958 int f2fs_recover_inline_data(struct inode *inode, struct page *npage); 3959 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, 3960 const struct f2fs_filename *fname, 3961 struct page **res_page); 3962 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, 3963 struct page *ipage); 3964 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, 3965 struct inode *inode, nid_t ino, umode_t mode); 3966 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, 3967 struct page *page, struct inode *dir, 3968 struct inode *inode); 3969 bool f2fs_empty_inline_dir(struct inode *dir); 3970 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, 3971 struct fscrypt_str *fstr); 3972 int f2fs_inline_data_fiemap(struct inode *inode, 3973 struct fiemap_extent_info *fieinfo, 3974 __u64 start, __u64 len); 3975 3976 /* 3977 * shrinker.c 3978 */ 3979 unsigned long f2fs_shrink_count(struct shrinker *shrink, 3980 struct shrink_control *sc); 3981 unsigned long f2fs_shrink_scan(struct shrinker *shrink, 3982 struct shrink_control *sc); 3983 void f2fs_join_shrinker(struct f2fs_sb_info *sbi); 3984 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); 3985 3986 /* 3987 * extent_cache.c 3988 */ 3989 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, 3990 struct rb_entry *cached_re, unsigned int ofs); 3991 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, 3992 struct rb_root_cached *root, 3993 struct rb_node **parent, 3994 unsigned long long key, bool *left_most); 3995 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, 3996 struct rb_root_cached *root, 3997 struct rb_node **parent, 3998 unsigned int ofs, bool *leftmost); 3999 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, 4000 struct rb_entry *cached_re, unsigned int ofs, 4001 struct rb_entry **prev_entry, struct rb_entry **next_entry, 4002 struct rb_node ***insert_p, struct rb_node **insert_parent, 4003 bool force, bool *leftmost); 4004 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, 4005 struct rb_root_cached *root, bool check_key); 4006 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); 4007 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage); 4008 void f2fs_drop_extent_tree(struct inode *inode); 4009 unsigned int f2fs_destroy_extent_node(struct inode *inode); 4010 void f2fs_destroy_extent_tree(struct inode *inode); 4011 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, 4012 struct extent_info *ei); 4013 void f2fs_update_extent_cache(struct dnode_of_data *dn); 4014 void f2fs_update_extent_cache_range(struct dnode_of_data *dn, 4015 pgoff_t fofs, block_t blkaddr, unsigned int len); 4016 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); 4017 int __init f2fs_create_extent_cache(void); 4018 void 
f2fs_destroy_extent_cache(void); 4019 4020 /* 4021 * sysfs.c 4022 */ 4023 #define MIN_RA_MUL 2 4024 #define MAX_RA_MUL 256 4025 4026 int __init f2fs_init_sysfs(void); 4027 void f2fs_exit_sysfs(void); 4028 int f2fs_register_sysfs(struct f2fs_sb_info *sbi); 4029 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); 4030 4031 /* verity.c */ 4032 extern const struct fsverity_operations f2fs_verityops; 4033 4034 /* 4035 * crypto support 4036 */ 4037 static inline bool f2fs_encrypted_file(struct inode *inode) 4038 { 4039 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); 4040 } 4041 4042 static inline void f2fs_set_encrypted_inode(struct inode *inode) 4043 { 4044 #ifdef CONFIG_FS_ENCRYPTION 4045 file_set_encrypt(inode); 4046 f2fs_set_inode_flags(inode); 4047 #endif 4048 } 4049 4050 /* 4051 * Returns true if the reads of the inode's data need to undergo some 4052 * postprocessing step, like decryption or authenticity verification. 4053 */ 4054 static inline bool f2fs_post_read_required(struct inode *inode) 4055 { 4056 return f2fs_encrypted_file(inode) || fsverity_active(inode) || 4057 f2fs_compressed_file(inode); 4058 } 4059 4060 /* 4061 * compress.c 4062 */ 4063 #ifdef CONFIG_F2FS_FS_COMPRESSION 4064 bool f2fs_is_compressed_page(struct page *page); 4065 struct page *f2fs_compress_control_page(struct page *page); 4066 int f2fs_prepare_compress_overwrite(struct inode *inode, 4067 struct page **pagep, pgoff_t index, void **fsdata); 4068 bool f2fs_compress_write_end(struct inode *inode, void *fsdata, 4069 pgoff_t index, unsigned copied); 4070 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); 4071 void f2fs_compress_write_end_io(struct bio *bio, struct page *page); 4072 bool f2fs_is_compress_backend_ready(struct inode *inode); 4073 int f2fs_init_compress_mempool(void); 4074 void f2fs_destroy_compress_mempool(void); 4075 void f2fs_decompress_cluster(struct decompress_io_ctx *dic); 4076 void f2fs_end_read_compressed_page(struct page *page, bool failed, 4077 block_t blkaddr); 4078 bool f2fs_cluster_is_empty(struct compress_ctx *cc); 4079 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); 4080 bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec, 4081 int index, int nr_pages); 4082 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn); 4083 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); 4084 int f2fs_write_multi_pages(struct compress_ctx *cc, 4085 int *submitted, 4086 struct writeback_control *wbc, 4087 enum iostat_type io_type); 4088 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); 4089 void f2fs_update_extent_tree_range_compressed(struct inode *inode, 4090 pgoff_t fofs, block_t blkaddr, unsigned int llen, 4091 unsigned int c_len); 4092 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, 4093 unsigned nr_pages, sector_t *last_block_in_bio, 4094 bool is_readahead, bool for_write); 4095 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); 4096 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed); 4097 void f2fs_put_page_dic(struct page *page); 4098 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn); 4099 int f2fs_init_compress_ctx(struct compress_ctx *cc); 4100 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); 4101 void f2fs_init_compress_info(struct f2fs_sb_info *sbi); 4102 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); 4103 void f2fs_destroy_compress_inode(struct 
f2fs_sb_info *sbi); 4104 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); 4105 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); 4106 int __init f2fs_init_compress_cache(void); 4107 void f2fs_destroy_compress_cache(void); 4108 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); 4109 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); 4110 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4111 nid_t ino, block_t blkaddr); 4112 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4113 block_t blkaddr); 4114 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); 4115 #define inc_compr_inode_stat(inode) \ 4116 do { \ 4117 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4118 sbi->compr_new_inode++; \ 4119 } while (0) 4120 #define add_compr_block_stat(inode, blocks) \ 4121 do { \ 4122 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4123 int diff = F2FS_I(inode)->i_cluster_size - blocks; \ 4124 sbi->compr_written_block += blocks; \ 4125 sbi->compr_saved_block += diff; \ 4126 } while (0) 4127 #else 4128 static inline bool f2fs_is_compressed_page(struct page *page) { return false; } 4129 static inline bool f2fs_is_compress_backend_ready(struct inode *inode) 4130 { 4131 if (!f2fs_compressed_file(inode)) 4132 return true; 4133 /* not support compression */ 4134 return false; 4135 } 4136 static inline struct page *f2fs_compress_control_page(struct page *page) 4137 { 4138 WARN_ON_ONCE(1); 4139 return ERR_PTR(-EINVAL); 4140 } 4141 static inline int f2fs_init_compress_mempool(void) { return 0; } 4142 static inline void f2fs_destroy_compress_mempool(void) { } 4143 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { } 4144 static inline void f2fs_end_read_compressed_page(struct page *page, 4145 bool failed, block_t blkaddr) 4146 { 4147 WARN_ON_ONCE(1); 4148 } 4149 static inline void f2fs_put_page_dic(struct page *page) 4150 { 4151 WARN_ON_ONCE(1); 4152 } 4153 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; } 4154 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; } 4155 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } 4156 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { } 4157 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } 4158 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } 4159 static inline int __init f2fs_init_compress_cache(void) { return 0; } 4160 static inline void f2fs_destroy_compress_cache(void) { } 4161 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, 4162 block_t blkaddr) { } 4163 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, 4164 struct page *page, nid_t ino, block_t blkaddr) { } 4165 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, 4166 struct page *page, block_t blkaddr) { return false; } 4167 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, 4168 nid_t ino) { } 4169 #define inc_compr_inode_stat(inode) do { } while (0) 4170 static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode, 4171 pgoff_t fofs, block_t blkaddr, unsigned int llen, 4172 unsigned int c_len) { } 4173 #endif 4174 4175 static inline void set_compress_context(struct inode *inode) 4176 { 4177 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4178 
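	/*
	 * Seed the per-inode compression context from the current mount
	 * options: compression algorithm, cluster size (log2), an optional
	 * per-cluster checksum flag and, for LZ4/ZSTD, the configured
	 * compression level.
	 */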
4179 F2FS_I(inode)->i_compress_algorithm = 4180 F2FS_OPTION(sbi).compress_algorithm; 4181 F2FS_I(inode)->i_log_cluster_size = 4182 F2FS_OPTION(sbi).compress_log_size; 4183 F2FS_I(inode)->i_compress_flag = 4184 F2FS_OPTION(sbi).compress_chksum ? 4185 1 << COMPRESS_CHKSUM : 0; 4186 F2FS_I(inode)->i_cluster_size = 4187 1 << F2FS_I(inode)->i_log_cluster_size; 4188 if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 || 4189 F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) && 4190 F2FS_OPTION(sbi).compress_level) 4191 F2FS_I(inode)->i_compress_flag |= 4192 F2FS_OPTION(sbi).compress_level << 4193 COMPRESS_LEVEL_OFFSET; 4194 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; 4195 set_inode_flag(inode, FI_COMPRESSED_FILE); 4196 stat_inc_compr_inode(inode); 4197 inc_compr_inode_stat(inode); 4198 f2fs_mark_inode_dirty_sync(inode, true); 4199 } 4200 4201 static inline bool f2fs_disable_compressed_file(struct inode *inode) 4202 { 4203 struct f2fs_inode_info *fi = F2FS_I(inode); 4204 4205 if (!f2fs_compressed_file(inode)) 4206 return true; 4207 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode)) 4208 return false; 4209 4210 fi->i_flags &= ~F2FS_COMPR_FL; 4211 stat_dec_compr_inode(inode); 4212 clear_inode_flag(inode, FI_COMPRESSED_FILE); 4213 f2fs_mark_inode_dirty_sync(inode, true); 4214 return true; 4215 } 4216 4217 #define F2FS_FEATURE_FUNCS(name, flagname) \ 4218 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \ 4219 { \ 4220 return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \ 4221 } 4222 4223 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); 4224 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); 4225 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); 4226 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); 4227 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); 4228 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); 4229 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); 4230 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); 4231 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); 4232 F2FS_FEATURE_FUNCS(verity, VERITY); 4233 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); 4234 F2FS_FEATURE_FUNCS(casefold, CASEFOLD); 4235 F2FS_FEATURE_FUNCS(compression, COMPRESSION); 4236 F2FS_FEATURE_FUNCS(readonly, RO); 4237 4238 static inline bool f2fs_may_extent_tree(struct inode *inode) 4239 { 4240 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4241 4242 if (!test_opt(sbi, EXTENT_CACHE) || 4243 is_inode_flag_set(inode, FI_NO_EXTENT) || 4244 (is_inode_flag_set(inode, FI_COMPRESSED_FILE) && 4245 !f2fs_sb_has_readonly(sbi))) 4246 return false; 4247 4248 /* 4249 * for recovered files during mount do not create extents 4250 * if shrinker is not registered. 
4251 */ 4252 if (list_empty(&sbi->s_list)) 4253 return false; 4254 4255 return S_ISREG(inode->i_mode); 4256 } 4257 4258 #ifdef CONFIG_BLK_DEV_ZONED 4259 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, 4260 block_t blkaddr) 4261 { 4262 unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz; 4263 4264 return test_bit(zno, FDEV(devi).blkz_seq); 4265 } 4266 #endif 4267 4268 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi) 4269 { 4270 return f2fs_sb_has_blkzoned(sbi); 4271 } 4272 4273 static inline bool f2fs_bdev_support_discard(struct block_device *bdev) 4274 { 4275 return blk_queue_discard(bdev_get_queue(bdev)) || 4276 bdev_is_zoned(bdev); 4277 } 4278 4279 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi) 4280 { 4281 int i; 4282 4283 if (!f2fs_is_multi_device(sbi)) 4284 return f2fs_bdev_support_discard(sbi->sb->s_bdev); 4285 4286 for (i = 0; i < sbi->s_ndevs; i++) 4287 if (f2fs_bdev_support_discard(FDEV(i).bdev)) 4288 return true; 4289 return false; 4290 } 4291 4292 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi) 4293 { 4294 return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) || 4295 f2fs_hw_should_discard(sbi); 4296 } 4297 4298 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) 4299 { 4300 int i; 4301 4302 if (!f2fs_is_multi_device(sbi)) 4303 return bdev_read_only(sbi->sb->s_bdev); 4304 4305 for (i = 0; i < sbi->s_ndevs; i++) 4306 if (bdev_read_only(FDEV(i).bdev)) 4307 return true; 4308 return false; 4309 } 4310 4311 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) 4312 { 4313 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; 4314 } 4315 4316 static inline bool f2fs_may_compress(struct inode *inode) 4317 { 4318 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) || 4319 f2fs_is_atomic_file(inode) || 4320 f2fs_is_volatile_file(inode)) 4321 return false; 4322 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); 4323 } 4324 4325 static inline void f2fs_i_compr_blocks_update(struct inode *inode, 4326 u64 blocks, bool add) 4327 { 4328 int diff = F2FS_I(inode)->i_cluster_size - blocks; 4329 struct f2fs_inode_info *fi = F2FS_I(inode); 4330 4331 /* don't update i_compr_blocks if saved blocks were released */ 4332 if (!add && !atomic_read(&fi->i_compr_blocks)) 4333 return; 4334 4335 if (add) { 4336 atomic_add(diff, &fi->i_compr_blocks); 4337 stat_add_compr_blocks(inode, diff); 4338 } else { 4339 atomic_sub(diff, &fi->i_compr_blocks); 4340 stat_sub_compr_blocks(inode, diff); 4341 } 4342 f2fs_mark_inode_dirty_sync(inode, true); 4343 } 4344 4345 static inline int block_unaligned_IO(struct inode *inode, 4346 struct kiocb *iocb, struct iov_iter *iter) 4347 { 4348 unsigned int i_blkbits = READ_ONCE(inode->i_blkbits); 4349 unsigned int blocksize_mask = (1 << i_blkbits) - 1; 4350 loff_t offset = iocb->ki_pos; 4351 unsigned long align = offset | iov_iter_alignment(iter); 4352 4353 return align & blocksize_mask; 4354 } 4355 4356 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, 4357 int flag) 4358 { 4359 if (!f2fs_is_multi_device(sbi)) 4360 return false; 4361 if (flag != F2FS_GET_BLOCK_DIO) 4362 return false; 4363 return sbi->aligned_blksize; 4364 } 4365 4366 static inline bool f2fs_force_buffered_io(struct inode *inode, 4367 struct kiocb *iocb, struct iov_iter *iter) 4368 { 4369 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4370 int rw = iov_iter_rw(iter); 4371 4372 if (f2fs_post_read_required(inode)) 4373 return true; 4374 4375 /* disallow direct IO if any of 
devices has unaligned blksize */ 4376 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) 4377 return true; 4378 /* 4379 * for blkzoned device, fallback direct IO to buffered IO, so 4380 * all IOs can be serialized by log-structured write. 4381 */ 4382 if (f2fs_sb_has_blkzoned(sbi)) 4383 return true; 4384 if (f2fs_lfs_mode(sbi) && (rw == WRITE)) { 4385 if (block_unaligned_IO(inode, iocb, iter)) 4386 return true; 4387 if (F2FS_IO_ALIGNED(sbi)) 4388 return true; 4389 } 4390 if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) 4391 return true; 4392 4393 return false; 4394 } 4395 4396 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) 4397 { 4398 return fsverity_active(inode) && 4399 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 4400 } 4401 4402 #ifdef CONFIG_F2FS_FAULT_INJECTION 4403 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, 4404 unsigned int type); 4405 #else 4406 #define f2fs_build_fault_attr(sbi, rate, type) do { } while (0) 4407 #endif 4408 4409 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) 4410 { 4411 #ifdef CONFIG_QUOTA 4412 if (f2fs_sb_has_quota_ino(sbi)) 4413 return true; 4414 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 4415 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || 4416 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) 4417 return true; 4418 #endif 4419 return false; 4420 } 4421 4422 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi) 4423 { 4424 return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; 4425 } 4426 4427 #define EFSBADCRC EBADMSG /* Bad CRC detected */ 4428 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ 4429 4430 #endif /* _LINUX_F2FS_H */ 4431