/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
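/*
 * Example (illustrative only, not part of the original header): the mount
 * option helpers above are plain bit operations on F2FS_OPTION(sbi).opt.
 * A minimal sketch of how option parsing and a runtime check might use them;
 * the real call sites live in super.c.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_apply_discard_option(struct f2fs_sb_info *sbi, bool enable)
{
	if (enable)
		set_opt(sbi, DISCARD);		/* sets F2FS_MOUNT_DISCARD */
	else
		clear_opt(sbi, DISCARD);	/* clears it */
}

static bool example_discard_enabled(struct f2fs_sb_info *sbi)
{
	return test_opt(sbi, DISCARD);		/* non-zero while the bit is set */
}
#endif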
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM	16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read lock
 * while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
	wait_queue_head_t read_waiters;
};

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy;	/* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
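/*
 * Example (illustrative only): F2FS_HAS_FEATURE() tests a feature bit of the
 * on-disk superblock after byte-order conversion.  Per-feature wrappers such
 * as f2fs_sb_has_compression() are generated later in this header; the sketch
 * below only shows the raw check.
 */
#if 0	/* illustrative sketch, not compiled */
static bool example_can_compress(struct f2fs_sb_info *sbi)
{
	/* false when the image was not formatted with the compression feature */
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION);
}
#endif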
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};
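/*
 * Example (illustrative only): cp_control carries the checkpoint reason and,
 * for CP_DISCARD, a trim range.  The sketch below assumes trim_start/trim_end
 * are segment numbers, as used by the checkpoint path; the real conversion
 * from a byte range happens in f2fs_trim_fs() and is not shown here.
 */
#if 0	/* illustrative sketch, not compiled */
	struct cp_control cpc = {
		.reason		= CP_DISCARD,
		.trim_start	= start_segno,		/* first segment to trim (assumed) */
		.trim_end	= end_segno,		/* last segment to trim (assumed) */
		.trim_minlen	= minlen_blocks,	/* skip candidates shorter than this */
	};
#endif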
/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
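/*
 * Example (illustrative only): plist_idx() buckets a pending discard by its
 * length in blocks.  Lengths 1..MAX_PLIST_NUM-1 map one-to-one to index
 * length-1; anything longer is clamped into the last pending list.
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned int example_discard_bucket(unsigned int len)
{
	return plist_idx(len);	/* 1 -> 0, 16 -> 15, 511 -> 510, >= 512 -> 511 */
}
#endif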
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
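/*
 * Example (illustrative only): __has_cursum_space() decides whether dirty
 * NAT/SIT entries still fit in the current summary journal instead of being
 * flushed to their own blocks at checkpoint time.  The real users live in
 * node.c/segment.c; the helper name below is made up for illustration.
 */
#if 0	/* illustrative sketch, not compiled */
static bool example_try_journal_nats(struct f2fs_journal *journal, int nr)
{
	if (!__has_cursum_space(journal, nr, NAT_JOURNAL))
		return false;			/* caller must flush NAT blocks instead */

	update_nats_in_cursum(journal, nr);	/* bump n_nats by nr; returns old count */
	return true;
}
#endif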
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
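/*
 * Example (illustrative only): both helpers above normalise a regular dentry
 * block and an inline dentry area into the same f2fs_dentry_ptr view -- a
 * validity bitmap, an array of f2fs_dir_entry and a parallel array of name
 * slots.  The real walker is f2fs_fill_dentries() in dir.c; this sketch only
 * shows the iteration pattern.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_walk_dentries(struct f2fs_dentry_ptr *d)
{
	int bit_pos = 0;

	while (bit_pos < d->max) {
		struct f2fs_dir_entry *de;

		if (!test_bit_le(bit_pos, d->bitmap)) {	/* slot not in use */
			bit_pos++;
			continue;
		}

		de = &d->dentry[bit_pos];
		/* d->filename[bit_pos] holds the first F2FS_SLOT_LEN name bytes */

		/* long names occupy several consecutive slots */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
}
#endif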
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
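/*
 * Example (illustrative only): f2fs_map_blocks describes one logical-to-
 * physical mapping request and the F2FS_GET_BLOCK_* flag selects the caller
 * semantics.  f2fs_map_blocks() itself is declared later in this header; the
 * four-argument form used below matches this kernel generation but is shown
 * as an assumption.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_lookup_mapping(struct inode *inode, pgoff_t lblk,
				  unsigned int len)
{
	struct f2fs_map_blocks map = {
		.m_lblk = lblk,
		.m_len = len,
		.m_may_create = false,	/* read path: never allocate blocks */
	};
	int err;

	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (err)
		return err;

	if (map.m_flags & F2FS_MAP_MAPPED) {
		/* blocks [map.m_lblk, map.m_lblk + map.m_len) start at map.m_pblk */
	}
	return 0;
}
#endif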
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)
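/*
 * The file_* macros above all funnel into small is_file()/set_file()/
 * clear_file() accessors on the in-memory i_advise byte; those accessors are
 * defined further down in this header, outside this excerpt.  The sketch
 * below shows the expected behaviour only so the macros read on their own;
 * treat it as an assumption, not the authoritative definition.
 */
#if 0	/* illustrative sketch, not compiled */
static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);	/* the hint is persisted in the inode */
}

static inline void clear_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}
#endif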
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data*/
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs*/
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}
static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
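/*
 * Example (illustrative only): an extent may be merged with a neighbour only
 * when both the file offsets and the block addresses are contiguous (and, for
 * compressed files, neither side is a partially compressed cluster).  The
 * real merge logic lives in extent_cache.c; the helper below is made up to
 * show how the predicates compose.
 */
#if 0	/* illustrative sketch, not compiled */
static bool example_try_merge_back(struct extent_tree *et,
				   struct extent_node *cur_node,
				   struct extent_info *new)
{
	struct extent_info *cur = &cur_node->ei;

	if (!__is_front_mergeable(cur, new))	/* "new" must start where "cur" ends */
		return false;

	cur->len += new->len;			/* grow the cached extent in place */
	__try_update_largest_extent(et, cur_node);
	return true;
}
#endif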
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE];	/* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * This structure is used as a function parameter.  All the information is
 * dedicated to a given direct node block determined by the data offset in a
 * file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
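/*
 * Example (illustrative only): dnode_of_data is a scratch descriptor.  Callers
 * zero it with set_new_dnode(), ask the node manager to resolve a file offset
 * to its direct node, then read the block address back out of it.
 * f2fs_get_dnode_of_data(), f2fs_data_blkaddr() and f2fs_put_dnode() are
 * declared elsewhere in this header; their use below is the usual pattern.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_lookup_blkaddr(struct inode *inode, pgoff_t index,
				  block_t *blkaddr)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);	/* let the lookup grab the pages */
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return err;				/* e.g. -ENOENT for a hole */

	*blkaddr = f2fs_data_blkaddr(&dn);		/* dn.ofs_in_node points at "index" */
	f2fs_put_dnode(&dn);				/* release the node/inode pages */
	return 0;
}
#endif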
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written by waiting
 *			for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		}\
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}
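/*
 * Example (illustrative only): with Layout A, one page->private word carries
 * both the per-page flag bits and a small integer payload shifted above
 * PAGE_PRIVATE_MAX.  The sketch assumes a page locked and owned by f2fs, as
 * the real callers are.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_tag_page(struct page *page, unsigned long payload)
{
	set_page_private_gcing(page);		/* flag: page is being migrated */
	set_page_private_data(page, payload);	/* payload lands above bit PAGE_PRIVATE_MAX */

	WARN_ON(get_page_private_data(page) != payload);
	WARN_ON(!page_private_gcing(page));	/* flag bits are untouched by the payload */

	clear_page_private_data(page);		/* drops the payload, keeps the flag bits */
	clear_page_private_gcing(page);		/* last clear releases the page reference */
}
#endif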
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define	COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define	COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	unsigned int valid_nr_cpages;	/* valid page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct f2fs_rwsem io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
	struct f2fs_rwsem node_write;		/* locking node writes */
	struct f2fs_rwsem node_change;		/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ 1644 struct mutex flush_lock; /* for flush exclusion */ 1645 1646 /* for extent tree cache */ 1647 struct radix_tree_root extent_tree_root;/* cache extent cache entries */ 1648 struct mutex extent_tree_lock; /* locking extent radix tree */ 1649 struct list_head extent_list; /* lru list for shrinker */ 1650 spinlock_t extent_lock; /* locking extent lru list */ 1651 atomic_t total_ext_tree; /* extent tree count */ 1652 struct list_head zombie_list; /* extent zombie tree list */ 1653 atomic_t total_zombie_tree; /* extent zombie tree count */ 1654 atomic_t total_ext_node; /* extent info count */ 1655 1656 /* basic filesystem units */ 1657 unsigned int log_sectors_per_block; /* log2 sectors per block */ 1658 unsigned int log_blocksize; /* log2 block size */ 1659 unsigned int blocksize; /* block size */ 1660 unsigned int root_ino_num; /* root inode number */ 1661 unsigned int node_ino_num; /* node inode number */ 1662 unsigned int meta_ino_num; /* meta inode number */ 1663 unsigned int log_blocks_per_seg; /* log2 blocks per segment */ 1664 unsigned int blocks_per_seg; /* blocks per segment */ 1665 unsigned int segs_per_sec; /* segments per section */ 1666 unsigned int secs_per_zone; /* sections per zone */ 1667 unsigned int total_sections; /* total section count */ 1668 unsigned int total_node_count; /* total node block count */ 1669 unsigned int total_valid_node_count; /* valid node block count */ 1670 int dir_level; /* directory level */ 1671 int readdir_ra; /* readahead inode in readdir */ 1672 u64 max_io_bytes; /* max io bytes to merge IOs */ 1673 1674 block_t user_block_count; /* # of user blocks */ 1675 block_t total_valid_block_count; /* # of valid blocks */ 1676 block_t discard_blks; /* discard command candidates */ 1677 block_t last_valid_block_count; /* for recovery */ 1678 block_t reserved_blocks; /* configurable reserved blocks */ 1679 block_t current_reserved_blocks; /* current reserved blocks */ 1680 1681 /* Additional tracking for no checkpoint mode */ 1682 block_t unusable_block_count; /* # of blocks saved by last cp */ 1683 1684 unsigned int nquota_files; /* # of quota sysfile */ 1685 struct f2fs_rwsem quota_sem; /* blocking cp for flags */ 1686 1687 /* # of pages, see count_type */ 1688 atomic_t nr_pages[NR_COUNT_TYPE]; 1689 /* # of allocated blocks */ 1690 struct percpu_counter alloc_valid_block_count; 1691 1692 /* writeback control */ 1693 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ 1694 1695 /* valid inode count */ 1696 struct percpu_counter total_valid_inode_count; 1697 1698 struct f2fs_mount_info mount_opt; /* mount options */ 1699 1700 /* for cleaning operations */ 1701 struct f2fs_rwsem gc_lock; /* 1702 * semaphore for GC, avoid 1703 * race between GC and GC or CP 1704 */ 1705 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 1706 struct atgc_management am; /* atgc management */ 1707 unsigned int cur_victim_sec; /* current victim section num */ 1708 unsigned int gc_mode; /* current GC state */ 1709 unsigned int next_victim_seg[2]; /* next segment in victim section */ 1710 spinlock_t gc_urgent_high_lock; 1711 bool gc_urgent_high_limited; /* indicates having limited trial count */ 1712 unsigned int gc_urgent_high_remaining; /* remaining trial count for GC_URGENT_HIGH */ 1713 1714 /* for skip statistic */ 1715 unsigned int atomic_files; /* # of opened atomic files */ 1716 unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ 1717 unsigned long long skipped_gc_rwsem; /* FG_GC only */ 1718
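/* Pinned-file GC control, roughly: gc_pin_file_threshold below caps how many GC trials may fail on a pinned file before the pin is dropped (see f2fs_pin_file_control() and i_gc_failures[GC_FAILURE_PIN]), and pin_sem guards changes to a file's pinned state. */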
1719 /* threshold for gc trials on pinned files */ 1720 u64 gc_pin_file_threshold; 1721 struct f2fs_rwsem pin_sem; 1722 1723 /* maximum # of trials to find a victim segment for SSR and GC */ 1724 unsigned int max_victim_search; 1725 /* migration granularity of garbage collection, unit: segment */ 1726 unsigned int migration_granularity; 1727 1728 /* 1729 * for stat information. 1730 * one is for the LFS mode, and the other is for the SSR mode. 1731 */ 1732 #ifdef CONFIG_F2FS_STAT_FS 1733 struct f2fs_stat_info *stat_info; /* FS status information */ 1734 atomic_t meta_count[META_MAX]; /* # of meta blocks */ 1735 unsigned int segment_count[2]; /* # of allocated segments */ 1736 unsigned int block_count[2]; /* # of allocated blocks */ 1737 atomic_t inplace_count; /* # of inplace update */ 1738 atomic64_t total_hit_ext; /* # of lookup extent cache */ 1739 atomic64_t read_hit_rbtree; /* # of hit rbtree extent node */ 1740 atomic64_t read_hit_largest; /* # of hit largest extent node */ 1741 atomic64_t read_hit_cached; /* # of hit cached extent node */ 1742 atomic_t inline_xattr; /* # of inline_xattr inodes */ 1743 atomic_t inline_inode; /* # of inline_data inodes */ 1744 atomic_t inline_dir; /* # of inline_dentry inodes */ 1745 atomic_t compr_inode; /* # of compressed inodes */ 1746 atomic64_t compr_blocks; /* # of compressed blocks */ 1747 atomic_t vw_cnt; /* # of volatile writes */ 1748 atomic_t max_aw_cnt; /* max # of atomic writes */ 1749 atomic_t max_vw_cnt; /* max # of volatile writes */ 1750 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ 1751 unsigned int other_skip_bggc; /* skip background gc for other reasons */ 1752 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ 1753 #endif 1754 spinlock_t stat_lock; /* lock for stat operations */ 1755 1756 /* to attach REQ_META|REQ_FUA flags */ 1757 unsigned int data_io_flag; 1758 unsigned int node_io_flag; 1759 1760 /* For sysfs support */ 1761 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ 1762 struct completion s_kobj_unregister; 1763 1764 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ 1765 struct completion s_stat_kobj_unregister; 1766 1767 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */ 1768 struct completion s_feature_list_kobj_unregister; 1769 1770 /* For shrinker support */ 1771 struct list_head s_list; 1772 struct mutex umount_mutex; 1773 unsigned int shrinker_run_no; 1774 1775 /* For multi devices */ 1776 int s_ndevs; /* number of devices */ 1777 struct f2fs_dev_info *devs; /* for device list */ 1778 unsigned int dirty_device; /* for checkpoint data flush */ 1779 spinlock_t dev_lock; /* protect dirty_device */ 1780 bool aligned_blksize; /* all devices have the same logical blksize */ 1781 1782 /* For write statistics */ 1783 u64 sectors_written_start; 1784 u64 kbytes_written; 1785 1786 /* Reference to checksum algorithm driver via cryptoapi */ 1787 struct crypto_shash *s_chksum_driver; 1788 1789 /* Precomputed FS UUID checksum for seeding other checksums */ 1790 __u32 s_chksum_seed; 1791 1792 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1793 1794 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 1795 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ 1796 1797 /* For reclaimed segs statistics per each GC mode */ 1798 unsigned int gc_segment_mode; /* GC state for reclaimed segments */ 1799 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ 1800 1801 unsigned long
seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */ 1802 1803 int max_fragment_chunk; /* max chunk size for block fragmentation mode */ 1804 int max_fragment_hole; /* max hole size for block fragmentation mode */ 1805 1806 #ifdef CONFIG_F2FS_FS_COMPRESSION 1807 struct kmem_cache *page_array_slab; /* page array entry */ 1808 unsigned int page_array_slab_size; /* default page array slab size */ 1809 1810 /* For runtime compression statistics */ 1811 u64 compr_written_block; 1812 u64 compr_saved_block; 1813 u32 compr_new_inode; 1814 1815 /* For compressed block cache */ 1816 struct inode *compress_inode; /* cache compressed blocks */ 1817 unsigned int compress_percent; /* cache page percentage */ 1818 unsigned int compress_watermark; /* cache page watermark */ 1819 atomic_t compress_page_hit; /* cache hit count */ 1820 #endif 1821 1822 #ifdef CONFIG_F2FS_IOSTAT 1823 /* For app/fs IO statistics */ 1824 spinlock_t iostat_lock; 1825 unsigned long long rw_iostat[NR_IO_TYPE]; 1826 unsigned long long prev_rw_iostat[NR_IO_TYPE]; 1827 bool iostat_enable; 1828 unsigned long iostat_next_period; 1829 unsigned int iostat_period_ms; 1830 1831 /* For io latency related statistics info in one iostat period */ 1832 spinlock_t iostat_lat_lock; 1833 struct iostat_lat_info *iostat_io_lat; 1834 #endif 1835 }; 1836 1837 #ifdef CONFIG_F2FS_FAULT_INJECTION 1838 #define f2fs_show_injection_info(sbi, type) \ 1839 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \ 1840 KERN_INFO, sbi->sb->s_id, \ 1841 f2fs_fault_name[type], \ 1842 __func__, __builtin_return_address(0)) 1843 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1844 { 1845 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1846 1847 if (!ffi->inject_rate) 1848 return false; 1849 1850 if (!IS_FAULT_SET(ffi, type)) 1851 return false; 1852 1853 atomic_inc(&ffi->inject_ops); 1854 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1855 atomic_set(&ffi->inject_ops, 0); 1856 return true; 1857 } 1858 return false; 1859 } 1860 #else 1861 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1862 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1863 { 1864 return false; 1865 } 1866 #endif 1867 1868 /* 1869 * Test if the mounted volume is a multi-device volume. 1870 * - For a single regular disk volume, sbi->s_ndevs is 0. 1871 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1872 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 
1873 */ 1874 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1875 { 1876 return sbi->s_ndevs > 1; 1877 } 1878 1879 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1880 { 1881 unsigned long now = jiffies; 1882 1883 sbi->last_time[type] = now; 1884 1885 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1886 if (type == REQ_TIME) { 1887 sbi->last_time[DISCARD_TIME] = now; 1888 sbi->last_time[GC_TIME] = now; 1889 } 1890 } 1891 1892 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1893 { 1894 unsigned long interval = sbi->interval_time[type] * HZ; 1895 1896 return time_after(jiffies, sbi->last_time[type] + interval); 1897 } 1898 1899 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1900 int type) 1901 { 1902 unsigned long interval = sbi->interval_time[type] * HZ; 1903 unsigned int wait_ms = 0; 1904 long delta; 1905 1906 delta = (sbi->last_time[type] + interval) - jiffies; 1907 if (delta > 0) 1908 wait_ms = jiffies_to_msecs(delta); 1909 1910 return wait_ms; 1911 } 1912 1913 /* 1914 * Inline functions 1915 */ 1916 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1917 const void *address, unsigned int length) 1918 { 1919 struct { 1920 struct shash_desc shash; 1921 char ctx[4]; 1922 } desc; 1923 int err; 1924 1925 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1926 1927 desc.shash.tfm = sbi->s_chksum_driver; 1928 *(u32 *)desc.ctx = crc; 1929 1930 err = crypto_shash_update(&desc.shash, address, length); 1931 BUG_ON(err); 1932 1933 return *(u32 *)desc.ctx; 1934 } 1935 1936 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1937 unsigned int length) 1938 { 1939 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1940 } 1941 1942 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1943 void *buf, size_t buf_size) 1944 { 1945 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1946 } 1947 1948 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1949 const void *address, unsigned int length) 1950 { 1951 return __f2fs_crc32(sbi, crc, address, length); 1952 } 1953 1954 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1955 { 1956 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1957 } 1958 1959 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1960 { 1961 return sb->s_fs_info; 1962 } 1963 1964 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1965 { 1966 return F2FS_SB(inode->i_sb); 1967 } 1968 1969 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1970 { 1971 return F2FS_I_SB(mapping->host); 1972 } 1973 1974 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1975 { 1976 return F2FS_M_SB(page_file_mapping(page)); 1977 } 1978 1979 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1980 { 1981 return (struct f2fs_super_block *)(sbi->raw_super); 1982 } 1983 1984 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1985 { 1986 return (struct f2fs_checkpoint *)(sbi->ckpt); 1987 } 1988 1989 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1990 { 1991 return (struct f2fs_node *)page_address(page); 1992 } 1993 1994 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1995 { 1996 return &((struct f2fs_node *)page_address(page))->i; 1997 } 1998 1999 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 2000 { 2001 return (struct 
f2fs_nm_info *)(sbi->nm_info); 2002 } 2003 2004 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 2005 { 2006 return (struct f2fs_sm_info *)(sbi->sm_info); 2007 } 2008 2009 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 2010 { 2011 return (struct sit_info *)(SM_I(sbi)->sit_info); 2012 } 2013 2014 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 2015 { 2016 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 2017 } 2018 2019 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 2020 { 2021 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 2022 } 2023 2024 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 2025 { 2026 return sbi->meta_inode->i_mapping; 2027 } 2028 2029 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 2030 { 2031 return sbi->node_inode->i_mapping; 2032 } 2033 2034 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 2035 { 2036 return test_bit(type, &sbi->s_flag); 2037 } 2038 2039 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2040 { 2041 set_bit(type, &sbi->s_flag); 2042 } 2043 2044 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 2045 { 2046 clear_bit(type, &sbi->s_flag); 2047 } 2048 2049 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 2050 { 2051 return le64_to_cpu(cp->checkpoint_ver); 2052 } 2053 2054 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 2055 { 2056 if (type < F2FS_MAX_QUOTAS) 2057 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 2058 return 0; 2059 } 2060 2061 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 2062 { 2063 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 2064 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 2065 } 2066 2067 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2068 { 2069 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2070 2071 return ckpt_flags & f; 2072 } 2073 2074 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2075 { 2076 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 2077 } 2078 2079 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2080 { 2081 unsigned int ckpt_flags; 2082 2083 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2084 ckpt_flags |= f; 2085 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2086 } 2087 2088 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2089 { 2090 unsigned long flags; 2091 2092 spin_lock_irqsave(&sbi->cp_lock, flags); 2093 __set_ckpt_flags(F2FS_CKPT(sbi), f); 2094 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2095 } 2096 2097 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 2098 { 2099 unsigned int ckpt_flags; 2100 2101 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 2102 ckpt_flags &= (~f); 2103 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 2104 } 2105 2106 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 2107 { 2108 unsigned long flags; 2109 2110 spin_lock_irqsave(&sbi->cp_lock, flags); 2111 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 2112 spin_unlock_irqrestore(&sbi->cp_lock, flags); 2113 } 2114 2115 static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem) 2116 { 2117 init_rwsem(&sem->internal_rwsem); 2118 init_waitqueue_head(&sem->read_waiters); 2119 } 2120 2121 static inline int f2fs_rwsem_is_locked(struct 
f2fs_rwsem *sem) 2122 { 2123 return rwsem_is_locked(&sem->internal_rwsem); 2124 } 2125 2126 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) 2127 { 2128 return rwsem_is_contended(&sem->internal_rwsem); 2129 } 2130 2131 static inline void f2fs_down_read(struct f2fs_rwsem *sem) 2132 { 2133 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); 2134 } 2135 2136 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) 2137 { 2138 return down_read_trylock(&sem->internal_rwsem); 2139 } 2140 2141 #ifdef CONFIG_DEBUG_LOCK_ALLOC 2142 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) 2143 { 2144 down_read_nested(&sem->internal_rwsem, subclass); 2145 } 2146 #else 2147 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) 2148 #endif 2149 2150 static inline void f2fs_up_read(struct f2fs_rwsem *sem) 2151 { 2152 up_read(&sem->internal_rwsem); 2153 } 2154 2155 static inline void f2fs_down_write(struct f2fs_rwsem *sem) 2156 { 2157 down_write(&sem->internal_rwsem); 2158 } 2159 2160 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) 2161 { 2162 return down_write_trylock(&sem->internal_rwsem); 2163 } 2164 2165 static inline void f2fs_up_write(struct f2fs_rwsem *sem) 2166 { 2167 up_write(&sem->internal_rwsem); 2168 wake_up_all(&sem->read_waiters); 2169 } 2170 2171 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 2172 { 2173 f2fs_down_read(&sbi->cp_rwsem); 2174 } 2175 2176 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 2177 { 2178 if (time_to_inject(sbi, FAULT_LOCK_OP)) { 2179 f2fs_show_injection_info(sbi, FAULT_LOCK_OP); 2180 return 0; 2181 } 2182 return f2fs_down_read_trylock(&sbi->cp_rwsem); 2183 } 2184 2185 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 2186 { 2187 f2fs_up_read(&sbi->cp_rwsem); 2188 } 2189 2190 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 2191 { 2192 f2fs_down_write(&sbi->cp_rwsem); 2193 } 2194 2195 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 2196 { 2197 f2fs_up_write(&sbi->cp_rwsem); 2198 } 2199 2200 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 2201 { 2202 int reason = CP_SYNC; 2203 2204 if (test_opt(sbi, FASTBOOT)) 2205 reason = CP_FASTBOOT; 2206 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 2207 reason = CP_UMOUNT; 2208 return reason; 2209 } 2210 2211 static inline bool __remain_node_summaries(int reason) 2212 { 2213 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 2214 } 2215 2216 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 2217 { 2218 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 2219 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 2220 } 2221 2222 /* 2223 * Check whether the inode has blocks or not 2224 */ 2225 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 2226 { 2227 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 
1 : 0; 2228 2229 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2230 } 2231 2232 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2233 { 2234 return ofs == XATTR_NODE_OFFSET; 2235 } 2236 2237 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2238 struct inode *inode, bool cap) 2239 { 2240 if (!inode) 2241 return true; 2242 if (!test_opt(sbi, RESERVE_ROOT)) 2243 return false; 2244 if (IS_NOQUOTA(inode)) 2245 return true; 2246 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2247 return true; 2248 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2249 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2250 return true; 2251 if (cap && capable(CAP_SYS_RESOURCE)) 2252 return true; 2253 return false; 2254 } 2255 2256 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2257 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2258 struct inode *inode, blkcnt_t *count) 2259 { 2260 blkcnt_t diff = 0, release = 0; 2261 block_t avail_user_block_count; 2262 int ret; 2263 2264 ret = dquot_reserve_block(inode, *count); 2265 if (ret) 2266 return ret; 2267 2268 if (time_to_inject(sbi, FAULT_BLOCK)) { 2269 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2270 release = *count; 2271 goto release_quota; 2272 } 2273 2274 /* 2275 * let's increase this in prior to actual block count change in order 2276 * for f2fs_sync_file to avoid data races when deciding checkpoint. 2277 */ 2278 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2279 2280 spin_lock(&sbi->stat_lock); 2281 sbi->total_valid_block_count += (block_t)(*count); 2282 avail_user_block_count = sbi->user_block_count - 2283 sbi->current_reserved_blocks; 2284 2285 if (!__allow_reserved_blocks(sbi, inode, true)) 2286 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2287 2288 if (F2FS_IO_ALIGNED(sbi)) 2289 avail_user_block_count -= sbi->blocks_per_seg * 2290 SM_I(sbi)->additional_reserved_segments; 2291 2292 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2293 if (avail_user_block_count > sbi->unusable_block_count) 2294 avail_user_block_count -= sbi->unusable_block_count; 2295 else 2296 avail_user_block_count = 0; 2297 } 2298 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2299 diff = sbi->total_valid_block_count - avail_user_block_count; 2300 if (diff > *count) 2301 diff = *count; 2302 *count -= diff; 2303 release = diff; 2304 sbi->total_valid_block_count -= diff; 2305 if (!*count) { 2306 spin_unlock(&sbi->stat_lock); 2307 goto enospc; 2308 } 2309 } 2310 spin_unlock(&sbi->stat_lock); 2311 2312 if (unlikely(release)) { 2313 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2314 dquot_release_reservation_block(inode, release); 2315 } 2316 f2fs_i_blocks_write(inode, *count, true, true); 2317 return 0; 2318 2319 enospc: 2320 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2321 release_quota: 2322 dquot_release_reservation_block(inode, release); 2323 return -ENOSPC; 2324 } 2325 2326 __printf(2, 3) 2327 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2328 2329 #define f2fs_err(sbi, fmt, ...) \ 2330 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2331 #define f2fs_warn(sbi, fmt, ...) \ 2332 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2333 #define f2fs_notice(sbi, fmt, ...) \ 2334 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2335 #define f2fs_info(sbi, fmt, ...) \ 2336 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2337 #define f2fs_debug(sbi, fmt, ...) 
\ 2338 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2339 2340 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2341 struct inode *inode, 2342 block_t count) 2343 { 2344 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2345 2346 spin_lock(&sbi->stat_lock); 2347 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2348 sbi->total_valid_block_count -= (block_t)count; 2349 if (sbi->reserved_blocks && 2350 sbi->current_reserved_blocks < sbi->reserved_blocks) 2351 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2352 sbi->current_reserved_blocks + count); 2353 spin_unlock(&sbi->stat_lock); 2354 if (unlikely(inode->i_blocks < sectors)) { 2355 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2356 inode->i_ino, 2357 (unsigned long long)inode->i_blocks, 2358 (unsigned long long)sectors); 2359 set_sbi_flag(sbi, SBI_NEED_FSCK); 2360 return; 2361 } 2362 f2fs_i_blocks_write(inode, count, false, true); 2363 } 2364 2365 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2366 { 2367 atomic_inc(&sbi->nr_pages[count_type]); 2368 2369 if (count_type == F2FS_DIRTY_DENTS || 2370 count_type == F2FS_DIRTY_NODES || 2371 count_type == F2FS_DIRTY_META || 2372 count_type == F2FS_DIRTY_QDATA || 2373 count_type == F2FS_DIRTY_IMETA) 2374 set_sbi_flag(sbi, SBI_IS_DIRTY); 2375 } 2376 2377 static inline void inode_inc_dirty_pages(struct inode *inode) 2378 { 2379 atomic_inc(&F2FS_I(inode)->dirty_pages); 2380 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2381 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2382 if (IS_NOQUOTA(inode)) 2383 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2384 } 2385 2386 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2387 { 2388 atomic_dec(&sbi->nr_pages[count_type]); 2389 } 2390 2391 static inline void inode_dec_dirty_pages(struct inode *inode) 2392 { 2393 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2394 !S_ISLNK(inode->i_mode)) 2395 return; 2396 2397 atomic_dec(&F2FS_I(inode)->dirty_pages); 2398 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2399 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2400 if (IS_NOQUOTA(inode)) 2401 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2402 } 2403 2404 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2405 { 2406 return atomic_read(&sbi->nr_pages[count_type]); 2407 } 2408 2409 static inline int get_dirty_pages(struct inode *inode) 2410 { 2411 return atomic_read(&F2FS_I(inode)->dirty_pages); 2412 } 2413 2414 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2415 { 2416 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2417 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2418 sbi->log_blocks_per_seg; 2419 2420 return segs / sbi->segs_per_sec; 2421 } 2422 2423 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2424 { 2425 return sbi->total_valid_block_count; 2426 } 2427 2428 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2429 { 2430 return sbi->discard_blks; 2431 } 2432 2433 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2434 { 2435 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2436 2437 /* return NAT or SIT bitmap */ 2438 if (flag == NAT_BITMAP) 2439 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2440 else if (flag == SIT_BITMAP) 2441 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2442 2443 return 0; 2444 } 2445 2446 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2447 { 2448 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2449 } 2450 2451 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2452 { 2453 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2454 void *tmp_ptr = &ckpt->sit_nat_version_bitmap; 2455 int offset; 2456 2457 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2458 offset = (flag == SIT_BITMAP) ? 2459 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2460 /* 2461 * if large_nat_bitmap feature is enabled, leave checksum 2462 * protection for all nat/sit bitmaps. 2463 */ 2464 return tmp_ptr + offset + sizeof(__le32); 2465 } 2466 2467 if (__cp_payload(sbi) > 0) { 2468 if (flag == NAT_BITMAP) 2469 return &ckpt->sit_nat_version_bitmap; 2470 else 2471 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2472 } else { 2473 offset = (flag == NAT_BITMAP) ? 2474 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2475 return tmp_ptr + offset; 2476 } 2477 } 2478 2479 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2480 { 2481 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2482 2483 if (sbi->cur_cp_pack == 2) 2484 start_addr += sbi->blocks_per_seg; 2485 return start_addr; 2486 } 2487 2488 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2489 { 2490 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2491 2492 if (sbi->cur_cp_pack == 1) 2493 start_addr += sbi->blocks_per_seg; 2494 return start_addr; 2495 } 2496 2497 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2498 { 2499 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 
2 : 1; 2500 } 2501 2502 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2503 { 2504 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2505 } 2506 2507 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2508 struct inode *inode, bool is_inode) 2509 { 2510 block_t valid_block_count; 2511 unsigned int valid_node_count, user_block_count; 2512 int err; 2513 2514 if (is_inode) { 2515 if (inode) { 2516 err = dquot_alloc_inode(inode); 2517 if (err) 2518 return err; 2519 } 2520 } else { 2521 err = dquot_reserve_block(inode, 1); 2522 if (err) 2523 return err; 2524 } 2525 2526 if (time_to_inject(sbi, FAULT_BLOCK)) { 2527 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2528 goto enospc; 2529 } 2530 2531 spin_lock(&sbi->stat_lock); 2532 2533 valid_block_count = sbi->total_valid_block_count + 2534 sbi->current_reserved_blocks + 1; 2535 2536 if (!__allow_reserved_blocks(sbi, inode, false)) 2537 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2538 2539 if (F2FS_IO_ALIGNED(sbi)) 2540 valid_block_count += sbi->blocks_per_seg * 2541 SM_I(sbi)->additional_reserved_segments; 2542 2543 user_block_count = sbi->user_block_count; 2544 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2545 user_block_count -= sbi->unusable_block_count; 2546 2547 if (unlikely(valid_block_count > user_block_count)) { 2548 spin_unlock(&sbi->stat_lock); 2549 goto enospc; 2550 } 2551 2552 valid_node_count = sbi->total_valid_node_count + 1; 2553 if (unlikely(valid_node_count > sbi->total_node_count)) { 2554 spin_unlock(&sbi->stat_lock); 2555 goto enospc; 2556 } 2557 2558 sbi->total_valid_node_count++; 2559 sbi->total_valid_block_count++; 2560 spin_unlock(&sbi->stat_lock); 2561 2562 if (inode) { 2563 if (is_inode) 2564 f2fs_mark_inode_dirty_sync(inode, true); 2565 else 2566 f2fs_i_blocks_write(inode, 1, true, true); 2567 } 2568 2569 percpu_counter_inc(&sbi->alloc_valid_block_count); 2570 return 0; 2571 2572 enospc: 2573 if (is_inode) { 2574 if (inode) 2575 dquot_free_inode(inode); 2576 } else { 2577 dquot_release_reservation_block(inode, 1); 2578 } 2579 return -ENOSPC; 2580 } 2581 2582 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2583 struct inode *inode, bool is_inode) 2584 { 2585 spin_lock(&sbi->stat_lock); 2586 2587 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2588 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2589 2590 sbi->total_valid_node_count--; 2591 sbi->total_valid_block_count--; 2592 if (sbi->reserved_blocks && 2593 sbi->current_reserved_blocks < sbi->reserved_blocks) 2594 sbi->current_reserved_blocks++; 2595 2596 spin_unlock(&sbi->stat_lock); 2597 2598 if (is_inode) { 2599 dquot_free_inode(inode); 2600 } else { 2601 if (unlikely(inode->i_blocks == 0)) { 2602 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2603 inode->i_ino, 2604 (unsigned long long)inode->i_blocks); 2605 set_sbi_flag(sbi, SBI_NEED_FSCK); 2606 return; 2607 } 2608 f2fs_i_blocks_write(inode, 1, false, true); 2609 } 2610 } 2611 2612 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2613 { 2614 return sbi->total_valid_node_count; 2615 } 2616 2617 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2618 { 2619 percpu_counter_inc(&sbi->total_valid_inode_count); 2620 } 2621 2622 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2623 { 2624 percpu_counter_dec(&sbi->total_valid_inode_count); 2625 } 2626 2627 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2628 { 2629 return 
percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2630 } 2631 2632 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2633 pgoff_t index, bool for_write) 2634 { 2635 struct page *page; 2636 2637 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2638 if (!for_write) 2639 page = find_get_page_flags(mapping, index, 2640 FGP_LOCK | FGP_ACCESSED); 2641 else 2642 page = find_lock_page(mapping, index); 2643 if (page) 2644 return page; 2645 2646 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2647 f2fs_show_injection_info(F2FS_M_SB(mapping), 2648 FAULT_PAGE_ALLOC); 2649 return NULL; 2650 } 2651 } 2652 2653 if (!for_write) 2654 return grab_cache_page(mapping, index); 2655 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2656 } 2657 2658 static inline struct page *f2fs_pagecache_get_page( 2659 struct address_space *mapping, pgoff_t index, 2660 int fgp_flags, gfp_t gfp_mask) 2661 { 2662 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2663 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2664 return NULL; 2665 } 2666 2667 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2668 } 2669 2670 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2671 { 2672 char *src_kaddr = kmap(src); 2673 char *dst_kaddr = kmap(dst); 2674 2675 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2676 kunmap(dst); 2677 kunmap(src); 2678 } 2679 2680 static inline void f2fs_put_page(struct page *page, int unlock) 2681 { 2682 if (!page) 2683 return; 2684 2685 if (unlock) { 2686 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2687 unlock_page(page); 2688 } 2689 put_page(page); 2690 } 2691 2692 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2693 { 2694 if (dn->node_page) 2695 f2fs_put_page(dn->node_page, 1); 2696 if (dn->inode_page && dn->node_page != dn->inode_page) 2697 f2fs_put_page(dn->inode_page, 0); 2698 dn->node_page = NULL; 2699 dn->inode_page = NULL; 2700 } 2701 2702 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2703 size_t size) 2704 { 2705 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2706 } 2707 2708 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, 2709 gfp_t flags) 2710 { 2711 void *entry; 2712 2713 entry = kmem_cache_alloc(cachep, flags); 2714 if (!entry) 2715 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2716 return entry; 2717 } 2718 2719 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2720 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) 2721 { 2722 if (nofail) 2723 return f2fs_kmem_cache_alloc_nofail(cachep, flags); 2724 2725 if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) { 2726 f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC); 2727 return NULL; 2728 } 2729 2730 return kmem_cache_alloc(cachep, flags); 2731 } 2732 2733 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) 2734 { 2735 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2736 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2737 get_pages(sbi, F2FS_WB_CP_DATA) || 2738 get_pages(sbi, F2FS_DIO_READ) || 2739 get_pages(sbi, F2FS_DIO_WRITE)) 2740 return true; 2741 2742 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2743 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2744 return true; 2745 2746 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2747 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2748 return true; 2749 return false; 2750 } 2751 2752 static inline bool 
is_idle(struct f2fs_sb_info *sbi, int type) 2753 { 2754 if (sbi->gc_mode == GC_URGENT_HIGH) 2755 return true; 2756 2757 if (is_inflight_io(sbi, type)) 2758 return false; 2759 2760 if (sbi->gc_mode == GC_URGENT_LOW && 2761 (type == DISCARD_TIME || type == GC_TIME)) 2762 return true; 2763 2764 return f2fs_time_over(sbi, type); 2765 } 2766 2767 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2768 unsigned long index, void *item) 2769 { 2770 while (radix_tree_insert(root, index, item)) 2771 cond_resched(); 2772 } 2773 2774 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2775 2776 static inline bool IS_INODE(struct page *page) 2777 { 2778 struct f2fs_node *p = F2FS_NODE(page); 2779 2780 return RAW_IS_INODE(p); 2781 } 2782 2783 static inline int offset_in_addr(struct f2fs_inode *i) 2784 { 2785 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2786 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2787 } 2788 2789 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2790 { 2791 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2792 } 2793 2794 static inline int f2fs_has_extra_attr(struct inode *inode); 2795 static inline block_t data_blkaddr(struct inode *inode, 2796 struct page *node_page, unsigned int offset) 2797 { 2798 struct f2fs_node *raw_node; 2799 __le32 *addr_array; 2800 int base = 0; 2801 bool is_inode = IS_INODE(node_page); 2802 2803 raw_node = F2FS_NODE(node_page); 2804 2805 if (is_inode) { 2806 if (!inode) 2807 /* from GC path only */ 2808 base = offset_in_addr(&raw_node->i); 2809 else if (f2fs_has_extra_attr(inode)) 2810 base = get_extra_isize(inode); 2811 } 2812 2813 addr_array = blkaddr_in_node(raw_node); 2814 return le32_to_cpu(addr_array[base + offset]); 2815 } 2816 2817 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2818 { 2819 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2820 } 2821 2822 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2823 { 2824 int mask; 2825 2826 addr += (nr >> 3); 2827 mask = 1 << (7 - (nr & 0x07)); 2828 return mask & *addr; 2829 } 2830 2831 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2832 { 2833 int mask; 2834 2835 addr += (nr >> 3); 2836 mask = 1 << (7 - (nr & 0x07)); 2837 *addr |= mask; 2838 } 2839 2840 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2841 { 2842 int mask; 2843 2844 addr += (nr >> 3); 2845 mask = 1 << (7 - (nr & 0x07)); 2846 *addr &= ~mask; 2847 } 2848 2849 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2850 { 2851 int mask; 2852 int ret; 2853 2854 addr += (nr >> 3); 2855 mask = 1 << (7 - (nr & 0x07)); 2856 ret = mask & *addr; 2857 *addr |= mask; 2858 return ret; 2859 } 2860 2861 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2862 { 2863 int mask; 2864 int ret; 2865 2866 addr += (nr >> 3); 2867 mask = 1 << (7 - (nr & 0x07)); 2868 ret = mask & *addr; 2869 *addr &= ~mask; 2870 return ret; 2871 } 2872 2873 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2874 { 2875 int mask; 2876 2877 addr += (nr >> 3); 2878 mask = 1 << (7 - (nr & 0x07)); 2879 *addr ^= mask; 2880 } 2881 2882 /* 2883 * On-disk inode flags (f2fs_inode::i_flags) 2884 */ 2885 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2886 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2887 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2888 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2889 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file 
*/ 2890 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2891 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2892 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2893 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2894 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2895 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2896 2897 /* Flags that should be inherited by new inodes from their parent. */ 2898 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2899 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2900 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2901 2902 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2903 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2904 F2FS_CASEFOLD_FL)) 2905 2906 /* Flags that are appropriate for non-directories/regular files. */ 2907 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2908 2909 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2910 { 2911 if (S_ISDIR(mode)) 2912 return flags; 2913 else if (S_ISREG(mode)) 2914 return flags & F2FS_REG_FLMASK; 2915 else 2916 return flags & F2FS_OTHER_FLMASK; 2917 } 2918 2919 static inline void __mark_inode_dirty_flag(struct inode *inode, 2920 int flag, bool set) 2921 { 2922 switch (flag) { 2923 case FI_INLINE_XATTR: 2924 case FI_INLINE_DATA: 2925 case FI_INLINE_DENTRY: 2926 case FI_NEW_INODE: 2927 if (set) 2928 return; 2929 fallthrough; 2930 case FI_DATA_EXIST: 2931 case FI_INLINE_DOTS: 2932 case FI_PIN_FILE: 2933 case FI_COMPRESS_RELEASED: 2934 f2fs_mark_inode_dirty_sync(inode, true); 2935 } 2936 } 2937 2938 static inline void set_inode_flag(struct inode *inode, int flag) 2939 { 2940 set_bit(flag, F2FS_I(inode)->flags); 2941 __mark_inode_dirty_flag(inode, flag, true); 2942 } 2943 2944 static inline int is_inode_flag_set(struct inode *inode, int flag) 2945 { 2946 return test_bit(flag, F2FS_I(inode)->flags); 2947 } 2948 2949 static inline void clear_inode_flag(struct inode *inode, int flag) 2950 { 2951 clear_bit(flag, F2FS_I(inode)->flags); 2952 __mark_inode_dirty_flag(inode, flag, false); 2953 } 2954 2955 static inline bool f2fs_verity_in_progress(struct inode *inode) 2956 { 2957 return IS_ENABLED(CONFIG_FS_VERITY) && 2958 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2959 } 2960 2961 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2962 { 2963 F2FS_I(inode)->i_acl_mode = mode; 2964 set_inode_flag(inode, FI_ACL_MODE); 2965 f2fs_mark_inode_dirty_sync(inode, false); 2966 } 2967 2968 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2969 { 2970 if (inc) 2971 inc_nlink(inode); 2972 else 2973 drop_nlink(inode); 2974 f2fs_mark_inode_dirty_sync(inode, true); 2975 } 2976 2977 static inline void f2fs_i_blocks_write(struct inode *inode, 2978 block_t diff, bool add, bool claim) 2979 { 2980 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2981 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2982 2983 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2984 if (add) { 2985 if (claim) 2986 dquot_claim_block(inode, diff); 2987 else 2988 dquot_alloc_block_nofail(inode, diff); 2989 } else { 2990 dquot_free_block(inode, diff); 2991 } 2992 2993 f2fs_mark_inode_dirty_sync(inode, true); 2994 if (clean || recover) 2995 set_inode_flag(inode, FI_AUTO_RECOVER); 2996 } 2997 2998 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2999 { 3000 bool 
clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 3001 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 3002 3003 if (i_size_read(inode) == i_size) 3004 return; 3005 3006 i_size_write(inode, i_size); 3007 f2fs_mark_inode_dirty_sync(inode, true); 3008 if (clean || recover) 3009 set_inode_flag(inode, FI_AUTO_RECOVER); 3010 } 3011 3012 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 3013 { 3014 F2FS_I(inode)->i_current_depth = depth; 3015 f2fs_mark_inode_dirty_sync(inode, true); 3016 } 3017 3018 static inline void f2fs_i_gc_failures_write(struct inode *inode, 3019 unsigned int count) 3020 { 3021 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 3022 f2fs_mark_inode_dirty_sync(inode, true); 3023 } 3024 3025 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 3026 { 3027 F2FS_I(inode)->i_xattr_nid = xnid; 3028 f2fs_mark_inode_dirty_sync(inode, true); 3029 } 3030 3031 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 3032 { 3033 F2FS_I(inode)->i_pino = pino; 3034 f2fs_mark_inode_dirty_sync(inode, true); 3035 } 3036 3037 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 3038 { 3039 struct f2fs_inode_info *fi = F2FS_I(inode); 3040 3041 if (ri->i_inline & F2FS_INLINE_XATTR) 3042 set_bit(FI_INLINE_XATTR, fi->flags); 3043 if (ri->i_inline & F2FS_INLINE_DATA) 3044 set_bit(FI_INLINE_DATA, fi->flags); 3045 if (ri->i_inline & F2FS_INLINE_DENTRY) 3046 set_bit(FI_INLINE_DENTRY, fi->flags); 3047 if (ri->i_inline & F2FS_DATA_EXIST) 3048 set_bit(FI_DATA_EXIST, fi->flags); 3049 if (ri->i_inline & F2FS_INLINE_DOTS) 3050 set_bit(FI_INLINE_DOTS, fi->flags); 3051 if (ri->i_inline & F2FS_EXTRA_ATTR) 3052 set_bit(FI_EXTRA_ATTR, fi->flags); 3053 if (ri->i_inline & F2FS_PIN_FILE) 3054 set_bit(FI_PIN_FILE, fi->flags); 3055 if (ri->i_inline & F2FS_COMPRESS_RELEASED) 3056 set_bit(FI_COMPRESS_RELEASED, fi->flags); 3057 } 3058 3059 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 3060 { 3061 ri->i_inline = 0; 3062 3063 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 3064 ri->i_inline |= F2FS_INLINE_XATTR; 3065 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 3066 ri->i_inline |= F2FS_INLINE_DATA; 3067 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 3068 ri->i_inline |= F2FS_INLINE_DENTRY; 3069 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 3070 ri->i_inline |= F2FS_DATA_EXIST; 3071 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 3072 ri->i_inline |= F2FS_INLINE_DOTS; 3073 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 3074 ri->i_inline |= F2FS_EXTRA_ATTR; 3075 if (is_inode_flag_set(inode, FI_PIN_FILE)) 3076 ri->i_inline |= F2FS_PIN_FILE; 3077 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) 3078 ri->i_inline |= F2FS_COMPRESS_RELEASED; 3079 } 3080 3081 static inline int f2fs_has_extra_attr(struct inode *inode) 3082 { 3083 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 3084 } 3085 3086 static inline int f2fs_has_inline_xattr(struct inode *inode) 3087 { 3088 return is_inode_flag_set(inode, FI_INLINE_XATTR); 3089 } 3090 3091 static inline int f2fs_compressed_file(struct inode *inode) 3092 { 3093 return S_ISREG(inode->i_mode) && 3094 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 3095 } 3096 3097 static inline bool f2fs_need_compress_data(struct inode *inode) 3098 { 3099 int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; 3100 3101 if (!f2fs_compressed_file(inode)) 3102 return false; 3103 3104 if (compress_mode == COMPR_MODE_FS) 3105 return true; 3106 else if (compress_mode == 
COMPR_MODE_USER && 3107 is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) 3108 return true; 3109 3110 return false; 3111 } 3112 3113 static inline unsigned int addrs_per_inode(struct inode *inode) 3114 { 3115 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 3116 get_inline_xattr_addrs(inode); 3117 3118 if (!f2fs_compressed_file(inode)) 3119 return addrs; 3120 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 3121 } 3122 3123 static inline unsigned int addrs_per_block(struct inode *inode) 3124 { 3125 if (!f2fs_compressed_file(inode)) 3126 return DEF_ADDRS_PER_BLOCK; 3127 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 3128 } 3129 3130 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 3131 { 3132 struct f2fs_inode *ri = F2FS_INODE(page); 3133 3134 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 3135 get_inline_xattr_addrs(inode)]); 3136 } 3137 3138 static inline int inline_xattr_size(struct inode *inode) 3139 { 3140 if (f2fs_has_inline_xattr(inode)) 3141 return get_inline_xattr_addrs(inode) * sizeof(__le32); 3142 return 0; 3143 } 3144 3145 static inline int f2fs_has_inline_data(struct inode *inode) 3146 { 3147 return is_inode_flag_set(inode, FI_INLINE_DATA); 3148 } 3149 3150 static inline int f2fs_exist_data(struct inode *inode) 3151 { 3152 return is_inode_flag_set(inode, FI_DATA_EXIST); 3153 } 3154 3155 static inline int f2fs_has_inline_dots(struct inode *inode) 3156 { 3157 return is_inode_flag_set(inode, FI_INLINE_DOTS); 3158 } 3159 3160 static inline int f2fs_is_mmap_file(struct inode *inode) 3161 { 3162 return is_inode_flag_set(inode, FI_MMAP_FILE); 3163 } 3164 3165 static inline bool f2fs_is_pinned_file(struct inode *inode) 3166 { 3167 return is_inode_flag_set(inode, FI_PIN_FILE); 3168 } 3169 3170 static inline bool f2fs_is_atomic_file(struct inode *inode) 3171 { 3172 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 3173 } 3174 3175 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 3176 { 3177 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 3178 } 3179 3180 static inline bool f2fs_is_volatile_file(struct inode *inode) 3181 { 3182 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 3183 } 3184 3185 static inline bool f2fs_is_first_block_written(struct inode *inode) 3186 { 3187 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 3188 } 3189 3190 static inline bool f2fs_is_drop_cache(struct inode *inode) 3191 { 3192 return is_inode_flag_set(inode, FI_DROP_CACHE); 3193 } 3194 3195 static inline void *inline_data_addr(struct inode *inode, struct page *page) 3196 { 3197 struct f2fs_inode *ri = F2FS_INODE(page); 3198 int extra_size = get_extra_isize(inode); 3199 3200 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 3201 } 3202 3203 static inline int f2fs_has_inline_dentry(struct inode *inode) 3204 { 3205 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 3206 } 3207 3208 static inline int is_file(struct inode *inode, int type) 3209 { 3210 return F2FS_I(inode)->i_advise & type; 3211 } 3212 3213 static inline void set_file(struct inode *inode, int type) 3214 { 3215 if (is_file(inode, type)) 3216 return; 3217 F2FS_I(inode)->i_advise |= type; 3218 f2fs_mark_inode_dirty_sync(inode, true); 3219 } 3220 3221 static inline void clear_file(struct inode *inode, int type) 3222 { 3223 if (!is_file(inode, type)) 3224 return; 3225 F2FS_I(inode)->i_advise &= ~type; 3226 f2fs_mark_inode_dirty_sync(inode, true); 3227 } 3228 3229 static inline bool f2fs_is_time_consistent(struct inode *inode) 3230 { 3231 if 
(!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 3232 return false; 3233 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 3234 return false; 3235 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 3236 return false; 3237 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 3238 &F2FS_I(inode)->i_crtime)) 3239 return false; 3240 return true; 3241 } 3242 3243 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 3244 { 3245 bool ret; 3246 3247 if (dsync) { 3248 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3249 3250 spin_lock(&sbi->inode_lock[DIRTY_META]); 3251 ret = list_empty(&F2FS_I(inode)->gdirty_list); 3252 spin_unlock(&sbi->inode_lock[DIRTY_META]); 3253 return ret; 3254 } 3255 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 3256 file_keep_isize(inode) || 3257 i_size_read(inode) & ~PAGE_MASK) 3258 return false; 3259 3260 if (!f2fs_is_time_consistent(inode)) 3261 return false; 3262 3263 spin_lock(&F2FS_I(inode)->i_size_lock); 3264 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 3265 spin_unlock(&F2FS_I(inode)->i_size_lock); 3266 3267 return ret; 3268 } 3269 3270 static inline bool f2fs_readonly(struct super_block *sb) 3271 { 3272 return sb_rdonly(sb); 3273 } 3274 3275 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 3276 { 3277 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3278 } 3279 3280 static inline bool is_dot_dotdot(const u8 *name, size_t len) 3281 { 3282 if (len == 1 && name[0] == '.') 3283 return true; 3284 3285 if (len == 2 && name[0] == '.' && name[1] == '.') 3286 return true; 3287 3288 return false; 3289 } 3290 3291 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3292 size_t size, gfp_t flags) 3293 { 3294 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3295 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3296 return NULL; 3297 } 3298 3299 return kmalloc(size, flags); 3300 } 3301 3302 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3303 size_t size, gfp_t flags) 3304 { 3305 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3306 } 3307 3308 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3309 size_t size, gfp_t flags) 3310 { 3311 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3312 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3313 return NULL; 3314 } 3315 3316 return kvmalloc(size, flags); 3317 } 3318 3319 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3320 size_t size, gfp_t flags) 3321 { 3322 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3323 } 3324 3325 static inline int get_extra_isize(struct inode *inode) 3326 { 3327 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3328 } 3329 3330 static inline int get_inline_xattr_addrs(struct inode *inode) 3331 { 3332 return F2FS_I(inode)->i_inline_xattr_size; 3333 } 3334 3335 #define f2fs_get_inode_mode(i) \ 3336 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3337 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3338 3339 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3340 (offsetof(struct f2fs_inode, i_extra_end) - \ 3341 offsetof(struct f2fs_inode, i_extra_isize)) \ 3342 3343 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3344 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3345 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3346 sizeof((f2fs_inode)->field)) \ 3347 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3348 3349 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3350 3351 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3352 3353 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3354 block_t blkaddr, int type); 3355 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3356 block_t blkaddr, int type) 3357 { 3358 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3359 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3360 blkaddr, type); 3361 f2fs_bug_on(sbi, 1); 3362 } 3363 } 3364 3365 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3366 { 3367 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3368 blkaddr == COMPRESS_ADDR) 3369 return false; 3370 return true; 3371 } 3372 3373 /* 3374 * file.c 3375 */ 3376 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3377 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3378 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3379 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3380 int f2fs_truncate(struct inode *inode); 3381 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path, 3382 struct kstat *stat, u32 request_mask, unsigned int flags); 3383 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, 3384 struct iattr *attr); 3385 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3386 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3387 int f2fs_precache_extents(struct inode *inode); 3388 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); 3389 int f2fs_fileattr_set(struct user_namespace *mnt_userns, 3390 struct dentry *dentry, struct fileattr *fa); 3391 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3392 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3393 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3394 int f2fs_pin_file_control(struct inode *inode, bool inc); 3395 3396 /* 3397 * inode.c 3398 */ 3399 void f2fs_set_inode_flags(struct inode *inode); 3400 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3401 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3402 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3403 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3404 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3405 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3406 void f2fs_update_inode_page(struct inode *inode); 3407 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3408 void f2fs_evict_inode(struct inode *inode); 3409 void f2fs_handle_failed_inode(struct inode *inode); 3410 3411 /* 3412 * namei.c 3413 */ 3414 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3415 bool hot, bool set); 3416 struct dentry *f2fs_get_parent(struct dentry *child); 3417 3418 /* 
3419 * dir.c 3420 */ 3421 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3422 int f2fs_init_casefolded_name(const struct inode *dir, 3423 struct f2fs_filename *fname); 3424 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3425 int lookup, struct f2fs_filename *fname); 3426 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3427 struct f2fs_filename *fname); 3428 void f2fs_free_filename(struct f2fs_filename *fname); 3429 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3430 const struct f2fs_filename *fname, int *max_slots); 3431 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3432 unsigned int start_pos, struct fscrypt_str *fstr); 3433 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3434 struct f2fs_dentry_ptr *d); 3435 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3436 const struct f2fs_filename *fname, struct page *dpage); 3437 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3438 unsigned int current_depth); 3439 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3440 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3441 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3442 const struct f2fs_filename *fname, 3443 struct page **res_page); 3444 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3445 const struct qstr *child, struct page **res_page); 3446 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3447 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3448 struct page **page); 3449 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3450 struct page *page, struct inode *inode); 3451 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3452 const struct f2fs_filename *fname); 3453 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3454 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3455 unsigned int bit_pos); 3456 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3457 struct inode *inode, nid_t ino, umode_t mode); 3458 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, 3459 struct inode *inode, nid_t ino, umode_t mode); 3460 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3461 struct inode *inode, nid_t ino, umode_t mode); 3462 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3463 struct inode *dir, struct inode *inode); 3464 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3465 bool f2fs_empty_dir(struct inode *dir); 3466 3467 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3468 { 3469 if (fscrypt_is_nokey_name(dentry)) 3470 return -ENOKEY; 3471 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3472 inode, inode->i_ino, inode->i_mode); 3473 } 3474 3475 /* 3476 * super.c 3477 */ 3478 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3479 void f2fs_inode_synced(struct inode *inode); 3480 int f2fs_dquot_initialize(struct inode *inode); 3481 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3482 int f2fs_quota_sync(struct super_block *sb, int type); 3483 loff_t max_file_blocks(struct inode *inode); 3484 void f2fs_quota_off_umount(struct super_block *sb); 3485 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3486 int f2fs_sync_fs(struct super_block *sb, int sync); 
3487 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3488 3489 /* 3490 * hash.c 3491 */ 3492 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3493 3494 /* 3495 * node.c 3496 */ 3497 struct node_info; 3498 3499 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3500 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3501 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3502 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3503 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3504 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3505 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3506 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3507 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3508 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3509 struct node_info *ni, bool checkpoint_context); 3510 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3511 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3512 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3513 int f2fs_truncate_xattr_node(struct inode *inode); 3514 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3515 unsigned int seq_id); 3516 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); 3517 int f2fs_remove_inode_page(struct inode *inode); 3518 struct page *f2fs_new_inode_page(struct inode *inode); 3519 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3520 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3521 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3522 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3523 int f2fs_move_node_page(struct page *node_page, int gc_type); 3524 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3525 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3526 struct writeback_control *wbc, bool atomic, 3527 unsigned int *seq_id); 3528 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3529 struct writeback_control *wbc, 3530 bool do_balance, enum iostat_type io_type); 3531 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3532 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3533 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 3534 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3535 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3536 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3537 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3538 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3539 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3540 unsigned int segno, struct f2fs_summary_block *sum); 3541 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); 3542 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3543 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3544 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3545 int __init f2fs_create_node_manager_caches(void); 3546 void f2fs_destroy_node_manager_caches(void); 3547 3548 /* 3549 * segment.c 3550 */ 3551 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3552 void f2fs_register_inmem_page(struct inode *inode, struct page *page); 3553 void 
f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure); 3554 void f2fs_drop_inmem_pages(struct inode *inode); 3555 void f2fs_drop_inmem_page(struct inode *inode, struct page *page); 3556 int f2fs_commit_inmem_pages(struct inode *inode); 3557 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3558 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3559 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3560 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3561 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3562 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3563 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3564 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3565 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); 3566 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3567 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3568 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3569 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3570 struct cp_control *cpc); 3571 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3572 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3573 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3574 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3575 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3576 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); 3577 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3578 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3579 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3580 void f2fs_get_new_segment(struct f2fs_sb_info *sbi, 3581 unsigned int *newseg, bool new_sec, int dir); 3582 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3583 unsigned int start, unsigned int end); 3584 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); 3585 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3586 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3587 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3588 struct cp_control *cpc); 3589 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3590 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3591 block_t blk_addr); 3592 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3593 enum iostat_type io_type); 3594 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3595 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3596 struct f2fs_io_info *fio); 3597 int f2fs_inplace_write_data(struct f2fs_io_info *fio); 3598 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3599 block_t old_blkaddr, block_t new_blkaddr, 3600 bool recover_curseg, bool recover_newaddr, 3601 bool from_gc); 3602 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3603 block_t old_addr, block_t new_addr, 3604 unsigned char version, bool recover_curseg, 3605 bool recover_newaddr); 3606 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3607 block_t old_blkaddr, block_t *new_blkaddr, 3608 struct f2fs_summary *sum, int type, 3609 struct f2fs_io_info *fio); 3610 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, 3611 block_t blkaddr, unsigned int blkcnt); 3612 void 
f2fs_wait_on_page_writeback(struct page *page, 3613 enum page_type type, bool ordered, bool locked); 3614 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); 3615 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3616 block_t len); 3617 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3618 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3619 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 3620 unsigned int val, int alloc); 3621 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3622 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); 3623 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); 3624 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); 3625 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); 3626 int __init f2fs_create_segment_manager_caches(void); 3627 void f2fs_destroy_segment_manager_caches(void); 3628 int f2fs_rw_hint_to_seg_type(enum rw_hint hint); 3629 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, 3630 enum page_type type, enum temp_type temp); 3631 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, 3632 unsigned int segno); 3633 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, 3634 unsigned int segno); 3635 3636 #define DEF_FRAGMENT_SIZE 4 3637 #define MIN_FRAGMENT_SIZE 1 3638 #define MAX_FRAGMENT_SIZE 512 3639 3640 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) 3641 { 3642 return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || 3643 F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; 3644 } 3645 3646 /* 3647 * checkpoint.c 3648 */ 3649 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); 3650 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3651 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3652 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); 3653 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 3654 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3655 block_t blkaddr, int type); 3656 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 3657 int type, bool sync); 3658 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, 3659 unsigned int ra_blocks); 3660 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 3661 long nr_to_write, enum iostat_type io_type); 3662 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3663 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3664 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); 3665 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 3666 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3667 unsigned int devidx, int type); 3668 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3669 unsigned int devidx, int type); 3670 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); 3671 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); 3672 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); 3673 void f2fs_add_orphan_inode(struct inode *inode); 3674 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 3675 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); 3676 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3677 void f2fs_update_dirty_page(struct inode 
*inode, struct page *page); 3678 void f2fs_remove_dirty_inode(struct inode *inode); 3679 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); 3680 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); 3681 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); 3682 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3683 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); 3684 int __init f2fs_create_checkpoint_caches(void); 3685 void f2fs_destroy_checkpoint_caches(void); 3686 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); 3687 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); 3688 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); 3689 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); 3690 3691 /* 3692 * data.c 3693 */ 3694 int __init f2fs_init_bioset(void); 3695 void f2fs_destroy_bioset(void); 3696 int f2fs_init_bio_entry_cache(void); 3697 void f2fs_destroy_bio_entry_cache(void); 3698 void f2fs_submit_bio(struct f2fs_sb_info *sbi, 3699 struct bio *bio, enum page_type type); 3700 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); 3701 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 3702 struct inode *inode, struct page *page, 3703 nid_t ino, enum page_type type); 3704 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, 3705 struct bio **bio, struct page *page); 3706 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); 3707 int f2fs_submit_page_bio(struct f2fs_io_info *fio); 3708 int f2fs_merge_page_bio(struct f2fs_io_info *fio); 3709 void f2fs_submit_page_write(struct f2fs_io_info *fio); 3710 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 3711 block_t blk_addr, struct bio *bio); 3712 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); 3713 void f2fs_set_data_blkaddr(struct dnode_of_data *dn); 3714 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 3715 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); 3716 int f2fs_reserve_new_block(struct dnode_of_data *dn); 3717 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); 3718 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); 3719 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, 3720 int op_flags, bool for_write); 3721 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index); 3722 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, 3723 bool for_write); 3724 struct page *f2fs_get_new_data_page(struct inode *inode, 3725 struct page *ipage, pgoff_t index, bool new_i_size); 3726 int f2fs_do_write_data_page(struct f2fs_io_info *fio); 3727 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); 3728 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, 3729 int create, int flag); 3730 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 3731 u64 start, u64 len); 3732 int f2fs_encrypt_one_page(struct f2fs_io_info *fio); 3733 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); 3734 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); 3735 int f2fs_write_single_data_page(struct page *page, int *submitted, 3736 struct bio **bio, sector_t *last_block, 3737 struct writeback_control *wbc, 3738 enum iostat_type io_type, 3739 int compr_blocks, bool allow_balance); 3740 void f2fs_write_failed(struct inode *inode, loff_t to); 3741 void 
f2fs_invalidate_page(struct page *page, unsigned int offset, 3742 unsigned int length); 3743 int f2fs_release_page(struct page *page, gfp_t wait); 3744 #ifdef CONFIG_MIGRATION 3745 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, 3746 struct page *page, enum migrate_mode mode); 3747 #endif 3748 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); 3749 void f2fs_clear_page_cache_dirty_tag(struct page *page); 3750 int f2fs_init_post_read_processing(void); 3751 void f2fs_destroy_post_read_processing(void); 3752 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); 3753 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); 3754 extern const struct iomap_ops f2fs_iomap_ops; 3755 3756 /* 3757 * gc.c 3758 */ 3759 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); 3760 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); 3761 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); 3762 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force, 3763 unsigned int segno); 3764 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); 3765 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count); 3766 int __init f2fs_create_garbage_collection_cache(void); 3767 void f2fs_destroy_garbage_collection_cache(void); 3768 3769 /* 3770 * recovery.c 3771 */ 3772 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); 3773 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); 3774 int __init f2fs_create_recovery_cache(void); 3775 void f2fs_destroy_recovery_cache(void); 3776 3777 /* 3778 * debug.c 3779 */ 3780 #ifdef CONFIG_F2FS_STAT_FS 3781 struct f2fs_stat_info { 3782 struct list_head stat_list; 3783 struct f2fs_sb_info *sbi; 3784 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; 3785 int main_area_segs, main_area_sections, main_area_zones; 3786 unsigned long long hit_largest, hit_cached, hit_rbtree; 3787 unsigned long long hit_total, total_ext; 3788 int ext_tree, zombie_tree, ext_node; 3789 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; 3790 int ndirty_data, ndirty_qdata; 3791 int inmem_pages; 3792 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; 3793 int nats, dirty_nats, sits, dirty_sits; 3794 int free_nids, avail_nids, alloc_nids; 3795 int total_count, utilization; 3796 int bg_gc, nr_wb_cp_data, nr_wb_data; 3797 int nr_rd_data, nr_rd_node, nr_rd_meta; 3798 int nr_dio_read, nr_dio_write; 3799 unsigned int io_skip_bggc, other_skip_bggc; 3800 int nr_flushing, nr_flushed, flush_list_empty; 3801 int nr_discarding, nr_discarded; 3802 int nr_discard_cmd; 3803 unsigned int undiscard_blks; 3804 int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; 3805 unsigned int cur_ckpt_time, peak_ckpt_time; 3806 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 3807 int compr_inode; 3808 unsigned long long compr_blocks; 3809 int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt; 3810 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; 3811 unsigned int bimodal, avg_vblocks; 3812 int util_free, util_valid, util_invalid; 3813 int rsvd_segs, overp_segs; 3814 int dirty_count, node_pages, meta_pages, compress_pages; 3815 int compress_page_hit; 3816 int prefree_count, call_count, cp_count, bg_cp_count; 3817 int tot_segs, node_segs, data_segs, free_segs, free_secs; 3818 int bg_node_segs, bg_data_segs; 3819 int tot_blks, data_blks, node_blks; 3820 int bg_data_blks, bg_node_blks; 3821 unsigned long long skipped_atomic_files[2]; 3822 int 
curseg[NR_CURSEG_TYPE]; 3823 int cursec[NR_CURSEG_TYPE]; 3824 int curzone[NR_CURSEG_TYPE]; 3825 unsigned int dirty_seg[NR_CURSEG_TYPE]; 3826 unsigned int full_seg[NR_CURSEG_TYPE]; 3827 unsigned int valid_blks[NR_CURSEG_TYPE]; 3828 3829 unsigned int meta_count[META_MAX]; 3830 unsigned int segment_count[2]; 3831 unsigned int block_count[2]; 3832 unsigned int inplace_count; 3833 unsigned long long base_mem, cache_mem, page_mem; 3834 }; 3835 3836 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) 3837 { 3838 return (struct f2fs_stat_info *)sbi->stat_info; 3839 } 3840 3841 #define stat_inc_cp_count(si) ((si)->cp_count++) 3842 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++) 3843 #define stat_inc_call_count(si) ((si)->call_count++) 3844 #define stat_inc_bggc_count(si) ((si)->bg_gc++) 3845 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++) 3846 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) 3847 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) 3848 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) 3849 #define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext)) 3850 #define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree)) 3851 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) 3852 #define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached)) 3853 #define stat_inc_inline_xattr(inode) \ 3854 do { \ 3855 if (f2fs_has_inline_xattr(inode)) \ 3856 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ 3857 } while (0) 3858 #define stat_dec_inline_xattr(inode) \ 3859 do { \ 3860 if (f2fs_has_inline_xattr(inode)) \ 3861 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ 3862 } while (0) 3863 #define stat_inc_inline_inode(inode) \ 3864 do { \ 3865 if (f2fs_has_inline_data(inode)) \ 3866 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ 3867 } while (0) 3868 #define stat_dec_inline_inode(inode) \ 3869 do { \ 3870 if (f2fs_has_inline_data(inode)) \ 3871 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ 3872 } while (0) 3873 #define stat_inc_inline_dir(inode) \ 3874 do { \ 3875 if (f2fs_has_inline_dentry(inode)) \ 3876 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ 3877 } while (0) 3878 #define stat_dec_inline_dir(inode) \ 3879 do { \ 3880 if (f2fs_has_inline_dentry(inode)) \ 3881 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ 3882 } while (0) 3883 #define stat_inc_compr_inode(inode) \ 3884 do { \ 3885 if (f2fs_compressed_file(inode)) \ 3886 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \ 3887 } while (0) 3888 #define stat_dec_compr_inode(inode) \ 3889 do { \ 3890 if (f2fs_compressed_file(inode)) \ 3891 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ 3892 } while (0) 3893 #define stat_add_compr_blocks(inode, blocks) \ 3894 (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3895 #define stat_sub_compr_blocks(inode, blocks) \ 3896 (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) 3897 #define stat_inc_meta_count(sbi, blkaddr) \ 3898 do { \ 3899 if (blkaddr < SIT_I(sbi)->sit_base_addr) \ 3900 atomic_inc(&(sbi)->meta_count[META_CP]); \ 3901 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \ 3902 atomic_inc(&(sbi)->meta_count[META_SIT]); \ 3903 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \ 3904 atomic_inc(&(sbi)->meta_count[META_NAT]); \ 3905 else if (blkaddr < SM_I(sbi)->main_blkaddr) \ 3906 atomic_inc(&(sbi)->meta_count[META_SSA]); \ 3907 } while (0) 3908 #define stat_inc_seg_type(sbi, curseg) \ 3909 
((sbi)->segment_count[(curseg)->alloc_type]++) 3910 #define stat_inc_block_count(sbi, curseg) \ 3911 ((sbi)->block_count[(curseg)->alloc_type]++) 3912 #define stat_inc_inplace_blocks(sbi) \ 3913 (atomic_inc(&(sbi)->inplace_count)) 3914 #define stat_update_max_atomic_write(inode) \ 3915 do { \ 3916 int cur = F2FS_I_SB(inode)->atomic_files; \ 3917 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 3918 if (cur > max) \ 3919 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ 3920 } while (0) 3921 #define stat_inc_volatile_write(inode) \ 3922 (atomic_inc(&F2FS_I_SB(inode)->vw_cnt)) 3923 #define stat_dec_volatile_write(inode) \ 3924 (atomic_dec(&F2FS_I_SB(inode)->vw_cnt)) 3925 #define stat_update_max_volatile_write(inode) \ 3926 do { \ 3927 int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \ 3928 int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \ 3929 if (cur > max) \ 3930 atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \ 3931 } while (0) 3932 #define stat_inc_seg_count(sbi, type, gc_type) \ 3933 do { \ 3934 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3935 si->tot_segs++; \ 3936 if ((type) == SUM_TYPE_DATA) { \ 3937 si->data_segs++; \ 3938 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \ 3939 } else { \ 3940 si->node_segs++; \ 3941 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \ 3942 } \ 3943 } while (0) 3944 3945 #define stat_inc_tot_blk_count(si, blks) \ 3946 ((si)->tot_blks += (blks)) 3947 3948 #define stat_inc_data_blk_count(sbi, blks, gc_type) \ 3949 do { \ 3950 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3951 stat_inc_tot_blk_count(si, blks); \ 3952 si->data_blks += (blks); \ 3953 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 3954 } while (0) 3955 3956 #define stat_inc_node_blk_count(sbi, blks, gc_type) \ 3957 do { \ 3958 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 3959 stat_inc_tot_blk_count(si, blks); \ 3960 si->node_blks += (blks); \ 3961 si->bg_node_blks += ((gc_type) == BG_GC) ? 
(blks) : 0; \ 3962 } while (0) 3963 3964 int f2fs_build_stats(struct f2fs_sb_info *sbi); 3965 void f2fs_destroy_stats(struct f2fs_sb_info *sbi); 3966 void __init f2fs_create_root_stats(void); 3967 void f2fs_destroy_root_stats(void); 3968 void f2fs_update_sit_info(struct f2fs_sb_info *sbi); 3969 #else 3970 #define stat_inc_cp_count(si) do { } while (0) 3971 #define stat_inc_bg_cp_count(si) do { } while (0) 3972 #define stat_inc_call_count(si) do { } while (0) 3973 #define stat_inc_bggc_count(si) do { } while (0) 3974 #define stat_io_skip_bggc_count(sbi) do { } while (0) 3975 #define stat_other_skip_bggc_count(sbi) do { } while (0) 3976 #define stat_inc_dirty_inode(sbi, type) do { } while (0) 3977 #define stat_dec_dirty_inode(sbi, type) do { } while (0) 3978 #define stat_inc_total_hit(sbi) do { } while (0) 3979 #define stat_inc_rbtree_node_hit(sbi) do { } while (0) 3980 #define stat_inc_largest_node_hit(sbi) do { } while (0) 3981 #define stat_inc_cached_node_hit(sbi) do { } while (0) 3982 #define stat_inc_inline_xattr(inode) do { } while (0) 3983 #define stat_dec_inline_xattr(inode) do { } while (0) 3984 #define stat_inc_inline_inode(inode) do { } while (0) 3985 #define stat_dec_inline_inode(inode) do { } while (0) 3986 #define stat_inc_inline_dir(inode) do { } while (0) 3987 #define stat_dec_inline_dir(inode) do { } while (0) 3988 #define stat_inc_compr_inode(inode) do { } while (0) 3989 #define stat_dec_compr_inode(inode) do { } while (0) 3990 #define stat_add_compr_blocks(inode, blocks) do { } while (0) 3991 #define stat_sub_compr_blocks(inode, blocks) do { } while (0) 3992 #define stat_update_max_atomic_write(inode) do { } while (0) 3993 #define stat_inc_volatile_write(inode) do { } while (0) 3994 #define stat_dec_volatile_write(inode) do { } while (0) 3995 #define stat_update_max_volatile_write(inode) do { } while (0) 3996 #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) 3997 #define stat_inc_seg_type(sbi, curseg) do { } while (0) 3998 #define stat_inc_block_count(sbi, curseg) do { } while (0) 3999 #define stat_inc_inplace_blocks(sbi) do { } while (0) 4000 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0) 4001 #define stat_inc_tot_blk_count(si, blks) do { } while (0) 4002 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) 4003 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) 4004 4005 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 4006 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 4007 static inline void __init f2fs_create_root_stats(void) { } 4008 static inline void f2fs_destroy_root_stats(void) { } 4009 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} 4010 #endif 4011 4012 extern const struct file_operations f2fs_dir_operations; 4013 extern const struct file_operations f2fs_file_operations; 4014 extern const struct inode_operations f2fs_file_inode_operations; 4015 extern const struct address_space_operations f2fs_dblock_aops; 4016 extern const struct address_space_operations f2fs_node_aops; 4017 extern const struct address_space_operations f2fs_meta_aops; 4018 extern const struct inode_operations f2fs_dir_inode_operations; 4019 extern const struct inode_operations f2fs_symlink_inode_operations; 4020 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; 4021 extern const struct inode_operations f2fs_special_inode_operations; 4022 extern struct kmem_cache *f2fs_inode_entry_slab; 4023 4024 /* 4025 * inline.c 4026 */ 4027 bool 
f2fs_may_inline_data(struct inode *inode); 4028 bool f2fs_may_inline_dentry(struct inode *inode); 4029 void f2fs_do_read_inline_data(struct page *page, struct page *ipage); 4030 void f2fs_truncate_inline_inode(struct inode *inode, 4031 struct page *ipage, u64 from); 4032 int f2fs_read_inline_data(struct inode *inode, struct page *page); 4033 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); 4034 int f2fs_convert_inline_inode(struct inode *inode); 4035 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); 4036 int f2fs_write_inline_data(struct inode *inode, struct page *page); 4037 int f2fs_recover_inline_data(struct inode *inode, struct page *npage); 4038 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, 4039 const struct f2fs_filename *fname, 4040 struct page **res_page); 4041 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, 4042 struct page *ipage); 4043 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, 4044 struct inode *inode, nid_t ino, umode_t mode); 4045 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, 4046 struct page *page, struct inode *dir, 4047 struct inode *inode); 4048 bool f2fs_empty_inline_dir(struct inode *dir); 4049 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, 4050 struct fscrypt_str *fstr); 4051 int f2fs_inline_data_fiemap(struct inode *inode, 4052 struct fiemap_extent_info *fieinfo, 4053 __u64 start, __u64 len); 4054 4055 /* 4056 * shrinker.c 4057 */ 4058 unsigned long f2fs_shrink_count(struct shrinker *shrink, 4059 struct shrink_control *sc); 4060 unsigned long f2fs_shrink_scan(struct shrinker *shrink, 4061 struct shrink_control *sc); 4062 void f2fs_join_shrinker(struct f2fs_sb_info *sbi); 4063 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); 4064 4065 /* 4066 * extent_cache.c 4067 */ 4068 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, 4069 struct rb_entry *cached_re, unsigned int ofs); 4070 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, 4071 struct rb_root_cached *root, 4072 struct rb_node **parent, 4073 unsigned long long key, bool *left_most); 4074 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, 4075 struct rb_root_cached *root, 4076 struct rb_node **parent, 4077 unsigned int ofs, bool *leftmost); 4078 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, 4079 struct rb_entry *cached_re, unsigned int ofs, 4080 struct rb_entry **prev_entry, struct rb_entry **next_entry, 4081 struct rb_node ***insert_p, struct rb_node **insert_parent, 4082 bool force, bool *leftmost); 4083 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, 4084 struct rb_root_cached *root, bool check_key); 4085 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); 4086 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage); 4087 void f2fs_drop_extent_tree(struct inode *inode); 4088 unsigned int f2fs_destroy_extent_node(struct inode *inode); 4089 void f2fs_destroy_extent_tree(struct inode *inode); 4090 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, 4091 struct extent_info *ei); 4092 void f2fs_update_extent_cache(struct dnode_of_data *dn); 4093 void f2fs_update_extent_cache_range(struct dnode_of_data *dn, 4094 pgoff_t fofs, block_t blkaddr, unsigned int len); 4095 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); 4096 int __init f2fs_create_extent_cache(void); 4097 void 
f2fs_destroy_extent_cache(void); 4098 4099 /* 4100 * sysfs.c 4101 */ 4102 #define MIN_RA_MUL 2 4103 #define MAX_RA_MUL 256 4104 4105 int __init f2fs_init_sysfs(void); 4106 void f2fs_exit_sysfs(void); 4107 int f2fs_register_sysfs(struct f2fs_sb_info *sbi); 4108 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); 4109 4110 /* verity.c */ 4111 extern const struct fsverity_operations f2fs_verityops; 4112 4113 /* 4114 * crypto support 4115 */ 4116 static inline bool f2fs_encrypted_file(struct inode *inode) 4117 { 4118 return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); 4119 } 4120 4121 static inline void f2fs_set_encrypted_inode(struct inode *inode) 4122 { 4123 #ifdef CONFIG_FS_ENCRYPTION 4124 file_set_encrypt(inode); 4125 f2fs_set_inode_flags(inode); 4126 #endif 4127 } 4128 4129 /* 4130 * Returns true if the reads of the inode's data need to undergo some 4131 * postprocessing step, like decryption or authenticity verification. 4132 */ 4133 static inline bool f2fs_post_read_required(struct inode *inode) 4134 { 4135 return f2fs_encrypted_file(inode) || fsverity_active(inode) || 4136 f2fs_compressed_file(inode); 4137 } 4138 4139 /* 4140 * compress.c 4141 */ 4142 #ifdef CONFIG_F2FS_FS_COMPRESSION 4143 bool f2fs_is_compressed_page(struct page *page); 4144 struct page *f2fs_compress_control_page(struct page *page); 4145 int f2fs_prepare_compress_overwrite(struct inode *inode, 4146 struct page **pagep, pgoff_t index, void **fsdata); 4147 bool f2fs_compress_write_end(struct inode *inode, void *fsdata, 4148 pgoff_t index, unsigned copied); 4149 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); 4150 void f2fs_compress_write_end_io(struct bio *bio, struct page *page); 4151 bool f2fs_is_compress_backend_ready(struct inode *inode); 4152 int f2fs_init_compress_mempool(void); 4153 void f2fs_destroy_compress_mempool(void); 4154 void f2fs_decompress_cluster(struct decompress_io_ctx *dic); 4155 void f2fs_end_read_compressed_page(struct page *page, bool failed, 4156 block_t blkaddr); 4157 bool f2fs_cluster_is_empty(struct compress_ctx *cc); 4158 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); 4159 bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec, 4160 int index, int nr_pages); 4161 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn); 4162 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); 4163 int f2fs_write_multi_pages(struct compress_ctx *cc, 4164 int *submitted, 4165 struct writeback_control *wbc, 4166 enum iostat_type io_type); 4167 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); 4168 void f2fs_update_extent_tree_range_compressed(struct inode *inode, 4169 pgoff_t fofs, block_t blkaddr, unsigned int llen, 4170 unsigned int c_len); 4171 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, 4172 unsigned nr_pages, sector_t *last_block_in_bio, 4173 bool is_readahead, bool for_write); 4174 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); 4175 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed); 4176 void f2fs_put_page_dic(struct page *page); 4177 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn); 4178 int f2fs_init_compress_ctx(struct compress_ctx *cc); 4179 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); 4180 void f2fs_init_compress_info(struct f2fs_sb_info *sbi); 4181 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); 4182 void f2fs_destroy_compress_inode(struct 
f2fs_sb_info *sbi); 4183 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); 4184 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); 4185 int __init f2fs_init_compress_cache(void); 4186 void f2fs_destroy_compress_cache(void); 4187 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); 4188 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); 4189 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4190 nid_t ino, block_t blkaddr); 4191 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, 4192 block_t blkaddr); 4193 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); 4194 #define inc_compr_inode_stat(inode) \ 4195 do { \ 4196 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4197 sbi->compr_new_inode++; \ 4198 } while (0) 4199 #define add_compr_block_stat(inode, blocks) \ 4200 do { \ 4201 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ 4202 int diff = F2FS_I(inode)->i_cluster_size - blocks; \ 4203 sbi->compr_written_block += blocks; \ 4204 sbi->compr_saved_block += diff; \ 4205 } while (0) 4206 #else 4207 static inline bool f2fs_is_compressed_page(struct page *page) { return false; } 4208 static inline bool f2fs_is_compress_backend_ready(struct inode *inode) 4209 { 4210 if (!f2fs_compressed_file(inode)) 4211 return true; 4212 /* not support compression */ 4213 return false; 4214 } 4215 static inline struct page *f2fs_compress_control_page(struct page *page) 4216 { 4217 WARN_ON_ONCE(1); 4218 return ERR_PTR(-EINVAL); 4219 } 4220 static inline int f2fs_init_compress_mempool(void) { return 0; } 4221 static inline void f2fs_destroy_compress_mempool(void) { } 4222 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { } 4223 static inline void f2fs_end_read_compressed_page(struct page *page, 4224 bool failed, block_t blkaddr) 4225 { 4226 WARN_ON_ONCE(1); 4227 } 4228 static inline void f2fs_put_page_dic(struct page *page) 4229 { 4230 WARN_ON_ONCE(1); 4231 } 4232 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; } 4233 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; } 4234 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } 4235 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { } 4236 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } 4237 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } 4238 static inline int __init f2fs_init_compress_cache(void) { return 0; } 4239 static inline void f2fs_destroy_compress_cache(void) { } 4240 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, 4241 block_t blkaddr) { } 4242 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, 4243 struct page *page, nid_t ino, block_t blkaddr) { } 4244 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, 4245 struct page *page, block_t blkaddr) { return false; } 4246 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, 4247 nid_t ino) { } 4248 #define inc_compr_inode_stat(inode) do { } while (0) 4249 static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode, 4250 pgoff_t fofs, block_t blkaddr, unsigned int llen, 4251 unsigned int c_len) { } 4252 #endif 4253 4254 static inline void set_compress_context(struct inode *inode) 4255 { 4256 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4257 
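	/*
	 * Copy the compression policy chosen at mount time into the inode:
	 * algorithm, log2 cluster size, optional checksum flag and, for the
	 * lz4/zstd back-ends, the configured compression level.  The inode
	 * is then flagged as a compressed file and marked dirty.
	 */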
4258 F2FS_I(inode)->i_compress_algorithm = 4259 F2FS_OPTION(sbi).compress_algorithm; 4260 F2FS_I(inode)->i_log_cluster_size = 4261 F2FS_OPTION(sbi).compress_log_size; 4262 F2FS_I(inode)->i_compress_flag = 4263 F2FS_OPTION(sbi).compress_chksum ? 4264 1 << COMPRESS_CHKSUM : 0; 4265 F2FS_I(inode)->i_cluster_size = 4266 1 << F2FS_I(inode)->i_log_cluster_size; 4267 if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 || 4268 F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) && 4269 F2FS_OPTION(sbi).compress_level) 4270 F2FS_I(inode)->i_compress_flag |= 4271 F2FS_OPTION(sbi).compress_level << 4272 COMPRESS_LEVEL_OFFSET; 4273 F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; 4274 set_inode_flag(inode, FI_COMPRESSED_FILE); 4275 stat_inc_compr_inode(inode); 4276 inc_compr_inode_stat(inode); 4277 f2fs_mark_inode_dirty_sync(inode, true); 4278 } 4279 4280 static inline bool f2fs_disable_compressed_file(struct inode *inode) 4281 { 4282 struct f2fs_inode_info *fi = F2FS_I(inode); 4283 4284 if (!f2fs_compressed_file(inode)) 4285 return true; 4286 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode)) 4287 return false; 4288 4289 fi->i_flags &= ~F2FS_COMPR_FL; 4290 stat_dec_compr_inode(inode); 4291 clear_inode_flag(inode, FI_COMPRESSED_FILE); 4292 f2fs_mark_inode_dirty_sync(inode, true); 4293 return true; 4294 } 4295 4296 #define F2FS_FEATURE_FUNCS(name, flagname) \ 4297 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \ 4298 { \ 4299 return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \ 4300 } 4301 4302 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); 4303 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); 4304 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); 4305 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); 4306 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); 4307 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); 4308 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); 4309 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); 4310 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); 4311 F2FS_FEATURE_FUNCS(verity, VERITY); 4312 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); 4313 F2FS_FEATURE_FUNCS(casefold, CASEFOLD); 4314 F2FS_FEATURE_FUNCS(compression, COMPRESSION); 4315 F2FS_FEATURE_FUNCS(readonly, RO); 4316 4317 static inline bool f2fs_may_extent_tree(struct inode *inode) 4318 { 4319 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4320 4321 if (!test_opt(sbi, EXTENT_CACHE) || 4322 is_inode_flag_set(inode, FI_NO_EXTENT) || 4323 (is_inode_flag_set(inode, FI_COMPRESSED_FILE) && 4324 !f2fs_sb_has_readonly(sbi))) 4325 return false; 4326 4327 /* 4328 * for recovered files during mount do not create extents 4329 * if shrinker is not registered. 
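 * (sbi->s_list is only linked into the shrinker's list by
 * f2fs_join_shrinker(), so an empty list here means the shrinker has
 * not been registered for this superblock yet.)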
4330 */ 4331 if (list_empty(&sbi->s_list)) 4332 return false; 4333 4334 return S_ISREG(inode->i_mode); 4335 } 4336 4337 #ifdef CONFIG_BLK_DEV_ZONED 4338 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, 4339 block_t blkaddr) 4340 { 4341 unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz; 4342 4343 return test_bit(zno, FDEV(devi).blkz_seq); 4344 } 4345 #endif 4346 4347 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi) 4348 { 4349 return f2fs_sb_has_blkzoned(sbi); 4350 } 4351 4352 static inline bool f2fs_bdev_support_discard(struct block_device *bdev) 4353 { 4354 return blk_queue_discard(bdev_get_queue(bdev)) || 4355 bdev_is_zoned(bdev); 4356 } 4357 4358 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi) 4359 { 4360 int i; 4361 4362 if (!f2fs_is_multi_device(sbi)) 4363 return f2fs_bdev_support_discard(sbi->sb->s_bdev); 4364 4365 for (i = 0; i < sbi->s_ndevs; i++) 4366 if (f2fs_bdev_support_discard(FDEV(i).bdev)) 4367 return true; 4368 return false; 4369 } 4370 4371 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi) 4372 { 4373 return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) || 4374 f2fs_hw_should_discard(sbi); 4375 } 4376 4377 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) 4378 { 4379 int i; 4380 4381 if (!f2fs_is_multi_device(sbi)) 4382 return bdev_read_only(sbi->sb->s_bdev); 4383 4384 for (i = 0; i < sbi->s_ndevs; i++) 4385 if (bdev_read_only(FDEV(i).bdev)) 4386 return true; 4387 return false; 4388 } 4389 4390 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) 4391 { 4392 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; 4393 } 4394 4395 static inline bool f2fs_may_compress(struct inode *inode) 4396 { 4397 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) || 4398 f2fs_is_atomic_file(inode) || 4399 f2fs_is_volatile_file(inode)) 4400 return false; 4401 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); 4402 } 4403 4404 static inline void f2fs_i_compr_blocks_update(struct inode *inode, 4405 u64 blocks, bool add) 4406 { 4407 int diff = F2FS_I(inode)->i_cluster_size - blocks; 4408 struct f2fs_inode_info *fi = F2FS_I(inode); 4409 4410 /* don't update i_compr_blocks if saved blocks were released */ 4411 if (!add && !atomic_read(&fi->i_compr_blocks)) 4412 return; 4413 4414 if (add) { 4415 atomic_add(diff, &fi->i_compr_blocks); 4416 stat_add_compr_blocks(inode, diff); 4417 } else { 4418 atomic_sub(diff, &fi->i_compr_blocks); 4419 stat_sub_compr_blocks(inode, diff); 4420 } 4421 f2fs_mark_inode_dirty_sync(inode, true); 4422 } 4423 4424 static inline int block_unaligned_IO(struct inode *inode, 4425 struct kiocb *iocb, struct iov_iter *iter) 4426 { 4427 unsigned int i_blkbits = READ_ONCE(inode->i_blkbits); 4428 unsigned int blocksize_mask = (1 << i_blkbits) - 1; 4429 loff_t offset = iocb->ki_pos; 4430 unsigned long align = offset | iov_iter_alignment(iter); 4431 4432 return align & blocksize_mask; 4433 } 4434 4435 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, 4436 int flag) 4437 { 4438 if (!f2fs_is_multi_device(sbi)) 4439 return false; 4440 if (flag != F2FS_GET_BLOCK_DIO) 4441 return false; 4442 return sbi->aligned_blksize; 4443 } 4444 4445 static inline bool f2fs_force_buffered_io(struct inode *inode, 4446 struct kiocb *iocb, struct iov_iter *iter) 4447 { 4448 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4449 int rw = iov_iter_rw(iter); 4450 4451 if (f2fs_post_read_required(inode)) 4452 return true; 4453 4454 /* disallow direct IO if any of 
devices has unaligned blksize */ 4455 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) 4456 return true; 4457 /* 4458 * for blkzoned device, fallback direct IO to buffered IO, so 4459 * all IOs can be serialized by log-structured write. 4460 */ 4461 if (f2fs_sb_has_blkzoned(sbi)) 4462 return true; 4463 if (f2fs_lfs_mode(sbi) && (rw == WRITE)) { 4464 if (block_unaligned_IO(inode, iocb, iter)) 4465 return true; 4466 if (F2FS_IO_ALIGNED(sbi)) 4467 return true; 4468 } 4469 if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) 4470 return true; 4471 4472 return false; 4473 } 4474 4475 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) 4476 { 4477 return fsverity_active(inode) && 4478 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 4479 } 4480 4481 #ifdef CONFIG_F2FS_FAULT_INJECTION 4482 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, 4483 unsigned int type); 4484 #else 4485 #define f2fs_build_fault_attr(sbi, rate, type) do { } while (0) 4486 #endif 4487 4488 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) 4489 { 4490 #ifdef CONFIG_QUOTA 4491 if (f2fs_sb_has_quota_ino(sbi)) 4492 return true; 4493 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 4494 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || 4495 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) 4496 return true; 4497 #endif 4498 return false; 4499 } 4500 4501 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi) 4502 { 4503 return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; 4504 } 4505 4506 #define EFSBADCRC EBADMSG /* Bad CRC detected */ 4507 #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ 4508 4509 #endif /* _LINUX_F2FS_H */ 4510