/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
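
/*
 * Usage sketch (illustrative): the opt helpers paste F2FS_MOUNT_##option,
 * so callers pass only the flag suffix:
 *
 *	set_opt(sbi, DISCARD);		expands to .opt |= F2FS_MOUNT_DISCARD
 *	if (test_opt(sbi, INLINE_DATA))
 *		clear_opt(sbi, INLINE_DATA);
 *
 * ver_after() compares checkpoint versions with the signed-difference idiom,
 * so the result stays correct even if the 64-bit counter ever wrapped.
 */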
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080
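
/*
 * Sketch (illustrative): checkpoint reasons are a bitmask, so one call can
 * carry several of them. For example, a checkpoint written during unmount
 * after a completed trim could pass:
 *
 *	struct cp_control cpc = { .reason = CP_UMOUNT | CP_TRIMMED };
 *
 * (struct cp_control is declared just below.)
 */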
#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16
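
/*
 * Sketch (illustrative): discard_map carries one bit per block of the
 * segment, so marking the n'th block of the segment as discardable is
 * conceptually a bitmap update such as:
 *
 *	__set_bit_le(n, (void *)de->discard_map);
 *
 * where "de" is a hypothetical struct discard_entry pointer.
 */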
/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};
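
/*
 * Sketch (illustrative): a background policy is typically built from the
 * DEF_* timing knobs above, along the lines of:
 *
 *	struct discard_policy dpolicy = {
 *		.type		= DPOLICY_BG,
 *		.min_interval	= DEF_MIN_DISCARD_ISSUE_TIME,
 *		.mid_interval	= DEF_MID_DISCARD_ISSUE_TIME,
 *		.max_interval	= DEF_MAX_DISCARD_ISSUE_TIME,
 *		.max_requests	= DEF_MAX_DISCARD_REQUEST,
 *		.io_aware	= true,
 *	};
 *
 * The real initialization lives in segment.c; the values here only show how
 * the fields and the defaults above relate.
 */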
struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
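
/*
 * Worked example (illustrative, assuming the default inode layout: 923
 * block-address slots, 50 inline-xattr slots, no extra attributes):
 *
 *	MAX_INLINE_DATA           = 4 * (923 - 50 - 1)            = 3488 bytes
 *	NR_INLINE_DENTRY          = 3488 * 8 / ((11 + 8) * 8 + 1) = 182
 *	INLINE_DENTRY_BITMAP_SIZE = DIV_ROUND_UP(182, 8)          = 23 bytes
 *	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)        = 7 bytes
 *
 * i.e. an inline directory packs 182 dentries, their name slots, a 23-byte
 * slot bitmap, and 7 spare bytes into the inline data area.
 */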
/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_filename {
	/*
	 * The filename the user specified. This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename. For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL. In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
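
/*
 * Layout sketch (illustrative) matching the pointer math above: the inline
 * dentry area "t" is parsed as
 *
 *	+--------+----------+------------------+------------------+
 *	| bitmap | reserved | dentry[0..max-1] | filename slots   |
 *	+--------+----------+------------------+------------------+
 *	t        +bitmap     +bitmap+reserved   +11 * max further on
 *
 * With the 182-entry example above, dentries start 30 bytes (23 + 7) into
 * the inline area and the name slots follow the 182 fixed-size entries.
 */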
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bit key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |	\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
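
/*
 * Usage sketch (illustrative): a caller fills in the logical range and asks
 * f2fs_map_blocks() (declared later in this header) to resolve it, e.g. for
 * fiemap:
 *
 *	struct f2fs_map_blocks map = { .m_lblk = start, .m_len = len };
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
 *
 * On success, map.m_pblk/m_len describe the mapped extent and map.m_flags
 * carries the F2FS_MAP_* bits defined above.
 */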
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode)	clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
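
/*
 * Sketch (illustrative): these wrappers test/set/clear bits in the inode's
 * i_advise byte, so steering a file's data toward a cold log is simply:
 *
 *	file_set_cold(inode);
 *	if (file_is_cold(inode))
 *		... allocate its blocks from a cold data log ...
 *
 * is_file()/set_file()/clear_file() are defined later in this header.
 */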
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	pgoff_t ra_offset;		/* ongoing readahead offset */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
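
/*
 * Worked example (illustrative): two cached extents merge only when both the
 * file range and the block range are contiguous. With
 *
 *	back  = { .fofs = 100, .len = 8, .blk = 5000 }
 *	front = { .fofs = 108, .len = 4, .blk = 5008 }
 *
 * __is_extent_mergeable(&back, &front) is true (100 + 8 == 108 and
 * 5000 + 8 == 5008), so the pair can collapse into one 12-block extent.
 * The discard variants apply the same continuity test on lstart/len, with
 * an extra max_len cap on the merged command.
 */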
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the nat entry cache */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
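
/*
 * Usage sketch (illustrative): the common lookup pattern initializes an
 * empty dnode and lets the node manager resolve the given file offset:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 *
 * f2fs_get_dnode_of_data() and f2fs_put_dnode() are declared later in this
 * header.
 */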
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};
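
/*
 * Sketch (illustrative): flush merging is a lock-free producer/consumer
 * split. Writers enqueue a flush_cmd and sleep on its completion; the flush
 * thread drains the whole list in one shot and completes each entry:
 *
 *	llist_add(&cmd.llnode, &fcc->issue_list);	producer (fsync path)
 *
 *	fcc->dispatch_list = llist_del_all(&fcc->issue_list);	consumer
 *	... issue one device flush, then complete(&cmd->wait)
 *	    for every drained entry ...
 */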
struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			with waiting the bio's completion
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};
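
/*
 * Worked example (illustrative): PAGE_TYPE_OF_BIO() folds the pseudo and
 * tracepoint-only types back onto a real bio type, e.g.
 *
 *	PAGE_TYPE_OF_BIO(META_FLUSH) == META
 *	PAGE_TYPE_OF_BIO(DATA) == DATA
 *
 * so only DATA/NODE/META ever index the per-type write_io[] arrays.
 */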
enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};
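
/*
 * Sketch (illustrative): FDEV() assumes a local "sbi" variable, which is why
 * multi-device code reads like
 *
 *	for (i = 0; i < sbi->s_ndevs; i++)
 *		if (FDEV(i).start_blk <= blkaddr &&
 *					blkaddr <= FDEV(i).end_blk)
 *			return i;
 *
 * i.e. a linear search mapping a global block address to the device that
 * backs it (see f2fs_target_device_index() in the source for the real one).
 */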
enum inode_type {
	DIR_INODE,		/* for dirty dir inode */
	FILE_INODE,		/* for dirty regular/symlink inode */
	DIRTY_META,		/* for all dirtied inode metadata */
	ATOMIC_FILE,		/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is doing or not */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

/*
 * this value is set in page as a private data which indicates that
 * the page is atomically written, and it is in inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == DUMMY_WRITTEN_PAGE)

#ifdef CONFIG_F2FS_IO_TRACE
#define IS_IO_TRACED_PAGE(page)				\
		(page_private(page) > 0 &&		\
		page_private(page) < (unsigned long)PID_MAX_LIMIT)
#else
#define IS_IO_TRACED_PAGE(page) (0)
#endif

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
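
/*
 * Worked example (illustrative): an on-disk compressed cluster starts with
 * this fixed header, so
 *
 *	COMPRESS_HEADER_SIZE = 4 (clen) + 4 (chksum) + 4 * 4 (reserved)
 *			     = 24 bytes
 *
 * and cdata[] (the algorithm's output) begins immediately after it. A
 * cluster is kept compressed only if 24 + clen still saves at least one
 * block over the raw cluster.
 */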
/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* decompress io context for read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	atomic_t pending_pages;		/* in-flight compressed page count */
	bool failed;			/* indicate IO error during decompression */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
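
/*
 * Worked example (illustrative, assuming 4KB pages): cluster sizes range
 * from 2^2 = 4 pages (a 16KB window) at MIN_COMPRESS_LOG_SIZE up to
 * 2^8 = 256 pages (a 1MB window) at MAX_COMPRESS_LOG_SIZE, since
 * MAX_COMPRESS_WINDOW_SIZE(log) == PAGE_SIZE << log.
 */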

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;		/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfile */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];		/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic file */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */
	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */

#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct kmem_cache *page_array_slab;	/* page array entry */
	unsigned int page_array_slab_size;	/* default page array slab size */
#endif
};

struct f2fs_private_dio {
	struct inode *inode;
	void *orig_private;
	bio_end_io_t *orig_end_io;
	bool write;
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif

/*
 * Test if the mounted volume is a multi-device volume.
 *   - For a single regular disk volume, sbi->s_ndevs is 1.
 *   - For a single zoned disk volume, sbi->s_ndevs is 1.
 *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}

/*
 * Inline functions
 */
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}
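/*
 * Illustrative sketch only (not part of this header): every fault
 * injection site pairs time_to_inject() with f2fs_show_injection_info()
 * and then fails exactly as a real error would, mirroring the pattern
 * f2fs_kmalloc() uses further below. demo_alloc() is a hypothetical
 * caller name.
 */
static inline void *demo_alloc(struct f2fs_sb_info *sbi, size_t size)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;		/* simulate allocation failure */
	}
	return kmalloc(size, GFP_NOFS);
}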
static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}

static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
{
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}
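/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * verifying a metadata block against a stored checksum. f2fs_crc32()
 * always seeds with the superblock magic, while f2fs_chksum() lets a
 * caller chain its own seed, e.g. sbi->s_chksum_seed.
 */
static inline bool demo_verify_block(struct f2fs_sb_info *sbi,
				     void *blk, __u32 stored_crc)
{
	return f2fs_crc_valid(sbi, stored_crc, blk, F2FS_BLKSIZE);
}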
static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);

	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
	unsigned long flags;
	unsigned char *nat_bits;

	/*
	 * In order to re-enable nat_bits we would need to call fsck.f2fs
	 * via set_sbi_flag(sbi, SBI_NEED_FSCK), but that may be very
	 * costly, so let's rely on regular fsck or unclean shutdown.
	 */

	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	nat_bits = NM_I(sbi)->nat_bits;
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);

	kvfree(nat_bits);
}
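/*
 * Illustrative sketch (hypothetical call site, not part of this header):
 * checkpoint-flag updates always go through the cp_lock'ed wrappers so
 * concurrent updaters cannot lose bits during the read-modify-write of
 * cp->ckpt_flags.
 */
static inline void demo_mark_trimmed(struct f2fs_sb_info *sbi, bool trimmed)
{
	if (trimmed)
		set_ckpt_flags(sbi, CP_TRIMMED_FLAG);
	else
		clear_ckpt_flags(sbi, CP_TRIMMED_FLAG);
}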
static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
					struct cp_control *cpc)
{
	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);

	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	return down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}

	/*
	 * let's increase this prior to the actual block count change in
	 * order for f2fs_sync_file to avoid data races when deciding
	 * checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count -
					sbi->current_reserved_blocks;

	if (!__allow_reserved_blocks(sbi, inode, true))
		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (avail_user_block_count > sbi->unusable_block_count)
			avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}

__printf(2, 3)
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
#define f2fs_debug(sbi, fmt, ...)					\
	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	sbi->total_valid_block_count -= (block_t)count;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
					sbi->current_reserved_blocks + count);
	spin_unlock(&sbi->stat_lock);
	if (unlikely(inode->i_blocks < sectors)) {
		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
			  inode->i_ino,
			  (unsigned long long)inode->i_blocks,
			  (unsigned long long)sectors);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return;
	}
	f2fs_i_blocks_write(inode, count, false, true);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DENTS ||
			count_type == F2FS_DIRTY_NODES ||
			count_type == F2FS_DIRTY_META ||
			count_type == F2FS_DIRTY_QDATA ||
			count_type == F2FS_DIRTY_IMETA)
		set_sbi_flag(sbi, SBI_IS_DIRTY);
}
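/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * block allocation is reserve-then-commit. Quota is reserved first, the
 * in-memory counters are bumped under stat_lock, and any portion that
 * did not fit is rolled back, so *count may come back smaller than
 * requested.
 */
static inline int demo_reserve_blocks(struct f2fs_sb_info *sbi,
				      struct inode *inode, blkcnt_t want)
{
	blkcnt_t count = want;
	int err = inc_valid_block_count(sbi, inode, &count);

	if (err)
		return err;		/* -ENOSPC or quota failure */
	if (count < want)
		f2fs_info(sbi, "only %llu of %llu blocks reserved",
			  (unsigned long long)count,
			  (unsigned long long)want);
	return 0;
}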
static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
	if (IS_NOQUOTA(inode))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
					sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset;

	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
		offset = (flag == SIT_BITMAP) ?
			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
		/*
		 * if large_nat_bitmap feature is enabled, leave checksum
		 * protection for all nat/sit bitmaps.
		 */
		return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32);
	}

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return &ckpt->sit_nat_version_bitmap + offset;
	}
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		if (inode) {
			err = dquot_alloc_inode(inode);
			if (err)
				return err;
		}
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		goto enospc;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count +
					sbi->current_reserved_blocks + 1;

	if (!__allow_reserved_blocks(sbi, inode, false))
		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
	user_block_count = sbi->user_block_count;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		user_block_count -= sbi->unusable_block_count;

	if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode) {
		if (inode)
			dquot_free_inode(inode);
	} else {
		dquot_release_reservation_block(inode, 1);
	}
	return -ENOSPC;
}
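/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the two checkpoint packs occupy consecutive segments starting at
 * cp_blkaddr and writes ping-pong between them. For example, assuming
 * cp_blkaddr = 512 and blocks_per_seg = 512, pack #1 starts at block
 * 512 and pack #2 at block 1024; after each committed checkpoint,
 * __set_cp_next_pack() flips which pack __start_cp_next_addr() returns.
 */
static inline void demo_cp_pack_layout(struct f2fs_sb_info *sbi)
{
	block_t cur = __start_cp_addr(sbi);		/* pack being kept */
	block_t next = __start_cp_next_addr(sbi);	/* pack to overwrite */

	f2fs_info(sbi, "cp pack %u at %u, next write at %u",
		  sbi->cur_cp_pack, cur, next);
}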
static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
	f2fs_bug_on(sbi, !sbi->total_valid_node_count);

	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;
	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks++;

	spin_unlock(&sbi->stat_lock);

	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
			f2fs_show_injection_info(F2FS_M_SB(mapping),
							FAULT_PAGE_ALLOC);
			return NULL;
		}
	}

	if (!for_write)
		return grab_cache_page(mapping, index);
	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}

static inline struct page *f2fs_pagecache_get_page(
				struct address_space *mapping, pgoff_t index,
				int fgp_flags, gfp_t gfp_mask)
{
	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
		return NULL;
	}

	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}

static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}
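/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the first kmem_cache_alloc() attempt above may fail under memory
 * pressure; only the retry adds __GFP_NOFAIL, so small must-succeed
 * metadata allocations never return NULL while the common path avoids
 * the NOFAIL semantics.
 */
static inline void *demo_alloc_entry(struct kmem_cache *slab)
{
	/* no NULL check needed: the retry path uses __GFP_NOFAIL */
	return f2fs_kmem_cache_alloc(slab, GFP_NOFS);
}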
static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return true;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return true;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return true;
	return false;
}

static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;

	if (is_inflight_io(sbi, type))
		return false;

	if (sbi->gc_mode == GC_URGENT_LOW &&
			(type == DISCARD_TIME || type == GC_TIME))
		return true;

	return f2fs_time_over(sbi, type);
}

static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline int f2fs_has_extra_attr(struct inode *inode);
static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}

static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}
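/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * unlike the kernel's native set_bit()/test_bit(), which operate on
 * longs, these helpers number bits MSB-first within each byte to match
 * the on-disk bitmap layout. Worked example for nr = 10: byte index is
 * 10 >> 3 = 1 and the mask is 1 << (7 - (10 & 7)) = 0x20, so setting
 * bit 10 turns map[1] from 0x00 into 0x20.
 */
static inline void demo_bitmap_layout(void)
{
	char map[2] = { 0, 0 };

	f2fs_set_bit(10, map);			/* map[1] == 0x20 */
	WARN_ON(!f2fs_test_bit(10, map));
	WARN_ON(f2fs_test_and_clear_bit(10, map) == 0);
}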
/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
#define F2FS_COMPR_FL			0x00000004 /* Compress file */
#define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
#define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
#define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
#define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */

/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}
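/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * masking a parent's inherited flags for a new regular file. A parent
 * directory with NOATIME|DIRSYNC|CASEFOLD passes all three through
 * F2FS_FL_INHERITED, but f2fs_mask_flags() then strips the
 * directory-only bits, leaving just F2FS_NOATIME_FL for the child.
 */
static inline __u32 demo_child_flags(void)
{
	__u32 parent = F2FS_NOATIME_FL | F2FS_DIRSYNC_FL | F2FS_CASEFOLD_FL;

	return f2fs_mask_flags(S_IFREG, parent & F2FS_FL_INHERITED);
}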
static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add = 1, claim = 1 should be dquot_reserve_block in pair */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}
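/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * for compressed files the usable address-slot count is rounded down to
 * a whole number of clusters so no cluster straddles a node boundary.
 * E.g. with a 16-block cluster and 923 slots remaining after the extra
 * attrs and inline xattr space, ALIGN_DOWN(923, 16) = 912.
 */
static inline unsigned int demo_cluster_align(unsigned int addrs)
{
	return ALIGN_DOWN(addrs, 16);	/* demo_cluster_align(923) == 912 */
}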
static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}

static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}

static inline bool f2fs_is_volatile_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
			      &F2FS_I(inode)->i_crtime))
		return false;
	return true;
}
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT) ||
			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
		return false;

	/*
	 * for files recovered during mount, do not create extents
	 * if the shrinker is not yet registered.
	 */
	if (list_empty(&sbi->s_list))
		return false;

	return S_ISREG(inode->i_mode);
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	return kmalloc(size, flags);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}
#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))	\

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
		((offsetof(typeof(*(f2fs_inode)), field) +	\
		sizeof((f2fs_inode)->field))			\
		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\

#define DEFAULT_IOSTAT_PERIOD_MS	3000
#define MIN_IOSTAT_PERIOD_MS		100
/* maximum period of iostat tracing is 1 day */
#define MAX_IOSTAT_PERIOD_MS		8640000

static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	int i;

	spin_lock(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock(&sbi->iostat_lock);
}

extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);

static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	if (!sbi->iostat_enable)
		return;
	spin_lock(&sbi->iostat_lock);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_BUFFERED_IO] =
			sbi->rw_iostat[APP_WRITE_IO] -
			sbi->rw_iostat[APP_DIRECT_IO];

	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
			sbi->rw_iostat[APP_READ_IO] -
			sbi->rw_iostat[APP_DIRECT_READ_IO];
	spin_unlock(&sbi->iostat_lock);

	f2fs_record_iostat(sbi);
}

#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}

static inline void f2fs_set_page_private(struct page *page,
						unsigned long data)
{
	if (PagePrivate(page))
		return;

	attach_page_private(page, (void *)data);
}

static inline void f2fs_clear_page_private(struct page *page)
{
	detach_page_private(page);
}
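/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * probing whether an on-disk inode really carries a given extended
 * field. F2FS_FITS_IN_INODE() just checks that the field ends within
 * the fixed prefix plus the inode's advertised extra_isize, which is
 * how optional attributes such as i_crtime are detected.
 */
static inline bool demo_has_crtime(struct f2fs_inode *ri)
{
	return F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				  i_crtime);
}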
/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int flags);
int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
							bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
int f2fs_init_casefolded_name(const struct inode *dir,
			      struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}

/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);

/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
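
/*
 * Example of the block-address validation above (a sketch of the common
 * pattern, not a specific caller): any block address read from on-disk
 * metadata is sanity-checked before it is dereferenced, e.g.:
 *
 *	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
 *		return -EFSCORRUPTED;
 */
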
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
					int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
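
/*
 * Orphan-inode sketch (a summary of the intended pairing, not a quote
 * of the real callers): f2fs_acquire_orphan_inode() reserves room for
 * one orphan entry; the caller then records the inode with
 * f2fs_add_orphan_inode(), or undoes the reservation with
 * f2fs_release_orphan_inode() if it bails out early.
 * f2fs_remove_orphan_inode() drops the entry once the inode is no
 * longer orphaned, and f2fs_recover_orphan_inodes() replays the
 * persisted list at mount time.
 */
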
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode,
			struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
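
/*
 * Rough summary of the post-read helpers above (see the data.c read
 * path for the authoritative version): f2fs_init_post_read_wq() sets up
 * a per-superblock workqueue; when a read bio completes for a file that
 * needs postprocessing (see f2fs_post_read_required() further down),
 * its pages are handed to that workqueue for decryption, fs-verity
 * verification and/or decompression before they are unlocked.
 */
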

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
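
/*
 * stat_inc_meta_count() above classifies a meta block by where its
 * address falls in the fixed on-disk layout, which orders the metadata
 * areas as CP < SIT < NAT < SSA < main area; comparing blkaddr against
 * each area's start address is therefore enough to attribute the access.
 */
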
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_atomic_write(inode)			do { } while (0)
#define stat_dec_atomic_write(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
						struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
					const struct f2fs_filename *fname,
					struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
				struct page *page, struct inode *dir,
				struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
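
/*
 * Read-path sketch of the extent cache API above (modelled on
 * f2fs_map_blocks(), not a verbatim quote): a cached extent lets the
 * lookup skip the node-block walk entirely:
 *
 *	struct extent_info ei;
 *
 *	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 *		blkaddr = ei.blk + pgofs - ei.fofs;
 *		goto found;
 *	}
 */
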
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
						int *submitted,
						struct writeback_control *wbc,
						enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic);
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression support is not compiled in */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
#endif

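/*
 * set_compress_context() below seeds a new compressed inode from the
 * current mount options. Note the cluster-size arithmetic: with the
 * default compress_log_size of 2, i_cluster_size becomes 1 << 2 = 4
 * blocks, i.e. 16KiB of data per compression cluster on 4KiB blocks.
 */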
static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
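
/*
 * Each F2FS_FEATURE_FUNCS() invocation above expands to a small
 * predicate; for example, F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) defines:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */
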
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) ||
		f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}
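
/*
 * Worked example for block_unaligned_IO() above: with 4KiB blocks,
 * i_blkbits is 12 and blocksize_mask is 0xfff. ORing the file offset
 * with iov_iter_alignment() (which itself ORs together the iovec base
 * addresses and lengths) makes the result nonzero, i.e. "unaligned",
 * as soon as any of those values is not a multiple of the block size.
 */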

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * On a zoned block device, fall back from direct IO to buffered
	 * IO, so that all IOs can be serialized by the log-structured
	 * write path.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
					!IS_SWAPFILE(inode))
		return true;

	return false;
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */