/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			WARN_ON(1);					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
		}							\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
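
/*
 * Example (illustration only): ver_after() orders 64-bit version numbers
 * using a signed difference, so the comparison stays correct even if the
 * counter ever wrapped.  With a == 2 and b == ULLONG_MAX - 1, the unsigned
 * difference (a - b) is 4, and the (long long) cast keeps it positive, so
 * ver_after(a, b) is true.
 */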

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned compress_log_size;		/* cluster log size */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
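
/*
 * Usage sketch (illustration only): these helpers test or update the feature
 * word of the cached on-disk superblock, e.g.
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT))
 *		...the image was formatted with encryption support...
 *
 * F2FS_SET_FEATURE()/F2FS_CLEAR_FEATURE() only modify the in-memory copy;
 * persisting the superblock is handled elsewhere.
 */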

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST	8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL	80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
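
/*
 * Example (illustration only): pending discard commands are bucketed by
 * length, so plist_idx(1) == 0, plist_idx(16) == 15, and any request of
 * MAX_PLIST_NUM blocks or more (e.g. plist_idx(512) or plist_idx(4096))
 * falls into the last bucket, MAX_PLIST_NUM - 1.
 */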

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
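
/*
 * Usage sketch (illustration only): before appending NAT/SIT entries to the
 * in-place journal kept in the current summary block, callers check whether
 * the journal still has room, and fall back to the regular NAT/SIT area
 * otherwise:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		...journal the nat entry...
 *	else
 *		...write it through the NAT blocks instead...
 */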

/*
 * f2fs-specific ioctl commands
 */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT		_IOWR(F2FS_IOCTL_MAGIC, 8,	\
						struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
						struct f2fs_move_range)
#define F2FS_IOC_FLUSH_DEVICE		_IOW(F2FS_IOCTL_MAGIC, 10,	\
						struct f2fs_flush_device)
#define F2FS_IOC_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,	\
						struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
#define F2FS_IOC_SET_PIN_FILE		_IOW(F2FS_IOCTL_MAGIC, 13, __u32)
#define F2FS_IOC_GET_PIN_FILE		_IOR(F2FS_IOCTL_MAGIC, 14, __u32)
#define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS		_IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#define F2FS_IOC_GET_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 17, __u64)
#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS				\
					_IOR(F2FS_IOCTL_MAGIC, 18, __u64)
#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS				\
					_IOR(F2FS_IOCTL_MAGIC, 19, __u64)
#define F2FS_IOC_SEC_TRIM_FILE		_IOW(F2FS_IOCTL_MAGIC, 20,	\
						struct f2fs_sectrim_range)

/*
 * should be the same as XFS_IOC_GOINGDOWN.
 * Flags for going down operation used by FS_IOC_GOINGDOWN
 */
#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */

/*
 * Flags used by F2FS_IOC_SEC_TRIM_FILE
 */
#define F2FS_TRIM_FILE_DISCARD		0x1	/* send discard command */
#define F2FS_TRIM_FILE_ZEROOUT		0x2	/* zero out */
#define F2FS_TRIM_FILE_MASK		0x3

struct f2fs_gc_range {
	u32 sync;
	u64 start;
	u64 len;
};

struct f2fs_defragment {
	u64 start;
	u64 len;
};

struct f2fs_move_range {
	u32 dst_fd;		/* destination fd */
	u64 pos_in;		/* start position in src_fd */
	u64 pos_out;		/* start position in dst_fd */
	u64 len;		/* size to move */
};

struct f2fs_flush_device {
	u32 dev_num;		/* device number to flush */
	u32 segments;		/* # of segments to flush */
};

struct f2fs_sectrim_range {
	u64 start;
	u64 len;
	u64 flags;
};

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -	\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
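
/*
 * Layout note (illustration only): an inline directory packs, inside the
 * MAX_INLINE_DATA() bytes of the inode, a dentry bitmap, a small reserved
 * gap, NR_INLINE_DENTRY() fixed-size dentries and the matching filename
 * slots.  Each entry costs one bitmap bit plus a dentry and a slot, so
 * NR_INLINE_DENTRY() is the largest N with
 *	N * ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1)
 *		<= MAX_INLINE_DATA() * BITS_PER_BYTE,
 * and INLINE_RESERVED_SIZE() is whatever is left over; see
 * make_dentry_ptr_inline() below for how the pieces are addressed.
 */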

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode or if the filesystem is
	 * doing an internal operation where usr_fname is also NULL.  In these
	 * cases we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bits key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
	bool largest_updated;		/* largest extent updated */
};
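
/*
 * Example (illustration only): an extent_info of { .fofs = 0, .len = 8,
 * .blk = 1000 } records that file offsets 0..7 (in blocks) live in the
 * consecutive on-disk blocks 1000..1007, so reads in that range can be
 * mapped from the cache without walking the node tree.
 */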

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode)	clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data*/
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs*/
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	pgoff_t ra_offset;		/* ongoing readahead offset */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
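
/*
 * Example (illustration only): two cached extents merge when they are
 * contiguous both in the file and on disk.  With
 *	back  = { .fofs = 0, .len = 4, .blk = 100 }
 *	front = { .fofs = 4, .len = 2, .blk = 104 }
 * __is_extent_mergeable(back, front) is true, so the cache can keep one
 * extent { .fofs = 0, .len = 6, .blk = 100 } instead of two nodes.
 */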

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the nat entry cache */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * This structure is used as a function parameter.
 * All the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
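
/*
 * Usage sketch (illustration only): callers typically zero-initialise the
 * descriptor and then let the node manager fill in the pages, e.g.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	...use dn.data_blkaddr / dn.ofs_in_node, then f2fs_put_dnode(&dn)...
 *
 * f2fs_get_dnode_of_data() and f2fs_put_dnode() are declared later in this
 * header.
 */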

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, the active_logs=x mount option can select 2, 4, or 6 logs
 * according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs that consist
 * of 8 for data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written by waiting
 *			for the bio's completion.
 * ...			Only can be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;	/* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

/*
 * this value is set in page as a private data which indicates that
 * the page is atomically written, and it is in inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == DUMMY_WRITTEN_PAGE)

#ifdef CONFIG_F2FS_IO_TRACE
#define IS_IO_TRACED_PAGE(page)			\
		(page_private(page) > 0 &&		\
			page_private(page) < (unsigned long)PID_MAX_LIMIT)
#else
#define IS_IO_TRACED_PAGE(page) (0)
#endif

#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) \
	(unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL))
#else
#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
#endif

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

#define COMPRESS_DATA_RESERVED_SIZE		5
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
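
/*
 * Layout note (illustration only): a compressed cluster is stored as a
 * struct compress_data header followed by the payload, so
 * COMPRESS_HEADER_SIZE is sizeof(__le32) * (1 + COMPRESS_DATA_RESERVED_SIZE),
 * i.e. 24 bytes, and clen counts only the bytes in cdata[].
 */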

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* decompress io context for read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	atomic_t pending_pages;		/* in-flight compressed page count */
	bool failed;			/* indicate IO error during decompression */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number*/
	unsigned int node_ino_num;		/* node inode number*/
	unsigned int meta_ino_num;		/* meta inode number*/
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfile */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	struct atgc_management am;		/* atgc management */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic file */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic64_t compr_blocks;		/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;
1621 struct workqueue_struct *post_read_wq; /* post read workqueue */ 1622 1623 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ 1624 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ 1625 1626 #ifdef CONFIG_F2FS_FS_COMPRESSION 1627 struct kmem_cache *page_array_slab; /* page array entry */ 1628 unsigned int page_array_slab_size; /* default page array slab size */ 1629 #endif 1630 }; 1631 1632 struct f2fs_private_dio { 1633 struct inode *inode; 1634 void *orig_private; 1635 bio_end_io_t *orig_end_io; 1636 bool write; 1637 }; 1638 1639 #ifdef CONFIG_F2FS_FAULT_INJECTION 1640 #define f2fs_show_injection_info(sbi, type) \ 1641 printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \ 1642 KERN_INFO, sbi->sb->s_id, \ 1643 f2fs_fault_name[type], \ 1644 __func__, __builtin_return_address(0)) 1645 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1646 { 1647 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1648 1649 if (!ffi->inject_rate) 1650 return false; 1651 1652 if (!IS_FAULT_SET(ffi, type)) 1653 return false; 1654 1655 atomic_inc(&ffi->inject_ops); 1656 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1657 atomic_set(&ffi->inject_ops, 0); 1658 return true; 1659 } 1660 return false; 1661 } 1662 #else 1663 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1664 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1665 { 1666 return false; 1667 } 1668 #endif 1669 1670 /* 1671 * Test if the mounted volume is a multi-device volume. 1672 * - For a single regular disk volume, sbi->s_ndevs is 0. 1673 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1674 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 1675 */ 1676 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1677 { 1678 return sbi->s_ndevs > 1; 1679 } 1680 1681 /* For write statistics. Assuming the sector size is 512 bytes, 1682 * the return value is in kilobytes. s points to a struct f2fs_sb_info. 
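 * For example (illustrative numbers only): if part_stat_read() reports
 * 4096 sectors written beyond sectors_written_start, BD_PART_WRITTEN(s)
 * evaluates to 4096 >> 1 = 2048, i.e. 2048 KB, since each 512-byte
 * sector is half a kilobyte.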
1683 */ 1684 #define BD_PART_WRITTEN(s) \ 1685 (((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) - \ 1686 (s)->sectors_written_start) >> 1) 1687 1688 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1689 { 1690 unsigned long now = jiffies; 1691 1692 sbi->last_time[type] = now; 1693 1694 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1695 if (type == REQ_TIME) { 1696 sbi->last_time[DISCARD_TIME] = now; 1697 sbi->last_time[GC_TIME] = now; 1698 } 1699 } 1700 1701 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1702 { 1703 unsigned long interval = sbi->interval_time[type] * HZ; 1704 1705 return time_after(jiffies, sbi->last_time[type] + interval); 1706 } 1707 1708 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1709 int type) 1710 { 1711 unsigned long interval = sbi->interval_time[type] * HZ; 1712 unsigned int wait_ms = 0; 1713 long delta; 1714 1715 delta = (sbi->last_time[type] + interval) - jiffies; 1716 if (delta > 0) 1717 wait_ms = jiffies_to_msecs(delta); 1718 1719 return wait_ms; 1720 } 1721 1722 /* 1723 * Inline functions 1724 */ 1725 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1726 const void *address, unsigned int length) 1727 { 1728 struct { 1729 struct shash_desc shash; 1730 char ctx[4]; 1731 } desc; 1732 int err; 1733 1734 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1735 1736 desc.shash.tfm = sbi->s_chksum_driver; 1737 *(u32 *)desc.ctx = crc; 1738 1739 err = crypto_shash_update(&desc.shash, address, length); 1740 BUG_ON(err); 1741 1742 return *(u32 *)desc.ctx; 1743 } 1744 1745 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1746 unsigned int length) 1747 { 1748 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1749 } 1750 1751 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1752 void *buf, size_t buf_size) 1753 { 1754 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1755 } 1756 1757 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1758 const void *address, unsigned int length) 1759 { 1760 return __f2fs_crc32(sbi, crc, address, length); 1761 } 1762 1763 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1764 { 1765 return container_of(inode, struct f2fs_inode_info, vfs_inode); 1766 } 1767 1768 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1769 { 1770 return sb->s_fs_info; 1771 } 1772 1773 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1774 { 1775 return F2FS_SB(inode->i_sb); 1776 } 1777 1778 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1779 { 1780 return F2FS_I_SB(mapping->host); 1781 } 1782 1783 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1784 { 1785 return F2FS_M_SB(page_file_mapping(page)); 1786 } 1787 1788 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1789 { 1790 return (struct f2fs_super_block *)(sbi->raw_super); 1791 } 1792 1793 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1794 { 1795 return (struct f2fs_checkpoint *)(sbi->ckpt); 1796 } 1797 1798 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1799 { 1800 return (struct f2fs_node *)page_address(page); 1801 } 1802 1803 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1804 { 1805 return &((struct f2fs_node *)page_address(page))->i; 1806 } 1807 1808 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 
1809 { 1810 return (struct f2fs_nm_info *)(sbi->nm_info); 1811 } 1812 1813 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1814 { 1815 return (struct f2fs_sm_info *)(sbi->sm_info); 1816 } 1817 1818 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1819 { 1820 return (struct sit_info *)(SM_I(sbi)->sit_info); 1821 } 1822 1823 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1824 { 1825 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1826 } 1827 1828 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1829 { 1830 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 1831 } 1832 1833 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 1834 { 1835 return sbi->meta_inode->i_mapping; 1836 } 1837 1838 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 1839 { 1840 return sbi->node_inode->i_mapping; 1841 } 1842 1843 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 1844 { 1845 return test_bit(type, &sbi->s_flag); 1846 } 1847 1848 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1849 { 1850 set_bit(type, &sbi->s_flag); 1851 } 1852 1853 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1854 { 1855 clear_bit(type, &sbi->s_flag); 1856 } 1857 1858 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 1859 { 1860 return le64_to_cpu(cp->checkpoint_ver); 1861 } 1862 1863 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 1864 { 1865 if (type < F2FS_MAX_QUOTAS) 1866 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 1867 return 0; 1868 } 1869 1870 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 1871 { 1872 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 1873 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 1874 } 1875 1876 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1877 { 1878 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1879 1880 return ckpt_flags & f; 1881 } 1882 1883 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1884 { 1885 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 1886 } 1887 1888 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1889 { 1890 unsigned int ckpt_flags; 1891 1892 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1893 ckpt_flags |= f; 1894 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1895 } 1896 1897 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1898 { 1899 unsigned long flags; 1900 1901 spin_lock_irqsave(&sbi->cp_lock, flags); 1902 __set_ckpt_flags(F2FS_CKPT(sbi), f); 1903 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1904 } 1905 1906 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1907 { 1908 unsigned int ckpt_flags; 1909 1910 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1911 ckpt_flags &= (~f); 1912 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1913 } 1914 1915 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1916 { 1917 unsigned long flags; 1918 1919 spin_lock_irqsave(&sbi->cp_lock, flags); 1920 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 1921 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1922 } 1923 1924 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) 1925 { 1926 unsigned long flags; 1927 unsigned char *nat_bits; 1928 1929 /* 1930 * In order to re-enable nat_bits we 
need to call fsck.f2fs by 1931 * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost, 1932 * so let's rely on regular fsck or unclean shutdown. 1933 */ 1934 1935 if (lock) 1936 spin_lock_irqsave(&sbi->cp_lock, flags); 1937 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); 1938 nat_bits = NM_I(sbi)->nat_bits; 1939 NM_I(sbi)->nat_bits = NULL; 1940 if (lock) 1941 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1942 1943 kvfree(nat_bits); 1944 } 1945 1946 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, 1947 struct cp_control *cpc) 1948 { 1949 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); 1950 1951 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set; 1952 } 1953 1954 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 1955 { 1956 down_read(&sbi->cp_rwsem); 1957 } 1958 1959 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 1960 { 1961 return down_read_trylock(&sbi->cp_rwsem); 1962 } 1963 1964 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 1965 { 1966 up_read(&sbi->cp_rwsem); 1967 } 1968 1969 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 1970 { 1971 down_write(&sbi->cp_rwsem); 1972 } 1973 1974 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 1975 { 1976 up_write(&sbi->cp_rwsem); 1977 } 1978 1979 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 1980 { 1981 int reason = CP_SYNC; 1982 1983 if (test_opt(sbi, FASTBOOT)) 1984 reason = CP_FASTBOOT; 1985 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 1986 reason = CP_UMOUNT; 1987 return reason; 1988 } 1989 1990 static inline bool __remain_node_summaries(int reason) 1991 { 1992 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 1993 } 1994 1995 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 1996 { 1997 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 1998 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 1999 } 2000 2001 /* 2002 * Check whether the inode has blocks or not 2003 */ 2004 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 2005 { 2006 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; 2007 2008 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 2009 } 2010 2011 static inline bool f2fs_has_xattr_block(unsigned int ofs) 2012 { 2013 return ofs == XATTR_NODE_OFFSET; 2014 } 2015 2016 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 2017 struct inode *inode, bool cap) 2018 { 2019 if (!inode) 2020 return true; 2021 if (!test_opt(sbi, RESERVE_ROOT)) 2022 return false; 2023 if (IS_NOQUOTA(inode)) 2024 return true; 2025 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 2026 return true; 2027 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2028 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2029 return true; 2030 if (cap && capable(CAP_SYS_RESOURCE)) 2031 return true; 2032 return false; 2033 } 2034 2035 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2036 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2037 struct inode *inode, blkcnt_t *count) 2038 { 2039 blkcnt_t diff = 0, release = 0; 2040 block_t avail_user_block_count; 2041 int ret; 2042 2043 ret = dquot_reserve_block(inode, *count); 2044 if (ret) 2045 return ret; 2046 2047 if (time_to_inject(sbi, FAULT_BLOCK)) { 2048 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2049 release = *count; 2050 goto release_quota; 2051 } 2052 2053 /* 2054 * let's increase this in prior to actual block count change in order 2055 * for f2fs_sync_file to avoid data races when deciding checkpoint. 
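 * Any portion of the request that cannot be granted is subtracted from
 * this counter again on the release/enospc paths below.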
2056 */ 2057 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2058 2059 spin_lock(&sbi->stat_lock); 2060 sbi->total_valid_block_count += (block_t)(*count); 2061 avail_user_block_count = sbi->user_block_count - 2062 sbi->current_reserved_blocks; 2063 2064 if (!__allow_reserved_blocks(sbi, inode, true)) 2065 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2066 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2067 if (avail_user_block_count > sbi->unusable_block_count) 2068 avail_user_block_count -= sbi->unusable_block_count; 2069 else 2070 avail_user_block_count = 0; 2071 } 2072 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2073 diff = sbi->total_valid_block_count - avail_user_block_count; 2074 if (diff > *count) 2075 diff = *count; 2076 *count -= diff; 2077 release = diff; 2078 sbi->total_valid_block_count -= diff; 2079 if (!*count) { 2080 spin_unlock(&sbi->stat_lock); 2081 goto enospc; 2082 } 2083 } 2084 spin_unlock(&sbi->stat_lock); 2085 2086 if (unlikely(release)) { 2087 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2088 dquot_release_reservation_block(inode, release); 2089 } 2090 f2fs_i_blocks_write(inode, *count, true, true); 2091 return 0; 2092 2093 enospc: 2094 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2095 release_quota: 2096 dquot_release_reservation_block(inode, release); 2097 return -ENOSPC; 2098 } 2099 2100 __printf(2, 3) 2101 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2102 2103 #define f2fs_err(sbi, fmt, ...) \ 2104 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2105 #define f2fs_warn(sbi, fmt, ...) \ 2106 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2107 #define f2fs_notice(sbi, fmt, ...) \ 2108 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2109 #define f2fs_info(sbi, fmt, ...) \ 2110 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2111 #define f2fs_debug(sbi, fmt, ...) \ 2112 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2113 2114 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2115 struct inode *inode, 2116 block_t count) 2117 { 2118 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2119 2120 spin_lock(&sbi->stat_lock); 2121 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2122 sbi->total_valid_block_count -= (block_t)count; 2123 if (sbi->reserved_blocks && 2124 sbi->current_reserved_blocks < sbi->reserved_blocks) 2125 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2126 sbi->current_reserved_blocks + count); 2127 spin_unlock(&sbi->stat_lock); 2128 if (unlikely(inode->i_blocks < sectors)) { 2129 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2130 inode->i_ino, 2131 (unsigned long long)inode->i_blocks, 2132 (unsigned long long)sectors); 2133 set_sbi_flag(sbi, SBI_NEED_FSCK); 2134 return; 2135 } 2136 f2fs_i_blocks_write(inode, count, false, true); 2137 } 2138 2139 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2140 { 2141 atomic_inc(&sbi->nr_pages[count_type]); 2142 2143 if (count_type == F2FS_DIRTY_DENTS || 2144 count_type == F2FS_DIRTY_NODES || 2145 count_type == F2FS_DIRTY_META || 2146 count_type == F2FS_DIRTY_QDATA || 2147 count_type == F2FS_DIRTY_IMETA) 2148 set_sbi_flag(sbi, SBI_IS_DIRTY); 2149 } 2150 2151 static inline void inode_inc_dirty_pages(struct inode *inode) 2152 { 2153 atomic_inc(&F2FS_I(inode)->dirty_pages); 2154 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
2155 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2156 if (IS_NOQUOTA(inode)) 2157 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2158 } 2159 2160 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2161 { 2162 atomic_dec(&sbi->nr_pages[count_type]); 2163 } 2164 2165 static inline void inode_dec_dirty_pages(struct inode *inode) 2166 { 2167 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2168 !S_ISLNK(inode->i_mode)) 2169 return; 2170 2171 atomic_dec(&F2FS_I(inode)->dirty_pages); 2172 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2173 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2174 if (IS_NOQUOTA(inode)) 2175 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2176 } 2177 2178 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2179 { 2180 return atomic_read(&sbi->nr_pages[count_type]); 2181 } 2182 2183 static inline int get_dirty_pages(struct inode *inode) 2184 { 2185 return atomic_read(&F2FS_I(inode)->dirty_pages); 2186 } 2187 2188 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2189 { 2190 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2191 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2192 sbi->log_blocks_per_seg; 2193 2194 return segs / sbi->segs_per_sec; 2195 } 2196 2197 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2198 { 2199 return sbi->total_valid_block_count; 2200 } 2201 2202 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2203 { 2204 return sbi->discard_blks; 2205 } 2206 2207 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2208 { 2209 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2210 2211 /* return NAT or SIT bitmap */ 2212 if (flag == NAT_BITMAP) 2213 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2214 else if (flag == SIT_BITMAP) 2215 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2216 2217 return 0; 2218 } 2219 2220 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2221 { 2222 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2223 } 2224 2225 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2226 { 2227 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2228 int offset; 2229 2230 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2231 offset = (flag == SIT_BITMAP) ? 2232 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2233 /* 2234 * if large_nat_bitmap feature is enabled, leave checksum 2235 * protection for all nat/sit bitmaps. 2236 */ 2237 return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); 2238 } 2239 2240 if (__cp_payload(sbi) > 0) { 2241 if (flag == NAT_BITMAP) 2242 return &ckpt->sit_nat_version_bitmap; 2243 else 2244 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2245 } else { 2246 offset = (flag == NAT_BITMAP) ? 
2247 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2248 return &ckpt->sit_nat_version_bitmap + offset; 2249 } 2250 } 2251 2252 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2253 { 2254 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2255 2256 if (sbi->cur_cp_pack == 2) 2257 start_addr += sbi->blocks_per_seg; 2258 return start_addr; 2259 } 2260 2261 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2262 { 2263 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2264 2265 if (sbi->cur_cp_pack == 1) 2266 start_addr += sbi->blocks_per_seg; 2267 return start_addr; 2268 } 2269 2270 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2271 { 2272 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1; 2273 } 2274 2275 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2276 { 2277 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2278 } 2279 2280 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2281 struct inode *inode, bool is_inode) 2282 { 2283 block_t valid_block_count; 2284 unsigned int valid_node_count, user_block_count; 2285 int err; 2286 2287 if (is_inode) { 2288 if (inode) { 2289 err = dquot_alloc_inode(inode); 2290 if (err) 2291 return err; 2292 } 2293 } else { 2294 err = dquot_reserve_block(inode, 1); 2295 if (err) 2296 return err; 2297 } 2298 2299 if (time_to_inject(sbi, FAULT_BLOCK)) { 2300 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2301 goto enospc; 2302 } 2303 2304 spin_lock(&sbi->stat_lock); 2305 2306 valid_block_count = sbi->total_valid_block_count + 2307 sbi->current_reserved_blocks + 1; 2308 2309 if (!__allow_reserved_blocks(sbi, inode, false)) 2310 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2311 user_block_count = sbi->user_block_count; 2312 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2313 user_block_count -= sbi->unusable_block_count; 2314 2315 if (unlikely(valid_block_count > user_block_count)) { 2316 spin_unlock(&sbi->stat_lock); 2317 goto enospc; 2318 } 2319 2320 valid_node_count = sbi->total_valid_node_count + 1; 2321 if (unlikely(valid_node_count > sbi->total_node_count)) { 2322 spin_unlock(&sbi->stat_lock); 2323 goto enospc; 2324 } 2325 2326 sbi->total_valid_node_count++; 2327 sbi->total_valid_block_count++; 2328 spin_unlock(&sbi->stat_lock); 2329 2330 if (inode) { 2331 if (is_inode) 2332 f2fs_mark_inode_dirty_sync(inode, true); 2333 else 2334 f2fs_i_blocks_write(inode, 1, true, true); 2335 } 2336 2337 percpu_counter_inc(&sbi->alloc_valid_block_count); 2338 return 0; 2339 2340 enospc: 2341 if (is_inode) { 2342 if (inode) 2343 dquot_free_inode(inode); 2344 } else { 2345 dquot_release_reservation_block(inode, 1); 2346 } 2347 return -ENOSPC; 2348 } 2349 2350 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2351 struct inode *inode, bool is_inode) 2352 { 2353 spin_lock(&sbi->stat_lock); 2354 2355 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2356 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2357 2358 sbi->total_valid_node_count--; 2359 sbi->total_valid_block_count--; 2360 if (sbi->reserved_blocks && 2361 sbi->current_reserved_blocks < sbi->reserved_blocks) 2362 sbi->current_reserved_blocks++; 2363 2364 spin_unlock(&sbi->stat_lock); 2365 2366 if (is_inode) { 2367 dquot_free_inode(inode); 2368 } else { 2369 if (unlikely(inode->i_blocks == 0)) { 2370 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2371 inode->i_ino, 2372 (unsigned long long)inode->i_blocks); 2373 set_sbi_flag(sbi, 
SBI_NEED_FSCK); 2374 return; 2375 } 2376 f2fs_i_blocks_write(inode, 1, false, true); 2377 } 2378 } 2379 2380 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2381 { 2382 return sbi->total_valid_node_count; 2383 } 2384 2385 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2386 { 2387 percpu_counter_inc(&sbi->total_valid_inode_count); 2388 } 2389 2390 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2391 { 2392 percpu_counter_dec(&sbi->total_valid_inode_count); 2393 } 2394 2395 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2396 { 2397 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2398 } 2399 2400 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 2401 pgoff_t index, bool for_write) 2402 { 2403 struct page *page; 2404 2405 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2406 if (!for_write) 2407 page = find_get_page_flags(mapping, index, 2408 FGP_LOCK | FGP_ACCESSED); 2409 else 2410 page = find_lock_page(mapping, index); 2411 if (page) 2412 return page; 2413 2414 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2415 f2fs_show_injection_info(F2FS_M_SB(mapping), 2416 FAULT_PAGE_ALLOC); 2417 return NULL; 2418 } 2419 } 2420 2421 if (!for_write) 2422 return grab_cache_page(mapping, index); 2423 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2424 } 2425 2426 static inline struct page *f2fs_pagecache_get_page( 2427 struct address_space *mapping, pgoff_t index, 2428 int fgp_flags, gfp_t gfp_mask) 2429 { 2430 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2431 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2432 return NULL; 2433 } 2434 2435 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2436 } 2437 2438 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2439 { 2440 char *src_kaddr = kmap(src); 2441 char *dst_kaddr = kmap(dst); 2442 2443 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2444 kunmap(dst); 2445 kunmap(src); 2446 } 2447 2448 static inline void f2fs_put_page(struct page *page, int unlock) 2449 { 2450 if (!page) 2451 return; 2452 2453 if (unlock) { 2454 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2455 unlock_page(page); 2456 } 2457 put_page(page); 2458 } 2459 2460 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2461 { 2462 if (dn->node_page) 2463 f2fs_put_page(dn->node_page, 1); 2464 if (dn->inode_page && dn->node_page != dn->inode_page) 2465 f2fs_put_page(dn->inode_page, 0); 2466 dn->node_page = NULL; 2467 dn->inode_page = NULL; 2468 } 2469 2470 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2471 size_t size) 2472 { 2473 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2474 } 2475 2476 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2477 gfp_t flags) 2478 { 2479 void *entry; 2480 2481 entry = kmem_cache_alloc(cachep, flags); 2482 if (!entry) 2483 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2484 return entry; 2485 } 2486 2487 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2488 { 2489 if (sbi->gc_mode == GC_URGENT_HIGH) 2490 return true; 2491 2492 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2493 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2494 get_pages(sbi, F2FS_WB_CP_DATA) || 2495 get_pages(sbi, F2FS_DIO_READ) || 2496 get_pages(sbi, F2FS_DIO_WRITE)) 2497 return false; 2498 2499 if (type != DISCARD_TIME && SM_I(sbi) && 
SM_I(sbi)->dcc_info && 2500 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2501 return false; 2502 2503 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2504 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2505 return false; 2506 2507 if (sbi->gc_mode == GC_URGENT_LOW && 2508 (type == DISCARD_TIME || type == GC_TIME)) 2509 return true; 2510 2511 return f2fs_time_over(sbi, type); 2512 } 2513 2514 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2515 unsigned long index, void *item) 2516 { 2517 while (radix_tree_insert(root, index, item)) 2518 cond_resched(); 2519 } 2520 2521 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2522 2523 static inline bool IS_INODE(struct page *page) 2524 { 2525 struct f2fs_node *p = F2FS_NODE(page); 2526 2527 return RAW_IS_INODE(p); 2528 } 2529 2530 static inline int offset_in_addr(struct f2fs_inode *i) 2531 { 2532 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2533 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2534 } 2535 2536 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2537 { 2538 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2539 } 2540 2541 static inline int f2fs_has_extra_attr(struct inode *inode); 2542 static inline block_t data_blkaddr(struct inode *inode, 2543 struct page *node_page, unsigned int offset) 2544 { 2545 struct f2fs_node *raw_node; 2546 __le32 *addr_array; 2547 int base = 0; 2548 bool is_inode = IS_INODE(node_page); 2549 2550 raw_node = F2FS_NODE(node_page); 2551 2552 if (is_inode) { 2553 if (!inode) 2554 /* from GC path only */ 2555 base = offset_in_addr(&raw_node->i); 2556 else if (f2fs_has_extra_attr(inode)) 2557 base = get_extra_isize(inode); 2558 } 2559 2560 addr_array = blkaddr_in_node(raw_node); 2561 return le32_to_cpu(addr_array[base + offset]); 2562 } 2563 2564 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2565 { 2566 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2567 } 2568 2569 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2570 { 2571 int mask; 2572 2573 addr += (nr >> 3); 2574 mask = 1 << (7 - (nr & 0x07)); 2575 return mask & *addr; 2576 } 2577 2578 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2579 { 2580 int mask; 2581 2582 addr += (nr >> 3); 2583 mask = 1 << (7 - (nr & 0x07)); 2584 *addr |= mask; 2585 } 2586 2587 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2588 { 2589 int mask; 2590 2591 addr += (nr >> 3); 2592 mask = 1 << (7 - (nr & 0x07)); 2593 *addr &= ~mask; 2594 } 2595 2596 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2597 { 2598 int mask; 2599 int ret; 2600 2601 addr += (nr >> 3); 2602 mask = 1 << (7 - (nr & 0x07)); 2603 ret = mask & *addr; 2604 *addr |= mask; 2605 return ret; 2606 } 2607 2608 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2609 { 2610 int mask; 2611 int ret; 2612 2613 addr += (nr >> 3); 2614 mask = 1 << (7 - (nr & 0x07)); 2615 ret = mask & *addr; 2616 *addr &= ~mask; 2617 return ret; 2618 } 2619 2620 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2621 { 2622 int mask; 2623 2624 addr += (nr >> 3); 2625 mask = 1 << (7 - (nr & 0x07)); 2626 *addr ^= mask; 2627 } 2628 2629 /* 2630 * On-disk inode flags (f2fs_inode::i_flags) 2631 */ 2632 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2633 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2634 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2635 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2636 
#define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2637 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2638 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2639 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2640 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2641 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2642 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2643 2644 /* Flags that should be inherited by new inodes from their parent. */ 2645 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2646 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2647 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2648 2649 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2650 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2651 F2FS_CASEFOLD_FL)) 2652 2653 /* Flags that are appropriate for non-directories/regular files. */ 2654 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2655 2656 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2657 { 2658 if (S_ISDIR(mode)) 2659 return flags; 2660 else if (S_ISREG(mode)) 2661 return flags & F2FS_REG_FLMASK; 2662 else 2663 return flags & F2FS_OTHER_FLMASK; 2664 } 2665 2666 static inline void __mark_inode_dirty_flag(struct inode *inode, 2667 int flag, bool set) 2668 { 2669 switch (flag) { 2670 case FI_INLINE_XATTR: 2671 case FI_INLINE_DATA: 2672 case FI_INLINE_DENTRY: 2673 case FI_NEW_INODE: 2674 if (set) 2675 return; 2676 fallthrough; 2677 case FI_DATA_EXIST: 2678 case FI_INLINE_DOTS: 2679 case FI_PIN_FILE: 2680 f2fs_mark_inode_dirty_sync(inode, true); 2681 } 2682 } 2683 2684 static inline void set_inode_flag(struct inode *inode, int flag) 2685 { 2686 set_bit(flag, F2FS_I(inode)->flags); 2687 __mark_inode_dirty_flag(inode, flag, true); 2688 } 2689 2690 static inline int is_inode_flag_set(struct inode *inode, int flag) 2691 { 2692 return test_bit(flag, F2FS_I(inode)->flags); 2693 } 2694 2695 static inline void clear_inode_flag(struct inode *inode, int flag) 2696 { 2697 clear_bit(flag, F2FS_I(inode)->flags); 2698 __mark_inode_dirty_flag(inode, flag, false); 2699 } 2700 2701 static inline bool f2fs_verity_in_progress(struct inode *inode) 2702 { 2703 return IS_ENABLED(CONFIG_FS_VERITY) && 2704 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2705 } 2706 2707 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2708 { 2709 F2FS_I(inode)->i_acl_mode = mode; 2710 set_inode_flag(inode, FI_ACL_MODE); 2711 f2fs_mark_inode_dirty_sync(inode, false); 2712 } 2713 2714 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2715 { 2716 if (inc) 2717 inc_nlink(inode); 2718 else 2719 drop_nlink(inode); 2720 f2fs_mark_inode_dirty_sync(inode, true); 2721 } 2722 2723 static inline void f2fs_i_blocks_write(struct inode *inode, 2724 block_t diff, bool add, bool claim) 2725 { 2726 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2727 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2728 2729 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2730 if (add) { 2731 if (claim) 2732 dquot_claim_block(inode, diff); 2733 else 2734 dquot_alloc_block_nofail(inode, diff); 2735 } else { 2736 dquot_free_block(inode, diff); 2737 } 2738 2739 f2fs_mark_inode_dirty_sync(inode, true); 2740 if (clean || recover) 2741 set_inode_flag(inode, FI_AUTO_RECOVER); 2742 } 2743 2744 static inline void f2fs_i_size_write(struct inode *inode, loff_t 
i_size) 2745 { 2746 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2747 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2748 2749 if (i_size_read(inode) == i_size) 2750 return; 2751 2752 i_size_write(inode, i_size); 2753 f2fs_mark_inode_dirty_sync(inode, true); 2754 if (clean || recover) 2755 set_inode_flag(inode, FI_AUTO_RECOVER); 2756 } 2757 2758 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2759 { 2760 F2FS_I(inode)->i_current_depth = depth; 2761 f2fs_mark_inode_dirty_sync(inode, true); 2762 } 2763 2764 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2765 unsigned int count) 2766 { 2767 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2768 f2fs_mark_inode_dirty_sync(inode, true); 2769 } 2770 2771 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2772 { 2773 F2FS_I(inode)->i_xattr_nid = xnid; 2774 f2fs_mark_inode_dirty_sync(inode, true); 2775 } 2776 2777 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2778 { 2779 F2FS_I(inode)->i_pino = pino; 2780 f2fs_mark_inode_dirty_sync(inode, true); 2781 } 2782 2783 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2784 { 2785 struct f2fs_inode_info *fi = F2FS_I(inode); 2786 2787 if (ri->i_inline & F2FS_INLINE_XATTR) 2788 set_bit(FI_INLINE_XATTR, fi->flags); 2789 if (ri->i_inline & F2FS_INLINE_DATA) 2790 set_bit(FI_INLINE_DATA, fi->flags); 2791 if (ri->i_inline & F2FS_INLINE_DENTRY) 2792 set_bit(FI_INLINE_DENTRY, fi->flags); 2793 if (ri->i_inline & F2FS_DATA_EXIST) 2794 set_bit(FI_DATA_EXIST, fi->flags); 2795 if (ri->i_inline & F2FS_INLINE_DOTS) 2796 set_bit(FI_INLINE_DOTS, fi->flags); 2797 if (ri->i_inline & F2FS_EXTRA_ATTR) 2798 set_bit(FI_EXTRA_ATTR, fi->flags); 2799 if (ri->i_inline & F2FS_PIN_FILE) 2800 set_bit(FI_PIN_FILE, fi->flags); 2801 } 2802 2803 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2804 { 2805 ri->i_inline = 0; 2806 2807 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2808 ri->i_inline |= F2FS_INLINE_XATTR; 2809 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2810 ri->i_inline |= F2FS_INLINE_DATA; 2811 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2812 ri->i_inline |= F2FS_INLINE_DENTRY; 2813 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2814 ri->i_inline |= F2FS_DATA_EXIST; 2815 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2816 ri->i_inline |= F2FS_INLINE_DOTS; 2817 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2818 ri->i_inline |= F2FS_EXTRA_ATTR; 2819 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2820 ri->i_inline |= F2FS_PIN_FILE; 2821 } 2822 2823 static inline int f2fs_has_extra_attr(struct inode *inode) 2824 { 2825 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2826 } 2827 2828 static inline int f2fs_has_inline_xattr(struct inode *inode) 2829 { 2830 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2831 } 2832 2833 static inline int f2fs_compressed_file(struct inode *inode) 2834 { 2835 return S_ISREG(inode->i_mode) && 2836 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 2837 } 2838 2839 static inline unsigned int addrs_per_inode(struct inode *inode) 2840 { 2841 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 2842 get_inline_xattr_addrs(inode); 2843 2844 if (!f2fs_compressed_file(inode)) 2845 return addrs; 2846 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 2847 } 2848 2849 static inline unsigned int addrs_per_block(struct inode *inode) 2850 { 2851 if (!f2fs_compressed_file(inode)) 2852 return DEF_ADDRS_PER_BLOCK; 2853 return 
ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 2854 } 2855 2856 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 2857 { 2858 struct f2fs_inode *ri = F2FS_INODE(page); 2859 2860 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 2861 get_inline_xattr_addrs(inode)]); 2862 } 2863 2864 static inline int inline_xattr_size(struct inode *inode) 2865 { 2866 if (f2fs_has_inline_xattr(inode)) 2867 return get_inline_xattr_addrs(inode) * sizeof(__le32); 2868 return 0; 2869 } 2870 2871 static inline int f2fs_has_inline_data(struct inode *inode) 2872 { 2873 return is_inode_flag_set(inode, FI_INLINE_DATA); 2874 } 2875 2876 static inline int f2fs_exist_data(struct inode *inode) 2877 { 2878 return is_inode_flag_set(inode, FI_DATA_EXIST); 2879 } 2880 2881 static inline int f2fs_has_inline_dots(struct inode *inode) 2882 { 2883 return is_inode_flag_set(inode, FI_INLINE_DOTS); 2884 } 2885 2886 static inline int f2fs_is_mmap_file(struct inode *inode) 2887 { 2888 return is_inode_flag_set(inode, FI_MMAP_FILE); 2889 } 2890 2891 static inline bool f2fs_is_pinned_file(struct inode *inode) 2892 { 2893 return is_inode_flag_set(inode, FI_PIN_FILE); 2894 } 2895 2896 static inline bool f2fs_is_atomic_file(struct inode *inode) 2897 { 2898 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 2899 } 2900 2901 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 2902 { 2903 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 2904 } 2905 2906 static inline bool f2fs_is_volatile_file(struct inode *inode) 2907 { 2908 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 2909 } 2910 2911 static inline bool f2fs_is_first_block_written(struct inode *inode) 2912 { 2913 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 2914 } 2915 2916 static inline bool f2fs_is_drop_cache(struct inode *inode) 2917 { 2918 return is_inode_flag_set(inode, FI_DROP_CACHE); 2919 } 2920 2921 static inline void *inline_data_addr(struct inode *inode, struct page *page) 2922 { 2923 struct f2fs_inode *ri = F2FS_INODE(page); 2924 int extra_size = get_extra_isize(inode); 2925 2926 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 2927 } 2928 2929 static inline int f2fs_has_inline_dentry(struct inode *inode) 2930 { 2931 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 2932 } 2933 2934 static inline int is_file(struct inode *inode, int type) 2935 { 2936 return F2FS_I(inode)->i_advise & type; 2937 } 2938 2939 static inline void set_file(struct inode *inode, int type) 2940 { 2941 F2FS_I(inode)->i_advise |= type; 2942 f2fs_mark_inode_dirty_sync(inode, true); 2943 } 2944 2945 static inline void clear_file(struct inode *inode, int type) 2946 { 2947 F2FS_I(inode)->i_advise &= ~type; 2948 f2fs_mark_inode_dirty_sync(inode, true); 2949 } 2950 2951 static inline bool f2fs_is_time_consistent(struct inode *inode) 2952 { 2953 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 2954 return false; 2955 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 2956 return false; 2957 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 2958 return false; 2959 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 2960 &F2FS_I(inode)->i_crtime)) 2961 return false; 2962 return true; 2963 } 2964 2965 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 2966 { 2967 bool ret; 2968 2969 if (dsync) { 2970 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2971 2972 spin_lock(&sbi->inode_lock[DIRTY_META]); 2973 ret = 
list_empty(&F2FS_I(inode)->gdirty_list); 2974 spin_unlock(&sbi->inode_lock[DIRTY_META]); 2975 return ret; 2976 } 2977 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 2978 file_keep_isize(inode) || 2979 i_size_read(inode) & ~PAGE_MASK) 2980 return false; 2981 2982 if (!f2fs_is_time_consistent(inode)) 2983 return false; 2984 2985 spin_lock(&F2FS_I(inode)->i_size_lock); 2986 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 2987 spin_unlock(&F2FS_I(inode)->i_size_lock); 2988 2989 return ret; 2990 } 2991 2992 static inline bool f2fs_readonly(struct super_block *sb) 2993 { 2994 return sb_rdonly(sb); 2995 } 2996 2997 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 2998 { 2999 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 3000 } 3001 3002 static inline bool is_dot_dotdot(const u8 *name, size_t len) 3003 { 3004 if (len == 1 && name[0] == '.') 3005 return true; 3006 3007 if (len == 2 && name[0] == '.' && name[1] == '.') 3008 return true; 3009 3010 return false; 3011 } 3012 3013 static inline bool f2fs_may_extent_tree(struct inode *inode) 3014 { 3015 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3016 3017 if (!test_opt(sbi, EXTENT_CACHE) || 3018 is_inode_flag_set(inode, FI_NO_EXTENT) || 3019 is_inode_flag_set(inode, FI_COMPRESSED_FILE)) 3020 return false; 3021 3022 /* 3023 * for recovered files during mount do not create extents 3024 * if shrinker is not registered. 3025 */ 3026 if (list_empty(&sbi->s_list)) 3027 return false; 3028 3029 return S_ISREG(inode->i_mode); 3030 } 3031 3032 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3033 size_t size, gfp_t flags) 3034 { 3035 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3036 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3037 return NULL; 3038 } 3039 3040 return kmalloc(size, flags); 3041 } 3042 3043 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3044 size_t size, gfp_t flags) 3045 { 3046 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3047 } 3048 3049 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3050 size_t size, gfp_t flags) 3051 { 3052 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3053 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3054 return NULL; 3055 } 3056 3057 return kvmalloc(size, flags); 3058 } 3059 3060 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3061 size_t size, gfp_t flags) 3062 { 3063 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3064 } 3065 3066 static inline int get_extra_isize(struct inode *inode) 3067 { 3068 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3069 } 3070 3071 static inline int get_inline_xattr_addrs(struct inode *inode) 3072 { 3073 return F2FS_I(inode)->i_inline_xattr_size; 3074 } 3075 3076 #define f2fs_get_inode_mode(i) \ 3077 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3078 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3079 3080 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3081 (offsetof(struct f2fs_inode, i_extra_end) - \ 3082 offsetof(struct f2fs_inode, i_extra_isize)) \ 3083 3084 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3085 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3086 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3087 sizeof((f2fs_inode)->field)) \ 3088 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3089 3090 #define DEFAULT_IOSTAT_PERIOD_MS 3000 3091 #define MIN_IOSTAT_PERIOD_MS 100 3092 /* maximum period of iostat tracing is 1 day */ 3093 #define MAX_IOSTAT_PERIOD_MS 8640000 3094 3095 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) 3096 { 3097 int i; 3098 3099 spin_lock(&sbi->iostat_lock); 3100 for (i = 0; i < NR_IO_TYPE; i++) { 3101 sbi->rw_iostat[i] = 0; 3102 sbi->prev_rw_iostat[i] = 0; 3103 } 3104 spin_unlock(&sbi->iostat_lock); 3105 } 3106 3107 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi); 3108 3109 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, 3110 enum iostat_type type, unsigned long long io_bytes) 3111 { 3112 if (!sbi->iostat_enable) 3113 return; 3114 spin_lock(&sbi->iostat_lock); 3115 sbi->rw_iostat[type] += io_bytes; 3116 3117 if (type == APP_WRITE_IO || type == APP_DIRECT_IO) 3118 sbi->rw_iostat[APP_BUFFERED_IO] = 3119 sbi->rw_iostat[APP_WRITE_IO] - 3120 sbi->rw_iostat[APP_DIRECT_IO]; 3121 3122 if (type == APP_READ_IO || type == APP_DIRECT_READ_IO) 3123 sbi->rw_iostat[APP_BUFFERED_READ_IO] = 3124 sbi->rw_iostat[APP_READ_IO] - 3125 sbi->rw_iostat[APP_DIRECT_READ_IO]; 3126 spin_unlock(&sbi->iostat_lock); 3127 3128 f2fs_record_iostat(sbi); 3129 } 3130 3131 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3132 3133 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3134 3135 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3136 block_t blkaddr, int type); 3137 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3138 block_t blkaddr, int type) 3139 { 3140 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3141 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3142 blkaddr, type); 3143 f2fs_bug_on(sbi, 1); 3144 } 3145 } 3146 3147 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3148 { 3149 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3150 blkaddr == COMPRESS_ADDR) 3151 return false; 3152 return true; 3153 } 3154 3155 static inline void f2fs_set_page_private(struct page *page, 3156 unsigned long data) 3157 { 3158 if (PagePrivate(page)) 3159 return; 3160 3161 attach_page_private(page, (void *)data); 3162 } 3163 3164 static inline void f2fs_clear_page_private(struct page *page) 3165 { 3166 detach_page_private(page); 3167 } 3168 3169 /* 3170 * file.c 3171 */ 3172 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3173 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3174 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3175 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3176 int f2fs_truncate(struct inode *inode); 3177 int f2fs_getattr(const struct path *path, struct kstat *stat, 3178 u32 request_mask, unsigned int flags); 3179 int f2fs_setattr(struct dentry *dentry, struct iattr *attr); 3180 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3181 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3182 int f2fs_precache_extents(struct inode *inode); 3183 long 
f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3184 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3185 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3186 int f2fs_pin_file_control(struct inode *inode, bool inc); 3187 3188 /* 3189 * inode.c 3190 */ 3191 void f2fs_set_inode_flags(struct inode *inode); 3192 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3193 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3194 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3195 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3196 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3197 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3198 void f2fs_update_inode_page(struct inode *inode); 3199 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3200 void f2fs_evict_inode(struct inode *inode); 3201 void f2fs_handle_failed_inode(struct inode *inode); 3202 3203 /* 3204 * namei.c 3205 */ 3206 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3207 bool hot, bool set); 3208 struct dentry *f2fs_get_parent(struct dentry *child); 3209 3210 /* 3211 * dir.c 3212 */ 3213 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3214 int f2fs_init_casefolded_name(const struct inode *dir, 3215 struct f2fs_filename *fname); 3216 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3217 int lookup, struct f2fs_filename *fname); 3218 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3219 struct f2fs_filename *fname); 3220 void f2fs_free_filename(struct f2fs_filename *fname); 3221 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3222 const struct f2fs_filename *fname, int *max_slots); 3223 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3224 unsigned int start_pos, struct fscrypt_str *fstr); 3225 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3226 struct f2fs_dentry_ptr *d); 3227 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3228 const struct f2fs_filename *fname, struct page *dpage); 3229 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3230 unsigned int current_depth); 3231 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3232 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3233 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3234 const struct f2fs_filename *fname, 3235 struct page **res_page); 3236 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3237 const struct qstr *child, struct page **res_page); 3238 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3239 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3240 struct page **page); 3241 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3242 struct page *page, struct inode *inode); 3243 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3244 const struct f2fs_filename *fname); 3245 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3246 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3247 unsigned int bit_pos); 3248 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3249 struct inode *inode, nid_t ino, umode_t mode); 3250 int f2fs_add_dentry(struct 
inode *dir, const struct f2fs_filename *fname, 3251 struct inode *inode, nid_t ino, umode_t mode); 3252 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3253 struct inode *inode, nid_t ino, umode_t mode); 3254 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3255 struct inode *dir, struct inode *inode); 3256 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3257 bool f2fs_empty_dir(struct inode *dir); 3258 3259 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3260 { 3261 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3262 inode, inode->i_ino, inode->i_mode); 3263 } 3264 3265 /* 3266 * super.c 3267 */ 3268 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3269 void f2fs_inode_synced(struct inode *inode); 3270 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3271 int f2fs_quota_sync(struct super_block *sb, int type); 3272 void f2fs_quota_off_umount(struct super_block *sb); 3273 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3274 int f2fs_sync_fs(struct super_block *sb, int sync); 3275 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3276 3277 /* 3278 * hash.c 3279 */ 3280 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3281 3282 /* 3283 * node.c 3284 */ 3285 struct dnode_of_data; 3286 struct node_info; 3287 3288 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3289 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3290 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3291 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3292 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3293 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3294 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3295 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3296 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3297 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3298 struct node_info *ni); 3299 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3300 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3301 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3302 int f2fs_truncate_xattr_node(struct inode *inode); 3303 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3304 unsigned int seq_id); 3305 int f2fs_remove_inode_page(struct inode *inode); 3306 struct page *f2fs_new_inode_page(struct inode *inode); 3307 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3308 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3309 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3310 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3311 int f2fs_move_node_page(struct page *node_page, int gc_type); 3312 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3313 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3314 struct writeback_control *wbc, bool atomic, 3315 unsigned int *seq_id); 3316 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3317 struct writeback_control *wbc, 3318 bool do_balance, enum iostat_type io_type); 3319 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3320 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3321 void f2fs_alloc_nid_done(struct 
f2fs_sb_info *sbi, nid_t nid); 3322 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3323 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3324 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3325 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3326 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3327 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3328 unsigned int segno, struct f2fs_summary_block *sum); 3329 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3330 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3331 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3332 int __init f2fs_create_node_manager_caches(void); 3333 void f2fs_destroy_node_manager_caches(void); 3334 3335 /* 3336 * segment.c 3337 */ 3338 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3339 void f2fs_register_inmem_page(struct inode *inode, struct page *page); 3340 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure); 3341 void f2fs_drop_inmem_pages(struct inode *inode); 3342 void f2fs_drop_inmem_page(struct inode *inode, struct page *page); 3343 int f2fs_commit_inmem_pages(struct inode *inode); 3344 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3345 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3346 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3347 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3348 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3349 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3350 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3351 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3352 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3353 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3354 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3355 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3356 struct cp_control *cpc); 3357 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3358 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3359 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3360 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3361 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3362 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); 3363 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); 3364 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); 3365 void f2fs_get_new_segment(struct f2fs_sb_info *sbi, 3366 unsigned int *newseg, bool new_sec, int dir); 3367 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3368 unsigned int start, unsigned int end); 3369 void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type); 3370 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3371 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3372 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3373 struct cp_control *cpc); 3374 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3375 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3376 block_t blk_addr); 3377 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3378 enum iostat_type io_type); 3379 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3380 void 
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
			block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
			enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
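/*
 * Orphan inode protocol: f2fs_acquire_orphan_inode() reserves an orphan
 * slot (and may fail when the list is full), f2fs_add_orphan_inode()
 * records the inode so it can be reclaimed after a crash, and
 * f2fs_remove_orphan_inode()/f2fs_release_orphan_inode() drop the record or
 * reservation.  f2fs_recover_orphan_inodes() replays the list at mount
 * time.  (Descriptive note; see checkpoint.c.)
 */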
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
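/*
 * Descriptive note: f2fs_write_single_data_page() writes one dirty data
 * page; @compr_blocks is only meaningful on the compressed-cluster
 * writeback path and is zero for ordinary writeback (see data.c for the
 * authoritative semantics).
 */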
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
	(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
	(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
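/*
 * Track high-water marks for concurrently open atomic/volatile files: the
 * current count is compared against max_aw_cnt/max_vw_cnt and the recorded
 * maximum is raised when it is exceeded.
 */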
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_atomic_write(inode)			do { } while (0)
#define stat_dec_atomic_write(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif

extern const struct file_operations f2fs_dir_operations;
#ifdef CONFIG_UNICODE
extern const struct dentry_operations f2fs_dentry_ops;
#endif
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
						struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
					const struct f2fs_filename *fname,
					struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
					struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
				struct page *page, struct inode *dir,
				struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
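/*
 * Descriptive note: these callbacks are registered with the kernel's memory
 * shrinker framework so that cached f2fs objects (e.g. free nids and extent
 * cache entries) can be reclaimed under memory pressure; shrinker.c wires
 * them up through register_shrinker().
 */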
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned long long key, bool *left_most);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}
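/*
 * Illustrative note: f2fs_encrypted_file() reports true only for regular
 * files, so the encryption state of directories and symlinks is checked
 * with IS_ENCRYPTED() directly; f2fs_set_encrypted_inode() keeps the
 * on-disk encrypt hint and the VFS inode flags consistent when encryption
 * is enabled on an inode.
 */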
/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
						int *submitted,
						struct writeback_control *wbc,
						enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic);
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression support is not compiled in */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
#endif

static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
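	/*
	 * Example: a compress_log_size of 2 yields a cluster of
	 * 1 << 2 = 4 blocks, i.e. 16KiB with 4KiB blocks.
	 */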
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return true;
	if (S_ISREG(inode->i_mode) &&
		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
		return false;

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return true;
}

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
					block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_encrypt(struct inode *dir, struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	umode_t mode = inode->i_mode;

	/*
	 * If the directory is encrypted or dummy encryption is enabled,
	 * then we should encrypt the inode.
	 */
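	/*
	 * Note: DUMMY_ENCRYPTION_ENABLED() reflects the test_dummy_encryption
	 * mount option, which is intended for testing only.
	 */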
	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi))
		return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#endif
	return false;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) ||
		f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * for blkzoned device, fallback direct IO to buffered IO, so
	 * all IOs can be serialized by log-structured write.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
					!IS_SWAPFILE(inode))
		return true;

	return false;
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */