/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			WARN_ON(1);					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
		}							\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
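/*
 * Editor's illustrative note (not in the original header): these helpers
 * paste the option name onto the F2FS_MOUNT_ prefix, so callers pass only
 * the suffix, e.g.:
 *
 *	set_opt(sbi, DISCARD);			opt |= F2FS_MOUNT_DISCARD
 *	if (test_opt(sbi, INLINE_DATA))		opt & F2FS_MOUNT_INLINE_DATA
 *		clear_opt(sbi, INLINE_DATA);
 */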
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned compress_log_size;		/* cluster log size */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
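/*
 * Editor's note (not in the original header): the feature word lives in the
 * on-disk super block and stays little-endian in memory, so the mask is
 * converted once with cpu_to_le32() rather than converting the loaded
 * value, e.g.:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		...the image was formatted with compression support...
 */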
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
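/*
 * Editor's worked example (not in the original header): pending discards are
 * bucketed by length, bucket i holding discards of (i + 1) blocks with the
 * last bucket catching everything longer:
 *
 *	plist_idx(1)    == 0
 *	plist_idx(16)   == 15
 *	plist_idx(512)  == 511	(MAX_PLIST_NUM - 1)
 *	plist_idx(4096) == 511	(clamped)
 */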
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};
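/*
 * Editor's note (not in the original header): a discard_cmd's state field
 * advances through the D_* values above in order: D_PREP while the command
 * only sits on a pend_list, D_PARTIAL once some of its bios have been
 * issued, D_SUBMIT when all of them are in flight, and D_DONE on completion.
 */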
struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
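/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original header): callers are expected to check for journal space before
 * bumping the entry count, along these lines:
 */
static inline bool f2fs_example_reserve_nat_jentries(struct f2fs_journal *journal,
							int cnt)
{
	/* refuse the reservation if fewer than cnt NAT slots remain */
	if (!__has_cursum_space(journal, cnt, NAT_JOURNAL))
		return false;
	/* returns the old count; new entries go at indices [old, old + cnt) */
	update_nats_in_cursum(journal, cnt);
	return true;
}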
/*
 * f2fs-specific ioctl commands
 */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT		_IOWR(F2FS_IOCTL_MAGIC, 8,	\
						struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
						struct f2fs_move_range)
#define F2FS_IOC_FLUSH_DEVICE		_IOW(F2FS_IOCTL_MAGIC, 10,	\
						struct f2fs_flush_device)
#define F2FS_IOC_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,	\
						struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
#define F2FS_IOC_SET_PIN_FILE		_IOW(F2FS_IOCTL_MAGIC, 13, __u32)
#define F2FS_IOC_GET_PIN_FILE		_IOR(F2FS_IOCTL_MAGIC, 14, __u32)
#define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS		_IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#define F2FS_IOC_GET_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 17, __u64)
#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS				\
					_IOR(F2FS_IOCTL_MAGIC, 18, __u64)
#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS				\
					_IOR(F2FS_IOCTL_MAGIC, 19, __u64)
#define F2FS_IOC_SEC_TRIM_FILE		_IOW(F2FS_IOCTL_MAGIC, 20,	\
						struct f2fs_sectrim_range)

/*
 * should be same as XFS_IOC_GOINGDOWN.
 * Flags for going down operation used by FS_IOC_GOINGDOWN
 */
#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */

/*
 * Flags used by F2FS_IOC_SEC_TRIM_FILE
 */
#define F2FS_TRIM_FILE_DISCARD		0x1	/* send discard command */
#define F2FS_TRIM_FILE_ZEROOUT		0x2	/* zero out */
#define F2FS_TRIM_FILE_MASK		0x3

struct f2fs_gc_range {
	u32 sync;
	u64 start;
	u64 len;
};

struct f2fs_defragment {
	u64 start;
	u64 len;
};

struct f2fs_move_range {
	u32 dst_fd;		/* destination fd */
	u64 pos_in;		/* start position in src_fd */
	u64 pos_out;		/* start position in dst_fd */
	u64 len;		/* size to move */
};

struct f2fs_flush_device {
	u32 dev_num;		/* device number to flush */
	u32 segments;		/* # of segments to flush */
};

struct f2fs_sectrim_range {
	u64 start;
	u64 len;
	u64 flags;
};

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
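/*
 * Editor's worked example (assuming the common defaults from f2fs_fs.h:
 * DEF_ADDRS_PER_INODE == 923 block pointers, 50 inline-xattr slots, no
 * extra attribute space, SIZE_OF_DIR_ENTRY == 11, F2FS_SLOT_LEN == 8):
 *
 *	MAX_INLINE_DATA           = 4 * (923 - 50 - 1)      = 3488 bytes
 *	NR_INLINE_DENTRY          = 3488 * 8 / (19 * 8 + 1) = 182 entries
 *	INLINE_DENTRY_BITMAP_SIZE = DIV_ROUND_UP(182, 8)    = 23 bytes
 *	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)  = 7 bytes
 */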
/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode or if the filesystem is
	 * doing an internal operation where usr_fname is also NULL.  In these
	 * cases we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
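/*
 * Editor's illustrative note (not in the original header): an inline
 * directory area is laid out as
 *
 *	[ dentry bitmap | reserved | dentries | filename slots ]
 *
 * which is exactly how make_dentry_ptr_inline() derives d->bitmap,
 * d->dentry and d->filename from the single buffer 't' above.
 */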
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	unsigned int ofs;		/* start offset of the entry */
	unsigned int len;		/* length of the entry */
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
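/*
 * Editor's note (not in the original header): is_file()/set_file()/
 * clear_file() are defined later in this header and operate on
 * F2FS_I(inode)->i_advise, so these hints persist on disk with the inode.
 * FADVISE_MODIFIABLE_BITS limits which hint bits userspace may flip (in
 * this era of the code, through the "system.advise" xattr handler).
 */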
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data*/
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* used to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs*/
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* used to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	pgoff_t ra_offset;		/* ongoing readahead offset */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	u64 i_compr_blocks;			/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned int i_cluster_size;		/* cluster size */
};
static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
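/*
 * Editor's worked example (not in the original header): extents merge only
 * when both the file-offset range and the block range are contiguous, e.g.
 *
 *	back  = { .fofs = 0, .len = 4, .blk = 100 }
 *	front = { .fofs = 4, .len = 2, .blk = 104 }
 *
 * satisfies 0 + 4 == 4 and 100 + 4 == 104, so __is_extent_mergeable()
 * returns true and the pair can collapse into { 0, 6, 100 }.
 */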
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be read ahead */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the nat entry caches */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
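/*
 * Editor's illustrative sketch (not in the original header): the usual
 * pattern for resolving a file offset is to zero the dnode first, then let
 * f2fs_get_dnode_of_data() (declared later in this header) fill it in:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */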
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE,
	CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};
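/*
 * Editor's worked example (not in the original header): the enumerators
 * after NR_PAGE_TYPE never get a bio slot of their own; PAGE_TYPE_OF_BIO()
 * folds them back onto META, e.g. PAGE_TYPE_OF_BIO(META_FLUSH) == META,
 * while PAGE_TYPE_OF_BIO(DATA) == DATA and PAGE_TYPE_OF_BIO(NODE) == NODE.
 */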
struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before CoW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};
enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync follows posix semantics without barrier */
};

/*
 * these values are set in a page's private data to indicate that
 * the page is atomically written, and it is in the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)

#ifdef CONFIG_F2FS_IO_TRACE
#define IS_IO_TRACED_PAGE(page)			\
		(page_private(page) > 0 &&		\
		page_private(page) < (unsigned long)PID_MAX_LIMIT)
#else
#define IS_IO_TRACED_PAGE(page) (0)
#endif

#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) \
	(unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL))
#else
#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
#endif

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

#define COMPRESS_DATA_RESERVED_SIZE		5
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	refcount_t ref;			/* reference count of raw page */
};
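/*
 * Editor's worked example (not in the original header): with __le32 fields,
 * COMPRESS_HEADER_SIZE = 4 + 4 * COMPRESS_DATA_RESERVED_SIZE = 24 bytes.
 * The write path in compress.c is expected to keep the compressed form only
 * when clen plus this header still saves at least one full page compared to
 * the raw cluster.
 */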
/* decompress io context for read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	refcount_t ref;			/* reference count of compressed page */
	bool failed;			/* indicate IO error during decompression */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE	((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
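/*
 * Editor's worked example (assuming 4KB pages): a cluster spans
 * 2^log_cluster_size pages, so the mountable range is
 *
 *	MIN_COMPRESS_LOG_SIZE == 2  ->   4 pages =  16KB per cluster
 *	MAX_COMPRESS_LOG_SIZE == 8  -> 256 pages =   1MB per cluster
 *
 * which is also where MAX_COMPRESS_WINDOW_SIZE (PAGE_SIZE << 8) comes from.
 */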
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */
#ifdef CONFIG_UNICODE
	struct unicode_map *s_encoding;
	__u16 s_encoding_flags;
#endif

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfiles */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic file */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic_t compr_blocks;			/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long rw_iostat[NR_IO_TYPE];
	unsigned long long prev_rw_iostat[NR_IO_TYPE];
	bool iostat_enable;
	unsigned long iostat_next_period;
	unsigned int iostat_period_ms;

	/* to attach REQ_META|REQ_FUA flags */
	unsigned int data_io_flag;
	unsigned int node_io_flag;

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
};

struct f2fs_private_dio {
	struct inode *inode;
	void *orig_private;
	bio_end_io_t *orig_end_io;
	bool write;
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
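/*
 * Editor's sketch of the canonical call pattern (the real callers live in
 * the .c files): a fault site checks time_to_inject() and, when it fires,
 * logs the injection point and takes the error path:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */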
1617 __func__, __builtin_return_address(0)) 1618 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1619 { 1620 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; 1621 1622 if (!ffi->inject_rate) 1623 return false; 1624 1625 if (!IS_FAULT_SET(ffi, type)) 1626 return false; 1627 1628 atomic_inc(&ffi->inject_ops); 1629 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { 1630 atomic_set(&ffi->inject_ops, 0); 1631 return true; 1632 } 1633 return false; 1634 } 1635 #else 1636 #define f2fs_show_injection_info(sbi, type) do { } while (0) 1637 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) 1638 { 1639 return false; 1640 } 1641 #endif 1642 1643 /* 1644 * Test if the mounted volume is a multi-device volume. 1645 * - For a single regular disk volume, sbi->s_ndevs is 0. 1646 * - For a single zoned disk volume, sbi->s_ndevs is 1. 1647 * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 1648 */ 1649 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) 1650 { 1651 return sbi->s_ndevs > 1; 1652 } 1653 1654 /* For write statistics. Suppose sector size is 512 bytes, 1655 * and the return value is in kbytes. s is of struct f2fs_sb_info. 1656 */ 1657 #define BD_PART_WRITTEN(s) \ 1658 (((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) - \ 1659 (s)->sectors_written_start) >> 1) 1660 1661 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) 1662 { 1663 unsigned long now = jiffies; 1664 1665 sbi->last_time[type] = now; 1666 1667 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ 1668 if (type == REQ_TIME) { 1669 sbi->last_time[DISCARD_TIME] = now; 1670 sbi->last_time[GC_TIME] = now; 1671 } 1672 } 1673 1674 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) 1675 { 1676 unsigned long interval = sbi->interval_time[type] * HZ; 1677 1678 return time_after(jiffies, sbi->last_time[type] + interval); 1679 } 1680 1681 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, 1682 int type) 1683 { 1684 unsigned long interval = sbi->interval_time[type] * HZ; 1685 unsigned int wait_ms = 0; 1686 long delta; 1687 1688 delta = (sbi->last_time[type] + interval) - jiffies; 1689 if (delta > 0) 1690 wait_ms = jiffies_to_msecs(delta); 1691 1692 return wait_ms; 1693 } 1694 1695 /* 1696 * Inline functions 1697 */ 1698 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, 1699 const void *address, unsigned int length) 1700 { 1701 struct { 1702 struct shash_desc shash; 1703 char ctx[4]; 1704 } desc; 1705 int err; 1706 1707 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1708 1709 desc.shash.tfm = sbi->s_chksum_driver; 1710 *(u32 *)desc.ctx = crc; 1711 1712 err = crypto_shash_update(&desc.shash, address, length); 1713 BUG_ON(err); 1714 1715 return *(u32 *)desc.ctx; 1716 } 1717 1718 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, 1719 unsigned int length) 1720 { 1721 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); 1722 } 1723 1724 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1725 void *buf, size_t buf_size) 1726 { 1727 return f2fs_crc32(sbi, buf, buf_size) == blk_crc; 1728 } 1729 1730 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, 1731 const void *address, unsigned int length) 1732 { 1733 return __f2fs_crc32(sbi, crc, address, length); 1734 } 1735 1736 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) 1737 { 1738 return container_of(inode, struct 
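/*
 * Illustrative sketch (not part of the original header): the call pattern
 * the fault-injection pair above is designed for. A call site first asks
 * time_to_inject() whether this hook should fail, logs the injection, and
 * then takes the same error path a real failure would. The function name
 * f2fs_kmalloc_like() is hypothetical; it mirrors f2fs_kmalloc() defined
 * later in this header. Under !CONFIG_F2FS_FAULT_INJECTION the stub above
 * returns false and the whole branch compiles away.
 *
 *	void *f2fs_kmalloc_like(struct f2fs_sb_info *sbi, size_t size, gfp_t gfp)
 *	{
 *		if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *			f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *			return NULL;	// simulated allocation failure
 *		}
 *		return kmalloc(size, gfp);
 *	}
 */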
f2fs_inode_info, vfs_inode); 1739 } 1740 1741 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) 1742 { 1743 return sb->s_fs_info; 1744 } 1745 1746 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) 1747 { 1748 return F2FS_SB(inode->i_sb); 1749 } 1750 1751 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) 1752 { 1753 return F2FS_I_SB(mapping->host); 1754 } 1755 1756 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1757 { 1758 return F2FS_M_SB(page_file_mapping(page)); 1759 } 1760 1761 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1762 { 1763 return (struct f2fs_super_block *)(sbi->raw_super); 1764 } 1765 1766 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) 1767 { 1768 return (struct f2fs_checkpoint *)(sbi->ckpt); 1769 } 1770 1771 static inline struct f2fs_node *F2FS_NODE(struct page *page) 1772 { 1773 return (struct f2fs_node *)page_address(page); 1774 } 1775 1776 static inline struct f2fs_inode *F2FS_INODE(struct page *page) 1777 { 1778 return &((struct f2fs_node *)page_address(page))->i; 1779 } 1780 1781 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) 1782 { 1783 return (struct f2fs_nm_info *)(sbi->nm_info); 1784 } 1785 1786 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) 1787 { 1788 return (struct f2fs_sm_info *)(sbi->sm_info); 1789 } 1790 1791 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) 1792 { 1793 return (struct sit_info *)(SM_I(sbi)->sit_info); 1794 } 1795 1796 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) 1797 { 1798 return (struct free_segmap_info *)(SM_I(sbi)->free_info); 1799 } 1800 1801 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) 1802 { 1803 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); 1804 } 1805 1806 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) 1807 { 1808 return sbi->meta_inode->i_mapping; 1809 } 1810 1811 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) 1812 { 1813 return sbi->node_inode->i_mapping; 1814 } 1815 1816 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) 1817 { 1818 return test_bit(type, &sbi->s_flag); 1819 } 1820 1821 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1822 { 1823 set_bit(type, &sbi->s_flag); 1824 } 1825 1826 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) 1827 { 1828 clear_bit(type, &sbi->s_flag); 1829 } 1830 1831 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) 1832 { 1833 return le64_to_cpu(cp->checkpoint_ver); 1834 } 1835 1836 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) 1837 { 1838 if (type < F2FS_MAX_QUOTAS) 1839 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); 1840 return 0; 1841 } 1842 1843 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) 1844 { 1845 size_t crc_offset = le32_to_cpu(cp->checksum_offset); 1846 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); 1847 } 1848 1849 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1850 { 1851 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1852 1853 return ckpt_flags & f; 1854 } 1855 1856 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1857 { 1858 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); 1859 } 1860 1861 static inline void 
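/*
 * Usage sketch (illustrative, not from the original source): s_flag is a
 * plain unsigned long treated as a bitmap, so is_sbi_flag_set(),
 * set_sbi_flag() and clear_sbi_flag() above are thin test_bit/set_bit/
 * clear_bit wrappers. The common idiom when an on-disk inconsistency is
 * detected is:
 *
 *	if (unlikely(inode->i_blocks == 0)) {
 *		set_sbi_flag(sbi, SBI_NEED_FSCK);	// ask fsck.f2fs to repair
 *		return;
 *	}
 *
 * which is exactly how dec_valid_block_count() and dec_valid_node_count()
 * further below react to impossible block counts.
 */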
__set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1862 { 1863 unsigned int ckpt_flags; 1864 1865 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1866 ckpt_flags |= f; 1867 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1868 } 1869 1870 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1871 { 1872 unsigned long flags; 1873 1874 spin_lock_irqsave(&sbi->cp_lock, flags); 1875 __set_ckpt_flags(F2FS_CKPT(sbi), f); 1876 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1877 } 1878 1879 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) 1880 { 1881 unsigned int ckpt_flags; 1882 1883 ckpt_flags = le32_to_cpu(cp->ckpt_flags); 1884 ckpt_flags &= (~f); 1885 cp->ckpt_flags = cpu_to_le32(ckpt_flags); 1886 } 1887 1888 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) 1889 { 1890 unsigned long flags; 1891 1892 spin_lock_irqsave(&sbi->cp_lock, flags); 1893 __clear_ckpt_flags(F2FS_CKPT(sbi), f); 1894 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1895 } 1896 1897 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) 1898 { 1899 unsigned long flags; 1900 unsigned char *nat_bits; 1901 1902 /* 1903 * In order to re-enable nat_bits we need to call fsck.f2fs by 1904 * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost, 1905 * so let's rely on regular fsck or unclean shutdown. 1906 */ 1907 1908 if (lock) 1909 spin_lock_irqsave(&sbi->cp_lock, flags); 1910 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); 1911 nat_bits = NM_I(sbi)->nat_bits; 1912 NM_I(sbi)->nat_bits = NULL; 1913 if (lock) 1914 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1915 1916 kvfree(nat_bits); 1917 } 1918 1919 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, 1920 struct cp_control *cpc) 1921 { 1922 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); 1923 1924 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set; 1925 } 1926 1927 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) 1928 { 1929 down_read(&sbi->cp_rwsem); 1930 } 1931 1932 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) 1933 { 1934 return down_read_trylock(&sbi->cp_rwsem); 1935 } 1936 1937 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) 1938 { 1939 up_read(&sbi->cp_rwsem); 1940 } 1941 1942 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) 1943 { 1944 down_write(&sbi->cp_rwsem); 1945 } 1946 1947 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) 1948 { 1949 up_write(&sbi->cp_rwsem); 1950 } 1951 1952 static inline int __get_cp_reason(struct f2fs_sb_info *sbi) 1953 { 1954 int reason = CP_SYNC; 1955 1956 if (test_opt(sbi, FASTBOOT)) 1957 reason = CP_FASTBOOT; 1958 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) 1959 reason = CP_UMOUNT; 1960 return reason; 1961 } 1962 1963 static inline bool __remain_node_summaries(int reason) 1964 { 1965 return (reason & (CP_UMOUNT | CP_FASTBOOT)); 1966 } 1967 1968 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) 1969 { 1970 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || 1971 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); 1972 } 1973 1974 /* 1975 * Check whether the inode has blocks or not 1976 */ 1977 static inline int F2FS_HAS_BLOCKS(struct inode *inode) 1978 { 1979 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 
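/*
 * Sketch (illustrative): f2fs_lock_op()/f2fs_unlock_op() above take
 * cp_rwsem as readers, so many metadata operations may run concurrently
 * while a checkpoint (f2fs_lock_all(), the writer side) is excluded. A
 * typical block-allocation path is bracketed like this:
 *
 *	f2fs_lock_op(sbi);
 *	err = f2fs_reserve_new_block(&dn);	// node/block updates here
 *	f2fs_unlock_op(sbi);			// checkpoint may proceed again
 *
 * Keeping the critical section short matters: once a checkpoint starts
 * waiting for the write lock, new readers queue up behind it.
 */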
1 : 0; 1980 1981 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; 1982 } 1983 1984 static inline bool f2fs_has_xattr_block(unsigned int ofs) 1985 { 1986 return ofs == XATTR_NODE_OFFSET; 1987 } 1988 1989 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, 1990 struct inode *inode, bool cap) 1991 { 1992 if (!inode) 1993 return true; 1994 if (!test_opt(sbi, RESERVE_ROOT)) 1995 return false; 1996 if (IS_NOQUOTA(inode)) 1997 return true; 1998 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) 1999 return true; 2000 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && 2001 in_group_p(F2FS_OPTION(sbi).s_resgid)) 2002 return true; 2003 if (cap && capable(CAP_SYS_RESOURCE)) 2004 return true; 2005 return false; 2006 } 2007 2008 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); 2009 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, 2010 struct inode *inode, blkcnt_t *count) 2011 { 2012 blkcnt_t diff = 0, release = 0; 2013 block_t avail_user_block_count; 2014 int ret; 2015 2016 ret = dquot_reserve_block(inode, *count); 2017 if (ret) 2018 return ret; 2019 2020 if (time_to_inject(sbi, FAULT_BLOCK)) { 2021 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2022 release = *count; 2023 goto release_quota; 2024 } 2025 2026 /* 2027 * let's increase this in prior to actual block count change in order 2028 * for f2fs_sync_file to avoid data races when deciding checkpoint. 2029 */ 2030 percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); 2031 2032 spin_lock(&sbi->stat_lock); 2033 sbi->total_valid_block_count += (block_t)(*count); 2034 avail_user_block_count = sbi->user_block_count - 2035 sbi->current_reserved_blocks; 2036 2037 if (!__allow_reserved_blocks(sbi, inode, true)) 2038 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 2039 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2040 if (avail_user_block_count > sbi->unusable_block_count) 2041 avail_user_block_count -= sbi->unusable_block_count; 2042 else 2043 avail_user_block_count = 0; 2044 } 2045 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 2046 diff = sbi->total_valid_block_count - avail_user_block_count; 2047 if (diff > *count) 2048 diff = *count; 2049 *count -= diff; 2050 release = diff; 2051 sbi->total_valid_block_count -= diff; 2052 if (!*count) { 2053 spin_unlock(&sbi->stat_lock); 2054 goto enospc; 2055 } 2056 } 2057 spin_unlock(&sbi->stat_lock); 2058 2059 if (unlikely(release)) { 2060 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2061 dquot_release_reservation_block(inode, release); 2062 } 2063 f2fs_i_blocks_write(inode, *count, true, true); 2064 return 0; 2065 2066 enospc: 2067 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 2068 release_quota: 2069 dquot_release_reservation_block(inode, release); 2070 return -ENOSPC; 2071 } 2072 2073 __printf(2, 3) 2074 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 2075 2076 #define f2fs_err(sbi, fmt, ...) \ 2077 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 2078 #define f2fs_warn(sbi, fmt, ...) \ 2079 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 2080 #define f2fs_notice(sbi, fmt, ...) \ 2081 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 2082 #define f2fs_info(sbi, fmt, ...) \ 2083 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 2084 #define f2fs_debug(sbi, fmt, ...) 
\ 2085 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 2086 2087 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 2088 struct inode *inode, 2089 block_t count) 2090 { 2091 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; 2092 2093 spin_lock(&sbi->stat_lock); 2094 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); 2095 sbi->total_valid_block_count -= (block_t)count; 2096 if (sbi->reserved_blocks && 2097 sbi->current_reserved_blocks < sbi->reserved_blocks) 2098 sbi->current_reserved_blocks = min(sbi->reserved_blocks, 2099 sbi->current_reserved_blocks + count); 2100 spin_unlock(&sbi->stat_lock); 2101 if (unlikely(inode->i_blocks < sectors)) { 2102 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 2103 inode->i_ino, 2104 (unsigned long long)inode->i_blocks, 2105 (unsigned long long)sectors); 2106 set_sbi_flag(sbi, SBI_NEED_FSCK); 2107 return; 2108 } 2109 f2fs_i_blocks_write(inode, count, false, true); 2110 } 2111 2112 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) 2113 { 2114 atomic_inc(&sbi->nr_pages[count_type]); 2115 2116 if (count_type == F2FS_DIRTY_DENTS || 2117 count_type == F2FS_DIRTY_NODES || 2118 count_type == F2FS_DIRTY_META || 2119 count_type == F2FS_DIRTY_QDATA || 2120 count_type == F2FS_DIRTY_IMETA) 2121 set_sbi_flag(sbi, SBI_IS_DIRTY); 2122 } 2123 2124 static inline void inode_inc_dirty_pages(struct inode *inode) 2125 { 2126 atomic_inc(&F2FS_I(inode)->dirty_pages); 2127 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 2128 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2129 if (IS_NOQUOTA(inode)) 2130 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2131 } 2132 2133 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) 2134 { 2135 atomic_dec(&sbi->nr_pages[count_type]); 2136 } 2137 2138 static inline void inode_dec_dirty_pages(struct inode *inode) 2139 { 2140 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && 2141 !S_ISLNK(inode->i_mode)) 2142 return; 2143 2144 atomic_dec(&F2FS_I(inode)->dirty_pages); 2145 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
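/*
 * Note (added for clarity): the nr_pages[] counters touched by
 * inc_page_count()/dec_page_count() must stay balanced; SBI_IS_DIRTY is
 * only raised here and is cleared again by a checkpoint. A writeback path
 * typically pairs them like:
 *
 *	inc_page_count(sbi, F2FS_WB_DATA);	// on bio submission
 *	...
 *	dec_page_count(sbi, F2FS_WB_DATA);	// in the bio end_io handler
 *
 * and is_idle() below refuses to report an idle device while any of the
 * F2FS_RD_DATA, F2FS_WB_DATA or F2FS_WB_CP_DATA counters are non-zero.
 */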
2146 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); 2147 if (IS_NOQUOTA(inode)) 2148 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); 2149 } 2150 2151 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) 2152 { 2153 return atomic_read(&sbi->nr_pages[count_type]); 2154 } 2155 2156 static inline int get_dirty_pages(struct inode *inode) 2157 { 2158 return atomic_read(&F2FS_I(inode)->dirty_pages); 2159 } 2160 2161 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) 2162 { 2163 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; 2164 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> 2165 sbi->log_blocks_per_seg; 2166 2167 return segs / sbi->segs_per_sec; 2168 } 2169 2170 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) 2171 { 2172 return sbi->total_valid_block_count; 2173 } 2174 2175 static inline block_t discard_blocks(struct f2fs_sb_info *sbi) 2176 { 2177 return sbi->discard_blks; 2178 } 2179 2180 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) 2181 { 2182 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2183 2184 /* return NAT or SIT bitmap */ 2185 if (flag == NAT_BITMAP) 2186 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 2187 else if (flag == SIT_BITMAP) 2188 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 2189 2190 return 0; 2191 } 2192 2193 static inline block_t __cp_payload(struct f2fs_sb_info *sbi) 2194 { 2195 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 2196 } 2197 2198 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) 2199 { 2200 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 2201 int offset; 2202 2203 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { 2204 offset = (flag == SIT_BITMAP) ? 2205 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; 2206 /* 2207 * if large_nat_bitmap feature is enabled, leave checksum 2208 * protection for all nat/sit bitmaps. 2209 */ 2210 return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32); 2211 } 2212 2213 if (__cp_payload(sbi) > 0) { 2214 if (flag == NAT_BITMAP) 2215 return &ckpt->sit_nat_version_bitmap; 2216 else 2217 return (unsigned char *)ckpt + F2FS_BLKSIZE; 2218 } else { 2219 offset = (flag == NAT_BITMAP) ? 2220 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; 2221 return &ckpt->sit_nat_version_bitmap + offset; 2222 } 2223 } 2224 2225 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) 2226 { 2227 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2228 2229 if (sbi->cur_cp_pack == 2) 2230 start_addr += sbi->blocks_per_seg; 2231 return start_addr; 2232 } 2233 2234 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) 2235 { 2236 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); 2237 2238 if (sbi->cur_cp_pack == 1) 2239 start_addr += sbi->blocks_per_seg; 2240 return start_addr; 2241 } 2242 2243 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) 2244 { 2245 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 
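/*
 * Clarifying sketch: f2fs keeps two checkpoint packs, at cp_blkaddr and at
 * cp_blkaddr + blocks_per_seg, and ping-pongs between them so a crash while
 * writing one pack always leaves the other valid. With cur_cp_pack == 1,
 * __start_cp_addr() above returns the first pack and __start_cp_next_addr()
 * the second; after a checkpoint completes, __set_cp_next_pack() flips the
 * cursor:
 *
 *	addr = __start_cp_next_addr(sbi);	// write the new pack here
 *	... write checkpoint blocks ...
 *	__set_cp_next_pack(sbi);		// it is now the current pack
 */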
2 : 1; 2246 } 2247 2248 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) 2249 { 2250 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); 2251 } 2252 2253 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, 2254 struct inode *inode, bool is_inode) 2255 { 2256 block_t valid_block_count; 2257 unsigned int valid_node_count, user_block_count; 2258 int err; 2259 2260 if (is_inode) { 2261 if (inode) { 2262 err = dquot_alloc_inode(inode); 2263 if (err) 2264 return err; 2265 } 2266 } else { 2267 err = dquot_reserve_block(inode, 1); 2268 if (err) 2269 return err; 2270 } 2271 2272 if (time_to_inject(sbi, FAULT_BLOCK)) { 2273 f2fs_show_injection_info(sbi, FAULT_BLOCK); 2274 goto enospc; 2275 } 2276 2277 spin_lock(&sbi->stat_lock); 2278 2279 valid_block_count = sbi->total_valid_block_count + 2280 sbi->current_reserved_blocks + 1; 2281 2282 if (!__allow_reserved_blocks(sbi, inode, false)) 2283 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2284 user_block_count = sbi->user_block_count; 2285 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2286 user_block_count -= sbi->unusable_block_count; 2287 2288 if (unlikely(valid_block_count > user_block_count)) { 2289 spin_unlock(&sbi->stat_lock); 2290 goto enospc; 2291 } 2292 2293 valid_node_count = sbi->total_valid_node_count + 1; 2294 if (unlikely(valid_node_count > sbi->total_node_count)) { 2295 spin_unlock(&sbi->stat_lock); 2296 goto enospc; 2297 } 2298 2299 sbi->total_valid_node_count++; 2300 sbi->total_valid_block_count++; 2301 spin_unlock(&sbi->stat_lock); 2302 2303 if (inode) { 2304 if (is_inode) 2305 f2fs_mark_inode_dirty_sync(inode, true); 2306 else 2307 f2fs_i_blocks_write(inode, 1, true, true); 2308 } 2309 2310 percpu_counter_inc(&sbi->alloc_valid_block_count); 2311 return 0; 2312 2313 enospc: 2314 if (is_inode) { 2315 if (inode) 2316 dquot_free_inode(inode); 2317 } else { 2318 dquot_release_reservation_block(inode, 1); 2319 } 2320 return -ENOSPC; 2321 } 2322 2323 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, 2324 struct inode *inode, bool is_inode) 2325 { 2326 spin_lock(&sbi->stat_lock); 2327 2328 f2fs_bug_on(sbi, !sbi->total_valid_block_count); 2329 f2fs_bug_on(sbi, !sbi->total_valid_node_count); 2330 2331 sbi->total_valid_node_count--; 2332 sbi->total_valid_block_count--; 2333 if (sbi->reserved_blocks && 2334 sbi->current_reserved_blocks < sbi->reserved_blocks) 2335 sbi->current_reserved_blocks++; 2336 2337 spin_unlock(&sbi->stat_lock); 2338 2339 if (is_inode) { 2340 dquot_free_inode(inode); 2341 } else { 2342 if (unlikely(inode->i_blocks == 0)) { 2343 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", 2344 inode->i_ino, 2345 (unsigned long long)inode->i_blocks); 2346 set_sbi_flag(sbi, SBI_NEED_FSCK); 2347 return; 2348 } 2349 f2fs_i_blocks_write(inode, 1, false, true); 2350 } 2351 } 2352 2353 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) 2354 { 2355 return sbi->total_valid_node_count; 2356 } 2357 2358 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) 2359 { 2360 percpu_counter_inc(&sbi->total_valid_inode_count); 2361 } 2362 2363 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) 2364 { 2365 percpu_counter_dec(&sbi->total_valid_inode_count); 2366 } 2367 2368 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) 2369 { 2370 return percpu_counter_sum_positive(&sbi->total_valid_inode_count); 2371 } 2372 2373 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, 
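/*
 * Sketch (illustrative): inc_valid_node_count() and dec_valid_node_count()
 * act as a transaction pair around node allocation, e.g. when creating a
 * new node page:
 *
 *	err = inc_valid_node_count(sbi, inode, false);	// also reserves quota
 *	if (err)
 *		return ERR_PTR(err);
 *	...
 *	if (failed)
 *		dec_valid_node_count(sbi, inode, false);	// roll back
 *
 * The is_inode argument distinguishes inode allocation (dquot_alloc_inode)
 * from ordinary node blocks (dquot_reserve_block), so the rollback must
 * pass the same value as the increment did.
 */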
2374 pgoff_t index, bool for_write) 2375 { 2376 struct page *page; 2377 2378 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { 2379 if (!for_write) 2380 page = find_get_page_flags(mapping, index, 2381 FGP_LOCK | FGP_ACCESSED); 2382 else 2383 page = find_lock_page(mapping, index); 2384 if (page) 2385 return page; 2386 2387 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { 2388 f2fs_show_injection_info(F2FS_M_SB(mapping), 2389 FAULT_PAGE_ALLOC); 2390 return NULL; 2391 } 2392 } 2393 2394 if (!for_write) 2395 return grab_cache_page(mapping, index); 2396 return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); 2397 } 2398 2399 static inline struct page *f2fs_pagecache_get_page( 2400 struct address_space *mapping, pgoff_t index, 2401 int fgp_flags, gfp_t gfp_mask) 2402 { 2403 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { 2404 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET); 2405 return NULL; 2406 } 2407 2408 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); 2409 } 2410 2411 static inline void f2fs_copy_page(struct page *src, struct page *dst) 2412 { 2413 char *src_kaddr = kmap(src); 2414 char *dst_kaddr = kmap(dst); 2415 2416 memcpy(dst_kaddr, src_kaddr, PAGE_SIZE); 2417 kunmap(dst); 2418 kunmap(src); 2419 } 2420 2421 static inline void f2fs_put_page(struct page *page, int unlock) 2422 { 2423 if (!page) 2424 return; 2425 2426 if (unlock) { 2427 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); 2428 unlock_page(page); 2429 } 2430 put_page(page); 2431 } 2432 2433 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 2434 { 2435 if (dn->node_page) 2436 f2fs_put_page(dn->node_page, 1); 2437 if (dn->inode_page && dn->node_page != dn->inode_page) 2438 f2fs_put_page(dn->inode_page, 0); 2439 dn->node_page = NULL; 2440 dn->inode_page = NULL; 2441 } 2442 2443 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 2444 size_t size) 2445 { 2446 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 2447 } 2448 2449 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 2450 gfp_t flags) 2451 { 2452 void *entry; 2453 2454 entry = kmem_cache_alloc(cachep, flags); 2455 if (!entry) 2456 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 2457 return entry; 2458 } 2459 2460 static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2461 { 2462 if (sbi->gc_mode == GC_URGENT_HIGH) 2463 return true; 2464 2465 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2466 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2467 get_pages(sbi, F2FS_WB_CP_DATA) || 2468 get_pages(sbi, F2FS_DIO_READ) || 2469 get_pages(sbi, F2FS_DIO_WRITE)) 2470 return false; 2471 2472 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && 2473 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2474 return false; 2475 2476 if (SM_I(sbi) && SM_I(sbi)->fcc_info && 2477 atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) 2478 return false; 2479 2480 if (sbi->gc_mode == GC_URGENT_LOW && 2481 (type == DISCARD_TIME || type == GC_TIME)) 2482 return true; 2483 2484 return f2fs_time_over(sbi, type); 2485 } 2486 2487 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 2488 unsigned long index, void *item) 2489 { 2490 while (radix_tree_insert(root, index, item)) 2491 cond_resched(); 2492 } 2493 2494 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 2495 2496 static inline bool IS_INODE(struct page *page) 2497 { 2498 struct f2fs_node *p = F2FS_NODE(page); 2499 2500 return 
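/*
 * Usage sketch (illustrative): a page obtained through
 * f2fs_grab_cache_page() comes back locked with an elevated refcount, so
 * the canonical caller looks like:
 *
 *	page = f2fs_grab_cache_page(mapping, index, for_write);
 *	if (!page)
 *		return -ENOMEM;	// may be an injected FAULT_PAGE_ALLOC
 *	... use the page ...
 *	f2fs_put_page(page, 1);	// unlock and drop the reference
 *
 * Passing 0 as the second argument drops only the reference, for pages that
 * were already unlocked (compare f2fs_put_dnode() below, which unlocks the
 * node page but not the inode page).
 */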
RAW_IS_INODE(p); 2501 } 2502 2503 static inline int offset_in_addr(struct f2fs_inode *i) 2504 { 2505 return (i->i_inline & F2FS_EXTRA_ATTR) ? 2506 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 2507 } 2508 2509 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 2510 { 2511 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; 2512 } 2513 2514 static inline int f2fs_has_extra_attr(struct inode *inode); 2515 static inline block_t data_blkaddr(struct inode *inode, 2516 struct page *node_page, unsigned int offset) 2517 { 2518 struct f2fs_node *raw_node; 2519 __le32 *addr_array; 2520 int base = 0; 2521 bool is_inode = IS_INODE(node_page); 2522 2523 raw_node = F2FS_NODE(node_page); 2524 2525 if (is_inode) { 2526 if (!inode) 2527 /* from GC path only */ 2528 base = offset_in_addr(&raw_node->i); 2529 else if (f2fs_has_extra_attr(inode)) 2530 base = get_extra_isize(inode); 2531 } 2532 2533 addr_array = blkaddr_in_node(raw_node); 2534 return le32_to_cpu(addr_array[base + offset]); 2535 } 2536 2537 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) 2538 { 2539 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); 2540 } 2541 2542 static inline int f2fs_test_bit(unsigned int nr, char *addr) 2543 { 2544 int mask; 2545 2546 addr += (nr >> 3); 2547 mask = 1 << (7 - (nr & 0x07)); 2548 return mask & *addr; 2549 } 2550 2551 static inline void f2fs_set_bit(unsigned int nr, char *addr) 2552 { 2553 int mask; 2554 2555 addr += (nr >> 3); 2556 mask = 1 << (7 - (nr & 0x07)); 2557 *addr |= mask; 2558 } 2559 2560 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 2561 { 2562 int mask; 2563 2564 addr += (nr >> 3); 2565 mask = 1 << (7 - (nr & 0x07)); 2566 *addr &= ~mask; 2567 } 2568 2569 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 2570 { 2571 int mask; 2572 int ret; 2573 2574 addr += (nr >> 3); 2575 mask = 1 << (7 - (nr & 0x07)); 2576 ret = mask & *addr; 2577 *addr |= mask; 2578 return ret; 2579 } 2580 2581 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 2582 { 2583 int mask; 2584 int ret; 2585 2586 addr += (nr >> 3); 2587 mask = 1 << (7 - (nr & 0x07)); 2588 ret = mask & *addr; 2589 *addr &= ~mask; 2590 return ret; 2591 } 2592 2593 static inline void f2fs_change_bit(unsigned int nr, char *addr) 2594 { 2595 int mask; 2596 2597 addr += (nr >> 3); 2598 mask = 1 << (7 - (nr & 0x07)); 2599 *addr ^= mask; 2600 } 2601 2602 /* 2603 * On-disk inode flags (f2fs_inode::i_flags) 2604 */ 2605 #define F2FS_COMPR_FL 0x00000004 /* Compress file */ 2606 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2607 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2608 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2609 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2610 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2611 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ 2612 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2613 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2614 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2615 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2616 2617 /* Flags that should be inherited by new inodes from their parent. 
*/ 2618 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2619 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2620 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL) 2621 2622 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2623 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2624 F2FS_CASEFOLD_FL)) 2625 2626 /* Flags that are appropriate for non-directories/regular files. */ 2627 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2628 2629 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 2630 { 2631 if (S_ISDIR(mode)) 2632 return flags; 2633 else if (S_ISREG(mode)) 2634 return flags & F2FS_REG_FLMASK; 2635 else 2636 return flags & F2FS_OTHER_FLMASK; 2637 } 2638 2639 static inline void __mark_inode_dirty_flag(struct inode *inode, 2640 int flag, bool set) 2641 { 2642 switch (flag) { 2643 case FI_INLINE_XATTR: 2644 case FI_INLINE_DATA: 2645 case FI_INLINE_DENTRY: 2646 case FI_NEW_INODE: 2647 if (set) 2648 return; 2649 /* fall through */ 2650 case FI_DATA_EXIST: 2651 case FI_INLINE_DOTS: 2652 case FI_PIN_FILE: 2653 f2fs_mark_inode_dirty_sync(inode, true); 2654 } 2655 } 2656 2657 static inline void set_inode_flag(struct inode *inode, int flag) 2658 { 2659 set_bit(flag, F2FS_I(inode)->flags); 2660 __mark_inode_dirty_flag(inode, flag, true); 2661 } 2662 2663 static inline int is_inode_flag_set(struct inode *inode, int flag) 2664 { 2665 return test_bit(flag, F2FS_I(inode)->flags); 2666 } 2667 2668 static inline void clear_inode_flag(struct inode *inode, int flag) 2669 { 2670 clear_bit(flag, F2FS_I(inode)->flags); 2671 __mark_inode_dirty_flag(inode, flag, false); 2672 } 2673 2674 static inline bool f2fs_verity_in_progress(struct inode *inode) 2675 { 2676 return IS_ENABLED(CONFIG_FS_VERITY) && 2677 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); 2678 } 2679 2680 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2681 { 2682 F2FS_I(inode)->i_acl_mode = mode; 2683 set_inode_flag(inode, FI_ACL_MODE); 2684 f2fs_mark_inode_dirty_sync(inode, false); 2685 } 2686 2687 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2688 { 2689 if (inc) 2690 inc_nlink(inode); 2691 else 2692 drop_nlink(inode); 2693 f2fs_mark_inode_dirty_sync(inode, true); 2694 } 2695 2696 static inline void f2fs_i_blocks_write(struct inode *inode, 2697 block_t diff, bool add, bool claim) 2698 { 2699 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2700 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2701 2702 /* add = 1, claim = 1 should be dquot_reserve_block in pair */ 2703 if (add) { 2704 if (claim) 2705 dquot_claim_block(inode, diff); 2706 else 2707 dquot_alloc_block_nofail(inode, diff); 2708 } else { 2709 dquot_free_block(inode, diff); 2710 } 2711 2712 f2fs_mark_inode_dirty_sync(inode, true); 2713 if (clean || recover) 2714 set_inode_flag(inode, FI_AUTO_RECOVER); 2715 } 2716 2717 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2718 { 2719 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2720 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2721 2722 if (i_size_read(inode) == i_size) 2723 return; 2724 2725 i_size_write(inode, i_size); 2726 f2fs_mark_inode_dirty_sync(inode, true); 2727 if (clean || recover) 2728 set_inode_flag(inode, FI_AUTO_RECOVER); 2729 } 2730 2731 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2732 { 2733 F2FS_I(inode)->i_current_depth = depth; 2734 f2fs_mark_inode_dirty_sync(inode, true); 
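/*
 * Note (added for clarity): all of the f2fs_i_*_write() setters in this
 * block follow one convention - update the in-memory field, then call
 * f2fs_mark_inode_dirty_sync() so the change reaches the on-disk inode at
 * the next sync. f2fs_i_blocks_write() and f2fs_i_size_write() additionally
 * raise FI_AUTO_RECOVER when the inode is otherwise clean, which lets
 * f2fs_skip_inode_update() below elide a node write when only size/blocks
 * changed and roll-forward recovery could reconstruct them.
 */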
2735 } 2736 2737 static inline void f2fs_i_gc_failures_write(struct inode *inode, 2738 unsigned int count) 2739 { 2740 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; 2741 f2fs_mark_inode_dirty_sync(inode, true); 2742 } 2743 2744 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2745 { 2746 F2FS_I(inode)->i_xattr_nid = xnid; 2747 f2fs_mark_inode_dirty_sync(inode, true); 2748 } 2749 2750 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2751 { 2752 F2FS_I(inode)->i_pino = pino; 2753 f2fs_mark_inode_dirty_sync(inode, true); 2754 } 2755 2756 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2757 { 2758 struct f2fs_inode_info *fi = F2FS_I(inode); 2759 2760 if (ri->i_inline & F2FS_INLINE_XATTR) 2761 set_bit(FI_INLINE_XATTR, fi->flags); 2762 if (ri->i_inline & F2FS_INLINE_DATA) 2763 set_bit(FI_INLINE_DATA, fi->flags); 2764 if (ri->i_inline & F2FS_INLINE_DENTRY) 2765 set_bit(FI_INLINE_DENTRY, fi->flags); 2766 if (ri->i_inline & F2FS_DATA_EXIST) 2767 set_bit(FI_DATA_EXIST, fi->flags); 2768 if (ri->i_inline & F2FS_INLINE_DOTS) 2769 set_bit(FI_INLINE_DOTS, fi->flags); 2770 if (ri->i_inline & F2FS_EXTRA_ATTR) 2771 set_bit(FI_EXTRA_ATTR, fi->flags); 2772 if (ri->i_inline & F2FS_PIN_FILE) 2773 set_bit(FI_PIN_FILE, fi->flags); 2774 } 2775 2776 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2777 { 2778 ri->i_inline = 0; 2779 2780 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2781 ri->i_inline |= F2FS_INLINE_XATTR; 2782 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2783 ri->i_inline |= F2FS_INLINE_DATA; 2784 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2785 ri->i_inline |= F2FS_INLINE_DENTRY; 2786 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2787 ri->i_inline |= F2FS_DATA_EXIST; 2788 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2789 ri->i_inline |= F2FS_INLINE_DOTS; 2790 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2791 ri->i_inline |= F2FS_EXTRA_ATTR; 2792 if (is_inode_flag_set(inode, FI_PIN_FILE)) 2793 ri->i_inline |= F2FS_PIN_FILE; 2794 } 2795 2796 static inline int f2fs_has_extra_attr(struct inode *inode) 2797 { 2798 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2799 } 2800 2801 static inline int f2fs_has_inline_xattr(struct inode *inode) 2802 { 2803 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2804 } 2805 2806 static inline int f2fs_compressed_file(struct inode *inode) 2807 { 2808 return S_ISREG(inode->i_mode) && 2809 is_inode_flag_set(inode, FI_COMPRESSED_FILE); 2810 } 2811 2812 static inline unsigned int addrs_per_inode(struct inode *inode) 2813 { 2814 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - 2815 get_inline_xattr_addrs(inode); 2816 2817 if (!f2fs_compressed_file(inode)) 2818 return addrs; 2819 return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); 2820 } 2821 2822 static inline unsigned int addrs_per_block(struct inode *inode) 2823 { 2824 if (!f2fs_compressed_file(inode)) 2825 return DEF_ADDRS_PER_BLOCK; 2826 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); 2827 } 2828 2829 static inline void *inline_xattr_addr(struct inode *inode, struct page *page) 2830 { 2831 struct f2fs_inode *ri = F2FS_INODE(page); 2832 2833 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 2834 get_inline_xattr_addrs(inode)]); 2835 } 2836 2837 static inline int inline_xattr_size(struct inode *inode) 2838 { 2839 if (f2fs_has_inline_xattr(inode)) 2840 return get_inline_xattr_addrs(inode) * sizeof(__le32); 2841 return 0; 2842 } 2843 2844 static inline int 
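/*
 * Worked example (illustrative; the exact constants depend on the on-disk
 * layout and defaults assumed here): with DEF_ADDRS_PER_INODE = 923 block
 * pointers and the default inline xattr area of 50 slots, a regular file
 * gets 923 - 50 = 873 usable pointers from addrs_per_inode(). For a
 * compressed file with a cluster size of 4 blocks, ALIGN_DOWN(873, 4) = 872
 * trims the tail that cannot hold a whole cluster. addrs_per_block() does
 * the same for direct node blocks: DEF_ADDRS_PER_BLOCK = 1018 is not a
 * multiple of 4, so a compressed file sees 1016 usable slots per block.
 */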
f2fs_has_inline_data(struct inode *inode) 2845 { 2846 return is_inode_flag_set(inode, FI_INLINE_DATA); 2847 } 2848 2849 static inline int f2fs_exist_data(struct inode *inode) 2850 { 2851 return is_inode_flag_set(inode, FI_DATA_EXIST); 2852 } 2853 2854 static inline int f2fs_has_inline_dots(struct inode *inode) 2855 { 2856 return is_inode_flag_set(inode, FI_INLINE_DOTS); 2857 } 2858 2859 static inline int f2fs_is_mmap_file(struct inode *inode) 2860 { 2861 return is_inode_flag_set(inode, FI_MMAP_FILE); 2862 } 2863 2864 static inline bool f2fs_is_pinned_file(struct inode *inode) 2865 { 2866 return is_inode_flag_set(inode, FI_PIN_FILE); 2867 } 2868 2869 static inline bool f2fs_is_atomic_file(struct inode *inode) 2870 { 2871 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 2872 } 2873 2874 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 2875 { 2876 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 2877 } 2878 2879 static inline bool f2fs_is_volatile_file(struct inode *inode) 2880 { 2881 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 2882 } 2883 2884 static inline bool f2fs_is_first_block_written(struct inode *inode) 2885 { 2886 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 2887 } 2888 2889 static inline bool f2fs_is_drop_cache(struct inode *inode) 2890 { 2891 return is_inode_flag_set(inode, FI_DROP_CACHE); 2892 } 2893 2894 static inline void *inline_data_addr(struct inode *inode, struct page *page) 2895 { 2896 struct f2fs_inode *ri = F2FS_INODE(page); 2897 int extra_size = get_extra_isize(inode); 2898 2899 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 2900 } 2901 2902 static inline int f2fs_has_inline_dentry(struct inode *inode) 2903 { 2904 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 2905 } 2906 2907 static inline int is_file(struct inode *inode, int type) 2908 { 2909 return F2FS_I(inode)->i_advise & type; 2910 } 2911 2912 static inline void set_file(struct inode *inode, int type) 2913 { 2914 F2FS_I(inode)->i_advise |= type; 2915 f2fs_mark_inode_dirty_sync(inode, true); 2916 } 2917 2918 static inline void clear_file(struct inode *inode, int type) 2919 { 2920 F2FS_I(inode)->i_advise &= ~type; 2921 f2fs_mark_inode_dirty_sync(inode, true); 2922 } 2923 2924 static inline bool f2fs_is_time_consistent(struct inode *inode) 2925 { 2926 if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) 2927 return false; 2928 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) 2929 return false; 2930 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) 2931 return false; 2932 if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3, 2933 &F2FS_I(inode)->i_crtime)) 2934 return false; 2935 return true; 2936 } 2937 2938 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 2939 { 2940 bool ret; 2941 2942 if (dsync) { 2943 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2944 2945 spin_lock(&sbi->inode_lock[DIRTY_META]); 2946 ret = list_empty(&F2FS_I(inode)->gdirty_list); 2947 spin_unlock(&sbi->inode_lock[DIRTY_META]); 2948 return ret; 2949 } 2950 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 2951 file_keep_isize(inode) || 2952 i_size_read(inode) & ~PAGE_MASK) 2953 return false; 2954 2955 if (!f2fs_is_time_consistent(inode)) 2956 return false; 2957 2958 spin_lock(&F2FS_I(inode)->i_size_lock); 2959 ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); 2960 spin_unlock(&F2FS_I(inode)->i_size_lock); 2961 2962 return ret; 2963 } 2964 2965 static inline bool f2fs_readonly(struct 
super_block *sb) 2966 { 2967 return sb_rdonly(sb); 2968 } 2969 2970 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 2971 { 2972 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 2973 } 2974 2975 static inline bool is_dot_dotdot(const u8 *name, size_t len) 2976 { 2977 if (len == 1 && name[0] == '.') 2978 return true; 2979 2980 if (len == 2 && name[0] == '.' && name[1] == '.') 2981 return true; 2982 2983 return false; 2984 } 2985 2986 static inline bool f2fs_may_extent_tree(struct inode *inode) 2987 { 2988 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2989 2990 if (!test_opt(sbi, EXTENT_CACHE) || 2991 is_inode_flag_set(inode, FI_NO_EXTENT) || 2992 is_inode_flag_set(inode, FI_COMPRESSED_FILE)) 2993 return false; 2994 2995 /* 2996 * for recovered files during mount do not create extents 2997 * if shrinker is not registered. 2998 */ 2999 if (list_empty(&sbi->s_list)) 3000 return false; 3001 3002 return S_ISREG(inode->i_mode); 3003 } 3004 3005 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 3006 size_t size, gfp_t flags) 3007 { 3008 if (time_to_inject(sbi, FAULT_KMALLOC)) { 3009 f2fs_show_injection_info(sbi, FAULT_KMALLOC); 3010 return NULL; 3011 } 3012 3013 return kmalloc(size, flags); 3014 } 3015 3016 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, 3017 size_t size, gfp_t flags) 3018 { 3019 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); 3020 } 3021 3022 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, 3023 size_t size, gfp_t flags) 3024 { 3025 if (time_to_inject(sbi, FAULT_KVMALLOC)) { 3026 f2fs_show_injection_info(sbi, FAULT_KVMALLOC); 3027 return NULL; 3028 } 3029 3030 return kvmalloc(size, flags); 3031 } 3032 3033 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, 3034 size_t size, gfp_t flags) 3035 { 3036 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); 3037 } 3038 3039 static inline int get_extra_isize(struct inode *inode) 3040 { 3041 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 3042 } 3043 3044 static inline int get_inline_xattr_addrs(struct inode *inode) 3045 { 3046 return F2FS_I(inode)->i_inline_xattr_size; 3047 } 3048 3049 #define f2fs_get_inode_mode(i) \ 3050 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 3051 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 3052 3053 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 3054 (offsetof(struct f2fs_inode, i_extra_end) - \ 3055 offsetof(struct f2fs_inode, i_extra_isize)) \ 3056 3057 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 3058 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 3059 ((offsetof(typeof(*(f2fs_inode)), field) + \ 3060 sizeof((f2fs_inode)->field)) \ 3061 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ 3062 3063 #define DEFAULT_IOSTAT_PERIOD_MS 3000 3064 #define MIN_IOSTAT_PERIOD_MS 100 3065 /* maximum period of iostat tracing is 1 day */ 3066 #define MAX_IOSTAT_PERIOD_MS 8640000 3067 3068 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) 3069 { 3070 int i; 3071 3072 spin_lock(&sbi->iostat_lock); 3073 for (i = 0; i < NR_IO_TYPE; i++) { 3074 sbi->rw_iostat[i] = 0; 3075 sbi->prev_rw_iostat[i] = 0; 3076 } 3077 spin_unlock(&sbi->iostat_lock); 3078 } 3079 3080 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi); 3081 3082 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, 3083 enum iostat_type type, unsigned long long io_bytes) 3084 { 3085 if (!sbi->iostat_enable) 3086 return; 3087 spin_lock(&sbi->iostat_lock); 3088 sbi->rw_iostat[type] += io_bytes; 3089 3090 if (type == APP_WRITE_IO || type == APP_DIRECT_IO) 3091 sbi->rw_iostat[APP_BUFFERED_IO] = 3092 sbi->rw_iostat[APP_WRITE_IO] - 3093 sbi->rw_iostat[APP_DIRECT_IO]; 3094 3095 if (type == APP_READ_IO || type == APP_DIRECT_READ_IO) 3096 sbi->rw_iostat[APP_BUFFERED_READ_IO] = 3097 sbi->rw_iostat[APP_READ_IO] - 3098 sbi->rw_iostat[APP_DIRECT_READ_IO]; 3099 spin_unlock(&sbi->iostat_lock); 3100 3101 f2fs_record_iostat(sbi); 3102 } 3103 3104 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) 3105 3106 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) 3107 3108 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3109 block_t blkaddr, int type); 3110 static inline void verify_blkaddr(struct f2fs_sb_info *sbi, 3111 block_t blkaddr, int type) 3112 { 3113 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 3114 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 3115 blkaddr, type); 3116 f2fs_bug_on(sbi, 1); 3117 } 3118 } 3119 3120 static inline bool __is_valid_data_blkaddr(block_t blkaddr) 3121 { 3122 if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || 3123 blkaddr == COMPRESS_ADDR) 3124 return false; 3125 return true; 3126 } 3127 3128 static inline void f2fs_set_page_private(struct page *page, 3129 unsigned long data) 3130 { 3131 if (PagePrivate(page)) 3132 return; 3133 3134 attach_page_private(page, (void *)data); 3135 } 3136 3137 static inline void f2fs_clear_page_private(struct page *page) 3138 { 3139 detach_page_private(page); 3140 } 3141 3142 /* 3143 * file.c 3144 */ 3145 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3146 void f2fs_truncate_data_blocks(struct dnode_of_data *dn); 3147 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); 3148 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); 3149 int f2fs_truncate(struct inode *inode); 3150 int f2fs_getattr(const struct path *path, struct kstat *stat, 3151 u32 request_mask, unsigned int flags); 3152 int f2fs_setattr(struct dentry *dentry, struct iattr *attr); 3153 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 3154 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); 3155 int f2fs_precache_extents(struct inode *inode); 3156 long 
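/*
 * Usage sketch (illustrative): F2FS_FITS_IN_INODE() above guards access to
 * optional fields in the variable-sized extra-attribute area. A reader of
 * an on-disk inode checks the recorded i_extra_isize before touching such
 * a field, e.g.:
 *
 *	if (f2fs_sb_has_project_quota(sbi) &&
 *	    F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 *
 * f2fs_sb_has_project_quota() and the i_projid field are assumed from the
 * wider f2fs code base; a field that does not fit simply keeps its default.
 */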
f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 3157 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3158 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); 3159 int f2fs_pin_file_control(struct inode *inode, bool inc); 3160 3161 /* 3162 * inode.c 3163 */ 3164 void f2fs_set_inode_flags(struct inode *inode); 3165 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 3166 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 3167 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 3168 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 3169 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 3170 void f2fs_update_inode(struct inode *inode, struct page *node_page); 3171 void f2fs_update_inode_page(struct inode *inode); 3172 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 3173 void f2fs_evict_inode(struct inode *inode); 3174 void f2fs_handle_failed_inode(struct inode *inode); 3175 3176 /* 3177 * namei.c 3178 */ 3179 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, 3180 bool hot, bool set); 3181 struct dentry *f2fs_get_parent(struct dentry *child); 3182 3183 /* 3184 * dir.c 3185 */ 3186 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); 3187 int f2fs_init_casefolded_name(const struct inode *dir, 3188 struct f2fs_filename *fname); 3189 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, 3190 int lookup, struct f2fs_filename *fname); 3191 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, 3192 struct f2fs_filename *fname); 3193 void f2fs_free_filename(struct f2fs_filename *fname); 3194 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, 3195 const struct f2fs_filename *fname, int *max_slots); 3196 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 3197 unsigned int start_pos, struct fscrypt_str *fstr); 3198 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, 3199 struct f2fs_dentry_ptr *d); 3200 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, 3201 const struct f2fs_filename *fname, struct page *dpage); 3202 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, 3203 unsigned int current_depth); 3204 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); 3205 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 3206 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 3207 const struct f2fs_filename *fname, 3208 struct page **res_page); 3209 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 3210 const struct qstr *child, struct page **res_page); 3211 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 3212 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 3213 struct page **page); 3214 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 3215 struct page *page, struct inode *inode); 3216 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, 3217 const struct f2fs_filename *fname); 3218 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 3219 const struct fscrypt_str *name, f2fs_hash_t name_hash, 3220 unsigned int bit_pos); 3221 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, 3222 struct inode *inode, nid_t ino, umode_t mode); 3223 int f2fs_add_dentry(struct 
inode *dir, const struct f2fs_filename *fname, 3224 struct inode *inode, nid_t ino, umode_t mode); 3225 int f2fs_do_add_link(struct inode *dir, const struct qstr *name, 3226 struct inode *inode, nid_t ino, umode_t mode); 3227 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 3228 struct inode *dir, struct inode *inode); 3229 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 3230 bool f2fs_empty_dir(struct inode *dir); 3231 3232 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 3233 { 3234 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, 3235 inode, inode->i_ino, inode->i_mode); 3236 } 3237 3238 /* 3239 * super.c 3240 */ 3241 int f2fs_inode_dirtied(struct inode *inode, bool sync); 3242 void f2fs_inode_synced(struct inode *inode); 3243 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); 3244 int f2fs_quota_sync(struct super_block *sb, int type); 3245 void f2fs_quota_off_umount(struct super_block *sb); 3246 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 3247 int f2fs_sync_fs(struct super_block *sb, int sync); 3248 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 3249 3250 /* 3251 * hash.c 3252 */ 3253 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); 3254 3255 /* 3256 * node.c 3257 */ 3258 struct dnode_of_data; 3259 struct node_info; 3260 3261 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); 3262 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); 3263 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); 3264 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); 3265 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); 3266 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); 3267 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 3268 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 3269 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 3270 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, 3271 struct node_info *ni); 3272 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 3273 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 3274 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); 3275 int f2fs_truncate_xattr_node(struct inode *inode); 3276 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, 3277 unsigned int seq_id); 3278 int f2fs_remove_inode_page(struct inode *inode); 3279 struct page *f2fs_new_inode_page(struct inode *inode); 3280 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); 3281 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); 3282 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 3283 struct page *f2fs_get_node_page_ra(struct page *parent, int start); 3284 int f2fs_move_node_page(struct page *node_page, int gc_type); 3285 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); 3286 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 3287 struct writeback_control *wbc, bool atomic, 3288 unsigned int *seq_id); 3289 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 3290 struct writeback_control *wbc, 3291 bool do_balance, enum iostat_type io_type); 3292 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 3293 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 3294 void f2fs_alloc_nid_done(struct 
f2fs_sb_info *sbi, nid_t nid); 3295 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 3296 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 3297 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); 3298 int f2fs_recover_xattr_data(struct inode *inode, struct page *page); 3299 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 3300 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, 3301 unsigned int segno, struct f2fs_summary_block *sum); 3302 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3303 int f2fs_build_node_manager(struct f2fs_sb_info *sbi); 3304 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); 3305 int __init f2fs_create_node_manager_caches(void); 3306 void f2fs_destroy_node_manager_caches(void); 3307 3308 /* 3309 * segment.c 3310 */ 3311 bool f2fs_need_SSR(struct f2fs_sb_info *sbi); 3312 void f2fs_register_inmem_page(struct inode *inode, struct page *page); 3313 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure); 3314 void f2fs_drop_inmem_pages(struct inode *inode); 3315 void f2fs_drop_inmem_page(struct inode *inode, struct page *page); 3316 int f2fs_commit_inmem_pages(struct inode *inode); 3317 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 3318 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); 3319 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); 3320 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); 3321 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); 3322 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 3323 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 3324 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 3325 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); 3326 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); 3327 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); 3328 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3329 struct cp_control *cpc); 3330 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3331 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); 3332 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); 3333 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3334 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3335 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, 3336 unsigned int start, unsigned int end); 3337 void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type); 3338 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3339 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3340 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3341 struct cp_control *cpc); 3342 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 3343 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, 3344 block_t blk_addr); 3345 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 3346 enum iostat_type io_type); 3347 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); 3348 void f2fs_outplace_write_data(struct dnode_of_data *dn, 3349 struct f2fs_io_info *fio); 3350 int f2fs_inplace_write_data(struct f2fs_io_info *fio); 3351 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 3352 block_t old_blkaddr, block_t new_blkaddr, 3353 bool recover_curseg, bool 
recover_newaddr); 3354 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 3355 block_t old_addr, block_t new_addr, 3356 unsigned char version, bool recover_curseg, 3357 bool recover_newaddr); 3358 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 3359 block_t old_blkaddr, block_t *new_blkaddr, 3360 struct f2fs_summary *sum, int type, 3361 struct f2fs_io_info *fio); 3362 void f2fs_wait_on_page_writeback(struct page *page, 3363 enum page_type type, bool ordered, bool locked); 3364 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); 3365 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, 3366 block_t len); 3367 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3368 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 3369 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 3370 unsigned int val, int alloc); 3371 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 3372 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); 3373 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); 3374 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); 3375 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); 3376 int __init f2fs_create_segment_manager_caches(void); 3377 void f2fs_destroy_segment_manager_caches(void); 3378 int f2fs_rw_hint_to_seg_type(enum rw_hint hint); 3379 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, 3380 enum page_type type, enum temp_type temp); 3381 3382 /* 3383 * checkpoint.c 3384 */ 3385 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); 3386 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3387 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 3388 struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index); 3389 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 3390 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, 3391 block_t blkaddr, int type); 3392 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 3393 int type, bool sync); 3394 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index); 3395 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 3396 long nr_to_write, enum iostat_type io_type); 3397 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3398 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 3399 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); 3400 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 3401 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3402 unsigned int devidx, int type); 3403 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, 3404 unsigned int devidx, int type); 3405 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); 3406 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); 3407 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); 3408 void f2fs_add_orphan_inode(struct inode *inode); 3409 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 3410 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); 3411 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); 3412 void f2fs_update_dirty_page(struct inode *inode, struct page *page); 3413 void f2fs_remove_dirty_inode(struct inode *inode); 3414 int 
/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
			struct inode *inode, struct page *page,
			nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
			struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
			block_t blk_addr, struct bio *bio);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
			struct bio **bio, sector_t *last_block,
			struct writeback_control *wbc,
			enum iostat_type io_type,
			int compr_blocks);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
			struct page *page, enum migrate_mode mode);
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);

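/*
 * Illustrative sketch (not upstream code): translating a file offset to an
 * on-disk block address via f2fs_map_blocks(). The .m_lblk/.m_len/.m_pblk
 * fields and the F2FS_GET_BLOCK_DEFAULT flag are assumed to match the
 * declarations earlier in this header.
 */
#if 0	/* example only, never compiled */
static int my_lookup_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_map_blocks map = {
		.m_lblk = index,	/* logical block within the file */
		.m_len = 1,		/* map a single block */
	};
	int err;

	err = f2fs_map_blocks(inode, &map, 0 /* !create */,
				F2FS_GET_BLOCK_DEFAULT);
	if (err)
		return err;

	/* map.m_pblk now holds the block address, if the block is mapped */
	return 0;
}
#endif
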
/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
			unsigned int segno);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);

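/*
 * Illustrative sketch (not upstream code): requesting one round of
 * foreground, synchronous garbage collection. NULL_SEGNO (defined in
 * segment.h) lets GC pick a victim segment itself; treat that constant as
 * an assumption of this sketch.
 */
#if 0	/* example only, never compiled */
static int my_reclaim_space(struct f2fs_sb_info *sbi)
{
	/* sync == true: wait for GC; background == false: space pressure */
	return f2fs_gc(sbi, true, false, NULL_SEGNO);
}
#endif
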
/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_largest, hit_cached, hit_rbtree;
	unsigned long long hit_total, total_ext;
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode, compr_blocks;
	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(si)		((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_dec_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
	} while (0)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
	} while (0)
#define stat_inc_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_dec_inline_dir(inode)					\
	do {								\
		if (f2fs_has_inline_dentry(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
	} while (0)
#define stat_inc_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_dec_compr_inode(inode)					\
	do {								\
		if (f2fs_compressed_file(inode))			\
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
	} while (0)
#define stat_add_compr_blocks(inode, blocks)				\
		(atomic_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks)				\
		(atomic_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr)				\
	do {								\
		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
			atomic_inc(&(sbi)->meta_count[META_CP]);	\
		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
	} while (0)
#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi)					\
		(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode)				\
	do {								\
		int cur = F2FS_I_SB(inode)->atomic_files;		\
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
	} while (0)
#define stat_inc_volatile_write(inode)					\
		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
#define stat_dec_volatile_write(inode)					\
		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode)				\
	do {								\
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
		if (cur > max)						\
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		si->tot_segs++;						\
		if ((type) == SUM_TYPE_DATA) {				\
			si->data_segs++;				\
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
		} else {						\
			si->node_segs++;				\
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
		}							\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
		((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_atomic_write(inode)			do { } while (0)
#define stat_dec_atomic_write(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif

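/*
 * Usage note with a minimal sketch (not upstream code): because the
 * !CONFIG_F2FS_STAT_FS stubs above never expand their arguments, call
 * sites may use these helpers unconditionally and the expressions simply
 * disappear when statistics are compiled out. The bggc skip counters
 * referenced here are assumed to exist on the stat-enabled build, which
 * is exactly why callers go through the macros instead of touching the
 * fields directly.
 */
#if 0	/* example only, never compiled */
static void my_bggc_skipped(struct f2fs_sb_info *sbi, bool busy_io)
{
	if (busy_io)
		stat_io_skip_bggc_count(sbi);
	else
		stat_other_skip_bggc_count(sbi);
}
#endif
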
extern const struct file_operations f2fs_dir_operations;
#ifdef CONFIG_UNICODE
extern const struct dentry_operations f2fs_dentry_ops;
#endif
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
			struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			const struct f2fs_filename *fname,
			struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
			struct page *page, struct inode *dir,
			struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root,
			struct rb_node **parent,
			unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
			struct rb_entry *cached_re, unsigned int ofs,
			struct rb_entry **prev_entry, struct rb_entry **next_entry,
			struct rb_node ***insert_p, struct rb_node **insert_parent,
			bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
			struct rb_root_cached *root);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

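/*
 * Illustrative sketch (not upstream code): probing the extent cache before
 * falling back to a node-page walk. The extent_info fields (fofs, blk) and
 * NULL_ADDR are assumed to match declarations earlier in this header.
 */
#if 0	/* example only, never compiled */
static block_t my_cached_blkaddr(struct inode *inode, pgoff_t index)
{
	struct extent_info ei;

	if (f2fs_lookup_extent_cache(inode, index, &ei))
		/* hit: translate the offset within the cached extent */
		return ei.blk + (index - ei.fofs);

	return NULL_ADDR;	/* miss: caller walks the node tree */
}
#endif
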
/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if reads of the inode's data need to undergo a
 * post-processing step, such as decryption, fs-verity verification,
 * or decompression.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

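/*
 * Illustrative sketch (not upstream code): read completion must be handed
 * off to a workqueue whenever f2fs_post_read_required() holds, since
 * decryption, verity verification, and decompression cannot run in bio
 * end-io context. "my_readpage_done" is hypothetical; SetPageUptodate()
 * and unlock_page() are the usual core-mm helpers.
 */
#if 0	/* example only, never compiled */
static void my_readpage_done(struct inode *inode, struct page *page)
{
	if (!f2fs_post_read_required(inode)) {
		/* plain data: the page can be finished right here */
		SetPageUptodate(page);
		unlock_page(page);
		return;
	}
	/* otherwise queue the page for post-read processing first */
}
#endif
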
/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
			struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
			pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
			int *submitted,
			struct writeback_control *wbc,
			enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
			unsigned nr_pages, sector_t *last_block_in_bio,
			bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic);
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression support is not compiled in */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
#endif

static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline u64 f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return 0;
	if (S_ISREG(inode->i_mode)) {
		if (get_dirty_pages(inode))
			return 1;
		if (fi->i_compr_blocks)
			return fi->i_compr_blocks;
	}

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

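/*
 * Illustrative sketch (not upstream code): turning compression on for an
 * inode, mirroring how set_compress_context() above is meant to be used.
 * f2fs_may_compress() and f2fs_sb_has_compression() are defined just below
 * in this header; locking around the i_flags update is the caller's
 * responsibility and is elided here.
 */
#if 0	/* example only, never compiled */
static int my_enable_compression(struct inode *inode)
{
	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;	/* feature bit not set on this fs */
	if (!f2fs_may_compress(inode))
		return -EINVAL;		/* pinned/atomic/volatile etc. */

	set_compress_context(inode);
	return 0;
}
#endif
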
#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);

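/*
 * For reference (not upstream text): each F2FS_FEATURE_FUNCS() invocation
 * above stamps out one accessor. For example, the encrypt line expands to:
 */
#if 0	/* expansion shown for illustration only */
static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
{
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
}
#endif
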
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
					block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_encrypt(struct inode *dir, struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	umode_t mode = inode->i_mode;

	/*
	 * If the directory is encrypted or dummy encryption is enabled,
	 * then we should encrypt the inode.
	 */
	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi))
		return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#endif
	return false;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
			f2fs_is_atomic_file(inode) ||
			f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !F2FS_I(inode)->i_compr_blocks)
		return;

	if (add) {
		F2FS_I(inode)->i_compr_blocks += diff;
		stat_add_compr_blocks(inode, diff);
	} else {
		F2FS_I(inode)->i_compr_blocks -= diff;
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

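/*
 * Worked example (not upstream text) for f2fs_i_compr_blocks_update()
 * above, as I read it: "blocks" is the number of blocks the cluster still
 * occupies after (de)compression, so diff is the number of blocks saved.
 * With a 16-block cluster compressed down to 4 blocks:
 *
 *	diff = i_cluster_size - blocks = 16 - 4 = 12
 *
 * i.e. i_compr_blocks grows by 12 when the savings are recorded
 * (add == true) and shrinks by 12 when they are released (add == false).
 */
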
static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	/*
	 * For zoned block devices, fall back from direct IO to buffered IO
	 * so that all IOs can be serialized by the log-structured write path.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
					!IS_SWAPFILE(inode))
		return true;

	return false;
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */