/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#include <linux/fscrypt_supp.h>
#else
#include <linux/fscrypt_notsupp.h>
#endif
#include <crypto/hash.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			WARN_ON(1);					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
		}							\
	} while (0)
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
	FAULT_KMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_IO,
	FAULT_CHECKPOINT,
	FAULT_MAX,
};

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern char *fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_FORCE_FG_GC		0x00004000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_ADAPTIVE		0x00020000
#define F2FS_MOUNT_LFS			0x00040000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000

#define clear_opt(sbi, option)	((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt.opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
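
/*
 * Usage sketch (illustrative only; "sbi" stands for a hypothetical
 * struct f2fs_sb_info pointer): set_opt()/test_opt()/clear_opt() above
 * token-paste their second argument onto F2FS_MOUNT_, so callers pass
 * only the suffix, not the full flag name:
 *
 *	set_opt(sbi, BG_GC);		// opt |= F2FS_MOUNT_BG_GC
 *	if (test_opt(sbi, DISCARD))	// non-zero once the bit is set
 *		clear_opt(sbi, DISCARD);
 */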

typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020

#define F2FS_HAS_FEATURE(sb, mask)					\
	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_SET_FEATURE(sb, mask)					\
	(F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sb, mask)					\
	(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020

#define DEF_BATCHED_TRIM_SECTIONS	2048
#define BATCHED_TRIM_SEGMENTS(sbi)	\
		(GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
#define BATCHED_TRIM_BLOCKS(sbi)	\
		(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DISCARD_ISSUE_RATE		8
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
	__u64 trimmed;
};

/*
 * For CP/NAT/SIT/SSA readahead
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_POR,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : (blk_num - 1))
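
/*
 * Worked example (informal): pending discards are binned by block
 * count, one list per length, with everything >= MAX_PLIST_NUM clamped
 * into the last bin:
 *
 *	plist_idx(1)    == 0	// 1-block discards
 *	plist_idx(16)   == 15	// DEFAULT_DISCARD_GRANULARITY
 *	plist_idx(512)  == 511	// clamped to MAX_PLIST_NUM - 1
 *	plist_idx(4096) == 511	// ditto
 */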

#define P_ACTIVE	0x01
#define P_TRIM		0x02
#define plist_issue(tag)	(((tag) & P_ACTIVE) || ((tag) & P_TRIM))

enum {
	D_PREP,
	D_SUBMIT,
	D_DONE,
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */
	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	int error;			/* bio error */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;	/* # of discards in the list */
	unsigned int max_discards;	/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;	/* # of undiscard blocks */
	atomic_t issued_discard;	/* # of issued discards */
	atomic_t issing_discard;	/* # of in-flight discards */
	atomic_t discard_cmd_cnt;	/* # of cached cmd count */
	struct rb_root root;		/* root of discard rb-tree */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
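
/*
 * Usage sketch (illustrative, hypothetical caller): before stuffing a
 * NAT update into the checkpoint journal, writers check for room and
 * fall back to the regular NAT area when the journal is full:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL)) {
 *		int i = update_nats_in_cursum(journal, 1);
 *
 *		nid_in_journal(journal, i) = cpu_to_le32(nid);
 *	} else {
 *		// flush entries to their NAT blocks instead
 *	}
 */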

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS
#define F2FS_IOC_GETVERSION		FS_IOC_GETVERSION

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT		_IOWR(F2FS_IOCTL_MAGIC, 8,	\
						struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
						struct f2fs_move_range)
#define F2FS_IOC_FLUSH_DEVICE		_IOW(F2FS_IOCTL_MAGIC, 10,	\
						struct f2fs_flush_device)
#define F2FS_IOC_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,	\
						struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)

#define F2FS_IOC_SET_ENCRYPTION_POLICY	FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY	FS_IOC_GET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_PWSALT	FS_IOC_GET_ENCRYPTION_PWSALT

/*
 * should be same as XFS_IOC_GOINGDOWN.
 * Flags for going down operation used by FS_IOC_GOINGDOWN
 */
#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#define F2FS_IOC32_GETVERSION		FS_IOC32_GETVERSION
#endif

#define F2FS_IOC_FSGETXATTR		FS_IOC_FSGETXATTR
#define F2FS_IOC_FSSETXATTR		FS_IOC_FSSETXATTR

struct f2fs_gc_range {
	u32 sync;
	u64 start;
	u64 len;
};

struct f2fs_defragment {
	u64 start;
	u64 len;
};

struct f2fs_move_range {
	u32 dst_fd;		/* destination fd */
	u64 pos_in;		/* start position in src_fd */
	u64 pos_out;		/* start position in dst_fd */
	u64 len;		/* size to move */
};

struct f2fs_flush_device {
	u32 dev_num;		/* device number to flush */
	u32 segments;		/* # of segments to flush */
};

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				DEF_INLINE_RESERVED_SIZE -		\
				F2FS_INLINE_XATTR_ADDRS))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode)	((NR_INLINE_DENTRY(inode) + \
					BITS_PER_BYTE - 1) / BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) -	\
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) +	\
				INLINE_DENTRY_BITMAP_SIZE(inode)))
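
/*
 * Worked example (informal; assumes a 4KB block with 923 i_addr slots,
 * F2FS_INLINE_XATTR_ADDRS == 50, SIZE_OF_DIR_ENTRY == 11,
 * F2FS_SLOT_LEN == 8 and no extra attribute space):
 * MAX_INLINE_DATA = 4 * (923 - 1 - 50) = 3488 bytes,
 * NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 entries, and
 * INLINE_DENTRY_BITMAP_SIZE = ceil(182 / 8) = 23 bytes.
 */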

/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = &t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* vector size for gang look-up from extent cache that consists of radix tree */
#define EXT_TREE_VEC_SIZE	64

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	unsigned int ofs;		/* start offset of the entry */
	unsigned int len;		/* length of the entry */
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;
	union {
		struct {
			unsigned int fofs;
			unsigned int len;
			u32 blk;
		};
		struct extent_info ei;	/* extent info */
	};
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root root;		/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
};
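
/*
 * Layout note (informal): the anonymous struct inside extent_node (and
 * discard_cmd earlier) mirrors its *_info twin field for field, so
 * generic rb-tree lookup code can view either one through struct
 * rb_entry:
 *
 *	struct extent_node *en = ...;
 *	struct rb_entry *re = (struct rb_entry *)en;
 *	// re->ofs aliases en->fofs, re->len aliases en->len
 */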

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode)	clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define DEF_DIR_LEVEL		0
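
/*
 * Usage sketch (illustrative, hypothetical caller) for struct
 * f2fs_map_blocks above, phrased like a bmap-style lookup against the
 * f2fs_map_blocks() entry point declared elsewhere in f2fs:
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = blkno,
 *		.m_len = 1,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		pblk = map.m_pblk;
 */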

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs*/
	unsigned long flags;		/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */
	struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
						struct discard_info *front)
{
	return back->lstart + back->len == front->lstart;
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
						struct discard_info *back)
{
	return __is_discard_mergeable(back, cur);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
						struct discard_info *front)
{
	return __is_discard_mergeable(cur, front);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct inode *inode,
			struct extent_tree *et, struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

enum nid_list {
	FREE_NID_LIST,
	ALLOC_NID_LIST,
	MAX_NID_LIST,
};
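
/*
 * Worked example (informal) for the merge helpers above: extents
 * {fofs=0, blk=100, len=4} and {fofs=4, blk=104, len=2} are mergeable
 * into {fofs=0, blk=100, len=6}, since file offsets and block addresses
 * are both contiguous; {fofs=4, blk=200, len=2} is not, because the
 * on-disk addresses jump even though the file offsets touch.
 */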

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the nat entry cache */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
	unsigned int nid_cnt[MAX_NID_LIST];	/* the number of free node ids */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for building free nids */
	unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
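
/*
 * Usage sketch (illustrative, hypothetical caller): a typical lookup
 * starts from a zeroed dnode and lets get_dnode_of_data() fill in the
 * node page and ofs_in_node for a file offset:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = datablock_addr(dn.inode, dn.node_page,
 *						dn.ofs_in_node);
 *		f2fs_put_dnode(&dn);
 *	}
 */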

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs that consist
 * of 8 for data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE,
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t issing_flush;			/* # of in-flight flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	NR_COUNT_TYPE,
};
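
/*
 * Usage sketch (illustrative): writeback accounting pairs one increment
 * with one decrement per page, and WB_DATA_TYPE() picks the bucket by
 * whether the page is checkpoint-guaranteed (inc_page_count() and
 * dec_page_count() are defined later in this header):
 *
 *	inc_page_count(sbi, WB_DATA_TYPE(page));	// at bio submission
 *	...
 *	dec_page_count(sbi, WB_DATA_TYPE(page));	// at bio completion
 */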

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum iostat_type {
	APP_DIRECT_IO,			/* app direct IOs */
	APP_BUFFERED_IO,		/* app buffered IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before CoW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	enum iostat_type io_type;	/* io type */
};

#define is_read_io(rw) ((rw) == READ)
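
/*
 * Usage sketch (illustrative, values hypothetical): writers describe an
 * IO with a designated-initializer fio and hand it to the submit path:
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = F2FS_I_SB(inode),
 *		.type = DATA,
 *		.temp = WARM,
 *		.op = REQ_OP_WRITE,
 *		.op_flags = REQ_SYNC,
 *		.page = page,
 *		.io_type = FS_DATA_IO,
 *	};
 */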

struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	u8 *blkz_type;			/* Array of zones type */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is doing or not */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
};

enum {
	CP_TIME,
	REQ_TIME,
	MAX_TIME,
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
						/* bio ordering for NODE/DATA */
	int write_io_size_bits;			/* Write IO size bits */
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remaining current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number*/
	unsigned int node_ino_num;		/* node inode number*/
	unsigned int meta_ino_num;		/* meta inode number*/
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int active_logs;			/* # of active logs */
	int dir_level;				/* directory level */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */

	u32 s_next_generation;			/* for NFS support */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req;			/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/* threshold for converting bg victims for fg */
	u64 fggc_threshold;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t aw_cnt;			/* # of atomic writes */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	int bg_gc;				/* background gc calls */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long write_iostat[NR_IO_TYPE];
	bool iostat_enable;

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	/* For fault injection */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;
#endif

#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(type)				\
	printk("%sF2FS-fs : inject %s in %s of %pF\n",		\
		KERN_INFO, fault_name[type],			\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#endif
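
/*
 * Usage sketch (illustrative): callers bracket a real failure site with
 * time_to_inject() so that configured fault types fire once per
 * inject_rate calls:
 *
 *	#ifdef CONFIG_F2FS_FAULT_INJECTION
 *		if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *			f2fs_show_injection_info(FAULT_KMALLOC);
 *			return NULL;
 *		}
 *	#endif
 *		return kmalloc(size, flags);
 */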

/*
 * For write statistics. Suppose sector size is 512 bytes,
 * and the return value is in kbytes. s is of struct f2fs_sb_info.
 */
#define BD_PART_WRITTEN(s)						\
(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[1]) -		\
		(s)->sectors_written_start) >> 1)

static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	sbi->last_time[type] = jiffies;
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	struct timespec ts = {sbi->interval_time[type], 0};
	unsigned long interval = timespec_to_jiffies(&ts);

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline bool is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
		return 0;

	return f2fs_time_over(sbi, REQ_TIME);
}

/*
 * Inline functions
 */
static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
	u32 retval;
	int err;

	shash->tfm = sbi->s_chksum_driver;
	shash->flags = 0;
	*ctx = F2FS_SUPER_MAGIC;

	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

	retval = *ctx;
	barrier_data(ctx);
	return retval;
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	desc.shash.flags = 0;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page->mapping);
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}
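
/*
 * Usage sketch (illustrative, hypothetical caller): a checkpoint block
 * stores its CRC at checksum_offset, so validation recomputes
 * f2fs_crc32() over the bytes that precede it:
 *
 *	crc_offset = le32_to_cpu(cp->checksum_offset);
 *	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
 *	if (!f2fs_crc_valid(sbi, crc, cp, crc_offset))
 *		return -EINVAL;		// corrupted checkpoint copy
 */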

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
{
	size_t crc_offset = le32_to_cpu(cp->checksum_offset);

	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
}

static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);

	return ckpt_flags & f;
}

static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}

static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__set_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
{
	unsigned long flags;

	set_sbi_flag(sbi, SBI_NEED_FSCK);

	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	kfree(NM_I(sbi)->nat_bits);
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
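
/*
 * Usage sketch (illustrative): the locked wrappers above are the normal
 * way to flip checkpoint flags; cp_lock is irq-safe because flags can
 * be toggled from bio completion context:
 *
 *	set_ckpt_flags(sbi, CP_ERROR_FLAG);
 *	if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG))
 *		...	// filesystem is treated as erroneous from here on
 */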

static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
					struct cp_control *cpc)
{
	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);

	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
	return down_read_trylock(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
{
	int reason = CP_SYNC;

	if (test_opt(sbi, FASTBOOT))
		reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;
	return reason;
}

static inline bool __remain_node_summaries(int reason)
{
	return (reason & (CP_UMOUNT | CP_FASTBOOT));
}

static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}

/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
		return -EINVAL;
	if (unlikely(nid >= NM_I(sbi)->max_nid))
		return -EINVAL;
	return 0;
}

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;

	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}
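
/*
 * Usage sketch (illustrative, hypothetical caller): block-allocating
 * paths take cp_rwsem shared via f2fs_lock_op() so that a concurrent
 * checkpoint, which takes it exclusively via f2fs_lock_all(), sees
 * either all or none of the update:
 *
 *	f2fs_lock_op(sbi);
 *	err = reserve_new_block(&dn);
 *	f2fs_unlock_op(sbi);
 */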

static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t *count)
{
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(FAULT_BLOCK);
		release = *count;
		goto enospc;
	}
#endif
	/*
	 * let's increase this prior to the actual block count change so
	 * that f2fs_sync_file can avoid data races when deciding on a
	 * checkpoint.
	 */
	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

	spin_lock(&sbi->stat_lock);
	sbi->total_valid_block_count += (block_t)(*count);
	avail_user_block_count = sbi->user_block_count - sbi->reserved_blocks;
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count = avail_user_block_count;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);

	if (release)
		dquot_release_reservation_block(inode, release);
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						block_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;

	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
	f2fs_bug_on(sbi, inode->i_blocks < sectors);
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	f2fs_i_blocks_write(inode, count, false, true);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);

	if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
		count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
		return;

	set_sbi_flag(sbi, SBI_IS_DIRTY);
}

static inline void inode_inc_dirty_pages(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_pages);
	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}
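
/*
 * Behavior note with example (informal): inc_valid_block_count() above
 * clamps a reservation rather than failing it outright.  With 8 blocks
 * left above the reserved pool, a request for 12 returns 0 with *count
 * trimmed to 8 and the quota reservation for the other 4 released;
 * -ENOSPC comes back only when nothing could be granted.
 */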

static inline void inode_dec_dirty_pages(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}

static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_pages(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_pages);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
						sbi->log_blocks_per_seg;

	return segs / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->discard_blks;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset;

	if (__cp_payload(sbi) > 0) {
		if (flag == NAT_BITMAP)
			return &ckpt->sit_nat_version_bitmap;
		else
			return (unsigned char *)ckpt + F2FS_BLKSIZE;
	} else {
		offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
		return &ckpt->sit_nat_version_bitmap + offset;
	}
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}
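
/*
 * Worked example (informal, numbers hypothetical): checkpoint packs
 * ping-pong between two segment-aligned slots.  With cp_blkaddr = 512
 * and 512 blocks per segment, pack #1 starts at block 512 and pack #2
 * at 1024; __start_cp_addr() returns the live copy and
 * __start_cp_next_addr() the slot the next checkpoint will overwrite.
 */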

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;
	bool quota = inode && !is_inode;

	if (quota) {
		int ret = dquot_reserve_block(inode, 1);
		if (ret)
			return ret;
	}

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + 1;
	if (unlikely(valid_block_count + sbi->reserved_blocks >
						sbi->user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (quota)
		dquot_release_reservation_block(inode, 1);
	return -ENOSPC;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
	f2fs_bug_on(sbi, !is_inode && !inode->i_blocks);

	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;

	spin_unlock(&sbi->stat_lock);

	if (!is_inode)
		f2fs_i_blocks_write(inode, 1, false, true);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct page *page = find_lock_page(mapping, index);

	if (page)
		return page;

	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
		f2fs_show_injection_info(FAULT_PAGE_ALLOC);
		return NULL;
	}
#endif
	if (!for_write)
		return grab_cache_page(mapping, index);
	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}

static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}
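
/*
 * Usage sketch (illustrative, hypothetical caller): pages come back
 * locked and referenced, and f2fs_put_page(page, 1) below undoes both
 * in one call:
 *
 *	page = f2fs_grab_cache_page(mapping, index, false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	f2fs_put_page(page, 1);		// unlock + drop the reference
 */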
put_page(page); 1816 } 1817 1818 static inline void f2fs_put_dnode(struct dnode_of_data *dn) 1819 { 1820 if (dn->node_page) 1821 f2fs_put_page(dn->node_page, 1); 1822 if (dn->inode_page && dn->node_page != dn->inode_page) 1823 f2fs_put_page(dn->inode_page, 0); 1824 dn->node_page = NULL; 1825 dn->inode_page = NULL; 1826 } 1827 1828 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, 1829 size_t size) 1830 { 1831 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); 1832 } 1833 1834 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, 1835 gfp_t flags) 1836 { 1837 void *entry; 1838 1839 entry = kmem_cache_alloc(cachep, flags); 1840 if (!entry) 1841 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); 1842 return entry; 1843 } 1844 1845 static inline struct bio *f2fs_bio_alloc(int npages) 1846 { 1847 struct bio *bio; 1848 1849 /* No failure on bio allocation */ 1850 bio = bio_alloc(GFP_NOIO, npages); 1851 if (!bio) 1852 bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); 1853 return bio; 1854 } 1855 1856 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, 1857 unsigned long index, void *item) 1858 { 1859 while (radix_tree_insert(root, index, item)) 1860 cond_resched(); 1861 } 1862 1863 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) 1864 1865 static inline bool IS_INODE(struct page *page) 1866 { 1867 struct f2fs_node *p = F2FS_NODE(page); 1868 1869 return RAW_IS_INODE(p); 1870 } 1871 1872 static inline int offset_in_addr(struct f2fs_inode *i) 1873 { 1874 return (i->i_inline & F2FS_EXTRA_ATTR) ? 1875 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; 1876 } 1877 1878 static inline __le32 *blkaddr_in_node(struct f2fs_node *node) 1879 { 1880 return RAW_IS_INODE(node) ? 
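/*
 * An inode block carries its data block addresses in i_addr[], while a
 * direct node block carries them in dn.addr[]; both are __le32 arrays
 * of on-disk block addresses, so callers such as datablock_addr()
 * below can index either one uniformly.
 */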
node->i.i_addr : node->dn.addr; 1881 } 1882 1883 static inline int f2fs_has_extra_attr(struct inode *inode); 1884 static inline block_t datablock_addr(struct inode *inode, 1885 struct page *node_page, unsigned int offset) 1886 { 1887 struct f2fs_node *raw_node; 1888 __le32 *addr_array; 1889 int base = 0; 1890 bool is_inode = IS_INODE(node_page); 1891 1892 raw_node = F2FS_NODE(node_page); 1893 1894 /* from GC path only */ 1895 if (!inode) { 1896 if (is_inode) 1897 base = offset_in_addr(&raw_node->i); 1898 } else if (f2fs_has_extra_attr(inode) && is_inode) { 1899 base = get_extra_isize(inode); 1900 } 1901 1902 addr_array = blkaddr_in_node(raw_node); 1903 return le32_to_cpu(addr_array[base + offset]); 1904 } 1905 /* the bit helpers below are MSB-first within each byte: bit 0 maps to mask 0x80 */ 1906 static inline int f2fs_test_bit(unsigned int nr, char *addr) 1907 { 1908 int mask; 1909 1910 addr += (nr >> 3); 1911 mask = 1 << (7 - (nr & 0x07)); 1912 return mask & *addr; 1913 } 1914 1915 static inline void f2fs_set_bit(unsigned int nr, char *addr) 1916 { 1917 int mask; 1918 1919 addr += (nr >> 3); 1920 mask = 1 << (7 - (nr & 0x07)); 1921 *addr |= mask; 1922 } 1923 1924 static inline void f2fs_clear_bit(unsigned int nr, char *addr) 1925 { 1926 int mask; 1927 1928 addr += (nr >> 3); 1929 mask = 1 << (7 - (nr & 0x07)); 1930 *addr &= ~mask; 1931 } 1932 1933 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) 1934 { 1935 int mask; 1936 int ret; 1937 1938 addr += (nr >> 3); 1939 mask = 1 << (7 - (nr & 0x07)); 1940 ret = mask & *addr; 1941 *addr |= mask; 1942 return ret; 1943 } 1944 1945 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) 1946 { 1947 int mask; 1948 int ret; 1949 1950 addr += (nr >> 3); 1951 mask = 1 << (7 - (nr & 0x07)); 1952 ret = mask & *addr; 1953 *addr &= ~mask; 1954 return ret; 1955 } 1956 1957 static inline void f2fs_change_bit(unsigned int nr, char *addr) 1958 { 1959 int mask; 1960 1961 addr += (nr >> 3); 1962 mask = 1 << (7 - (nr & 0x07)); 1963 *addr ^= mask; 1964 } 1965 1966 #define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL)) 1967 #define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL) 1968 #define F2FS_FL_INHERITED (FS_PROJINHERIT_FL) 1969 1970 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) 1971 { 1972 if (S_ISDIR(mode)) 1973 return flags; 1974 else if (S_ISREG(mode)) 1975 return flags & F2FS_REG_FLMASK; 1976 else 1977 return flags & F2FS_OTHER_FLMASK; 1978 } 1979 1980 /* used for f2fs_inode_info->flags */ 1981 enum { 1982 FI_NEW_INODE, /* indicate newly allocated inode */ 1983 FI_DIRTY_INODE, /* indicate inode is dirty or not */ 1984 FI_AUTO_RECOVER, /* indicate inode is recoverable */ 1985 FI_DIRTY_DIR, /* indicate directory has dirty pages */ 1986 FI_INC_LINK, /* need to increment i_nlink */ 1987 FI_ACL_MODE, /* indicate acl mode */ 1988 FI_NO_ALLOC, /* should not allocate any blocks */ 1989 FI_FREE_NID, /* free allocated nid */ 1990 FI_NO_EXTENT, /* not to use the extent cache */ 1991 FI_INLINE_XATTR, /* used for inline xattr */ 1992 FI_INLINE_DATA, /* used for inline data */ 1993 FI_INLINE_DENTRY, /* used for inline dentry */ 1994 FI_APPEND_WRITE, /* inode has appended data */ 1995 FI_UPDATE_WRITE, /* inode has in-place-update data */ 1996 FI_NEED_IPU, /* used for in-place update (IPU) per file */ 1997 FI_ATOMIC_FILE, /* indicate atomic file */ 1998 FI_ATOMIC_COMMIT, /* indicate the state of atomic committing */ 1999 FI_VOLATILE_FILE, /* indicate volatile file */ 2000 FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ 2001 FI_DROP_CACHE, /* drop dirty page cache */ 2002 FI_DATA_EXIST, /*
indicate data exists */ 2003 FI_INLINE_DOTS, /* indicate inline dot dentries */ 2004 FI_DO_DEFRAG, /* indicate defragment is running */ 2005 FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ 2006 FI_NO_PREALLOC, /* indicate skipped preallocated blocks */ 2007 FI_HOT_DATA, /* indicate file is hot */ 2008 FI_EXTRA_ATTR, /* indicate file has extra attribute */ 2009 FI_PROJ_INHERIT, /* indicate file inherits projectid */ 2010 }; 2011 2012 static inline void __mark_inode_dirty_flag(struct inode *inode, 2013 int flag, bool set) 2014 { 2015 switch (flag) { 2016 case FI_INLINE_XATTR: 2017 case FI_INLINE_DATA: 2018 case FI_INLINE_DENTRY: 2019 if (set) 2020 return; 2021 /* else fall through: clearing an inline flag dirties the inode too */ case FI_DATA_EXIST: 2022 case FI_INLINE_DOTS: 2023 f2fs_mark_inode_dirty_sync(inode, true); 2024 } 2025 } 2026 2027 static inline void set_inode_flag(struct inode *inode, int flag) 2028 { 2029 if (!test_bit(flag, &F2FS_I(inode)->flags)) 2030 set_bit(flag, &F2FS_I(inode)->flags); 2031 __mark_inode_dirty_flag(inode, flag, true); 2032 } 2033 2034 static inline int is_inode_flag_set(struct inode *inode, int flag) 2035 { 2036 return test_bit(flag, &F2FS_I(inode)->flags); 2037 } 2038 2039 static inline void clear_inode_flag(struct inode *inode, int flag) 2040 { 2041 if (test_bit(flag, &F2FS_I(inode)->flags)) 2042 clear_bit(flag, &F2FS_I(inode)->flags); 2043 __mark_inode_dirty_flag(inode, flag, false); 2044 } 2045 2046 static inline void set_acl_inode(struct inode *inode, umode_t mode) 2047 { 2048 F2FS_I(inode)->i_acl_mode = mode; 2049 set_inode_flag(inode, FI_ACL_MODE); 2050 f2fs_mark_inode_dirty_sync(inode, false); 2051 } 2052 2053 static inline void f2fs_i_links_write(struct inode *inode, bool inc) 2054 { 2055 if (inc) 2056 inc_nlink(inode); 2057 else 2058 drop_nlink(inode); 2059 f2fs_mark_inode_dirty_sync(inode, true); 2060 } 2061 2062 static inline void f2fs_i_blocks_write(struct inode *inode, 2063 block_t diff, bool add, bool claim) 2064 { 2065 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2066 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2067 2068 /* add == true with claim == true must pair with an earlier dquot_reserve_block() */ 2069 if (add) { 2070 if (claim) 2071 dquot_claim_block(inode, diff); 2072 else 2073 dquot_alloc_block_nofail(inode, diff); 2074 } else { 2075 dquot_free_block(inode, diff); 2076 } 2077 2078 f2fs_mark_inode_dirty_sync(inode, true); 2079 if (clean || recover) 2080 set_inode_flag(inode, FI_AUTO_RECOVER); 2081 } 2082 2083 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) 2084 { 2085 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); 2086 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); 2087 2088 if (i_size_read(inode) == i_size) 2089 return; 2090 2091 i_size_write(inode, i_size); 2092 f2fs_mark_inode_dirty_sync(inode, true); 2093 if (clean || recover) 2094 set_inode_flag(inode, FI_AUTO_RECOVER); 2095 } 2096 2097 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) 2098 { 2099 F2FS_I(inode)->i_current_depth = depth; 2100 f2fs_mark_inode_dirty_sync(inode, true); 2101 } 2102 2103 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) 2104 { 2105 F2FS_I(inode)->i_xattr_nid = xnid; 2106 f2fs_mark_inode_dirty_sync(inode, true); 2107 } 2108 2109 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) 2110 { 2111 F2FS_I(inode)->i_pino = pino; 2112 f2fs_mark_inode_dirty_sync(inode, true); 2113 } 2114 2115 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) 2116 { 2117 struct f2fs_inode_info
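/*
 * Mirrors the on-disk ri->i_inline bits into the in-memory flag bits,
 * i.e. (illustrative mapping):
 *
 *	F2FS_INLINE_XATTR -> FI_INLINE_XATTR
 *	F2FS_INLINE_DATA  -> FI_INLINE_DATA
 *	F2FS_EXTRA_ATTR   -> FI_EXTRA_ATTR
 *
 * set_raw_inline() below is the inverse translation, applied when the
 * inode is written back.
 */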
*fi = F2FS_I(inode); 2118 2119 if (ri->i_inline & F2FS_INLINE_XATTR) 2120 set_bit(FI_INLINE_XATTR, &fi->flags); 2121 if (ri->i_inline & F2FS_INLINE_DATA) 2122 set_bit(FI_INLINE_DATA, &fi->flags); 2123 if (ri->i_inline & F2FS_INLINE_DENTRY) 2124 set_bit(FI_INLINE_DENTRY, &fi->flags); 2125 if (ri->i_inline & F2FS_DATA_EXIST) 2126 set_bit(FI_DATA_EXIST, &fi->flags); 2127 if (ri->i_inline & F2FS_INLINE_DOTS) 2128 set_bit(FI_INLINE_DOTS, &fi->flags); 2129 if (ri->i_inline & F2FS_EXTRA_ATTR) 2130 set_bit(FI_EXTRA_ATTR, &fi->flags); 2131 } 2132 2133 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) 2134 { 2135 ri->i_inline = 0; 2136 2137 if (is_inode_flag_set(inode, FI_INLINE_XATTR)) 2138 ri->i_inline |= F2FS_INLINE_XATTR; 2139 if (is_inode_flag_set(inode, FI_INLINE_DATA)) 2140 ri->i_inline |= F2FS_INLINE_DATA; 2141 if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) 2142 ri->i_inline |= F2FS_INLINE_DENTRY; 2143 if (is_inode_flag_set(inode, FI_DATA_EXIST)) 2144 ri->i_inline |= F2FS_DATA_EXIST; 2145 if (is_inode_flag_set(inode, FI_INLINE_DOTS)) 2146 ri->i_inline |= F2FS_INLINE_DOTS; 2147 if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) 2148 ri->i_inline |= F2FS_EXTRA_ATTR; 2149 } 2150 2151 static inline int f2fs_has_extra_attr(struct inode *inode) 2152 { 2153 return is_inode_flag_set(inode, FI_EXTRA_ATTR); 2154 } 2155 2156 static inline int f2fs_has_inline_xattr(struct inode *inode) 2157 { 2158 return is_inode_flag_set(inode, FI_INLINE_XATTR); 2159 } 2160 2161 static inline unsigned int addrs_per_inode(struct inode *inode) 2162 { 2163 if (f2fs_has_inline_xattr(inode)) 2164 return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS; 2165 return CUR_ADDRS_PER_INODE(inode); 2166 } 2167 2168 static inline void *inline_xattr_addr(struct page *page) 2169 { 2170 struct f2fs_inode *ri = F2FS_INODE(page); 2171 2172 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - 2173 F2FS_INLINE_XATTR_ADDRS]); 2174 } 2175 2176 static inline int inline_xattr_size(struct inode *inode) 2177 { 2178 if (f2fs_has_inline_xattr(inode)) 2179 return F2FS_INLINE_XATTR_ADDRS << 2; 2180 else 2181 return 0; 2182 } 2183 2184 static inline int f2fs_has_inline_data(struct inode *inode) 2185 { 2186 return is_inode_flag_set(inode, FI_INLINE_DATA); 2187 } 2188 2189 static inline int f2fs_exist_data(struct inode *inode) 2190 { 2191 return is_inode_flag_set(inode, FI_DATA_EXIST); 2192 } 2193 2194 static inline int f2fs_has_inline_dots(struct inode *inode) 2195 { 2196 return is_inode_flag_set(inode, FI_INLINE_DOTS); 2197 } 2198 2199 static inline bool f2fs_is_atomic_file(struct inode *inode) 2200 { 2201 return is_inode_flag_set(inode, FI_ATOMIC_FILE); 2202 } 2203 2204 static inline bool f2fs_is_commit_atomic_write(struct inode *inode) 2205 { 2206 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT); 2207 } 2208 2209 static inline bool f2fs_is_volatile_file(struct inode *inode) 2210 { 2211 return is_inode_flag_set(inode, FI_VOLATILE_FILE); 2212 } 2213 2214 static inline bool f2fs_is_first_block_written(struct inode *inode) 2215 { 2216 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); 2217 } 2218 2219 static inline bool f2fs_is_drop_cache(struct inode *inode) 2220 { 2221 return is_inode_flag_set(inode, FI_DROP_CACHE); 2222 } 2223 2224 static inline void *inline_data_addr(struct inode *inode, struct page *page) 2225 { 2226 struct f2fs_inode *ri = F2FS_INODE(page); 2227 int extra_size = get_extra_isize(inode); 2228 2229 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); 2230 } 2231 2232 static inline int 
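/*
 * Illustrative i_addr[] layout behind inline_data_addr() above, for an
 * inode with extra attributes enabled (indices in __le32 words, where
 * extra_size = get_extra_isize(inode)):
 *
 *	i_addr[0 .. extra_size - 1]                       extra attributes
 *	i_addr[extra_size ..
 *	       extra_size + DEF_INLINE_RESERVED_SIZE - 1] reserved
 *	i_addr[extra_size + DEF_INLINE_RESERVED_SIZE ..]  inline data
 */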
f2fs_has_inline_dentry(struct inode *inode) 2233 { 2234 return is_inode_flag_set(inode, FI_INLINE_DENTRY); 2235 } 2236 2237 static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page) 2238 { 2239 if (!f2fs_has_inline_dentry(dir)) 2240 kunmap(page); 2241 } 2242 2243 static inline int is_file(struct inode *inode, int type) 2244 { 2245 return F2FS_I(inode)->i_advise & type; 2246 } 2247 2248 static inline void set_file(struct inode *inode, int type) 2249 { 2250 F2FS_I(inode)->i_advise |= type; 2251 f2fs_mark_inode_dirty_sync(inode, true); 2252 } 2253 2254 static inline void clear_file(struct inode *inode, int type) 2255 { 2256 F2FS_I(inode)->i_advise &= ~type; 2257 f2fs_mark_inode_dirty_sync(inode, true); 2258 } 2259 2260 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) 2261 { 2262 if (dsync) { 2263 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2264 bool ret; 2265 2266 spin_lock(&sbi->inode_lock[DIRTY_META]); 2267 ret = list_empty(&F2FS_I(inode)->gdirty_list); 2268 spin_unlock(&sbi->inode_lock[DIRTY_META]); 2269 return ret; 2270 } 2271 if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || 2272 file_keep_isize(inode) || 2273 i_size_read(inode) & PAGE_MASK) 2274 return false; 2275 return F2FS_I(inode)->last_disk_size == i_size_read(inode); 2276 } 2277 2278 static inline int f2fs_readonly(struct super_block *sb) 2279 { 2280 return sb->s_flags & MS_RDONLY; 2281 } 2282 2283 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) 2284 { 2285 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); 2286 } 2287 2288 static inline bool is_dot_dotdot(const struct qstr *str) 2289 { 2290 if (str->len == 1 && str->name[0] == '.') 2291 return true; 2292 2293 if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') 2294 return true; 2295 2296 return false; 2297 } 2298 2299 static inline bool f2fs_may_extent_tree(struct inode *inode) 2300 { 2301 if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) || 2302 is_inode_flag_set(inode, FI_NO_EXTENT)) 2303 return false; 2304 2305 return S_ISREG(inode->i_mode); 2306 } 2307 2308 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, 2309 size_t size, gfp_t flags) 2310 { 2311 #ifdef CONFIG_F2FS_FAULT_INJECTION 2312 if (time_to_inject(sbi, FAULT_KMALLOC)) { 2313 f2fs_show_injection_info(FAULT_KMALLOC); 2314 return NULL; 2315 } 2316 #endif 2317 return kmalloc(size, flags); 2318 } 2319 2320 static inline int get_extra_isize(struct inode *inode) 2321 { 2322 return F2FS_I(inode)->i_extra_isize / sizeof(__le32); 2323 } 2324 2325 #define get_inode_mode(i) \ 2326 ((is_inode_flag_set(i, FI_ACL_MODE)) ? 
\ 2327 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) 2328 2329 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ 2330 (offsetof(struct f2fs_inode, i_extra_end) - \ 2331 offsetof(struct f2fs_inode, i_extra_isize)) \ 2332 2333 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) 2334 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ 2335 ((offsetof(typeof(*f2fs_inode), field) + \ 2336 sizeof((f2fs_inode)->field)) \ 2337 <= (F2FS_OLD_ATTRIBUTE_SIZE + extra_isize)) \ 2338 2339 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) 2340 { 2341 int i; 2342 2343 spin_lock(&sbi->iostat_lock); 2344 for (i = 0; i < NR_IO_TYPE; i++) 2345 sbi->write_iostat[i] = 0; 2346 spin_unlock(&sbi->iostat_lock); 2347 } 2348 2349 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, 2350 enum iostat_type type, unsigned long long io_bytes) 2351 { 2352 if (!sbi->iostat_enable) 2353 return; 2354 spin_lock(&sbi->iostat_lock); 2355 sbi->write_iostat[type] += io_bytes; 2356 2357 if (type == APP_WRITE_IO || type == APP_DIRECT_IO) 2358 sbi->write_iostat[APP_BUFFERED_IO] = 2359 sbi->write_iostat[APP_WRITE_IO] - 2360 sbi->write_iostat[APP_DIRECT_IO]; 2361 spin_unlock(&sbi->iostat_lock); 2362 } 2363 2364 /* 2365 * file.c 2366 */ 2367 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 2368 void truncate_data_blocks(struct dnode_of_data *dn); 2369 int truncate_blocks(struct inode *inode, u64 from, bool lock); 2370 int f2fs_truncate(struct inode *inode); 2371 int f2fs_getattr(const struct path *path, struct kstat *stat, 2372 u32 request_mask, unsigned int flags); 2373 int f2fs_setattr(struct dentry *dentry, struct iattr *attr); 2374 int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); 2375 int truncate_data_blocks_range(struct dnode_of_data *dn, int count); 2376 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 2377 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 2378 2379 /* 2380 * inode.c 2381 */ 2382 void f2fs_set_inode_flags(struct inode *inode); 2383 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); 2384 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); 2385 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); 2386 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); 2387 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); 2388 int update_inode(struct inode *inode, struct page *node_page); 2389 int update_inode_page(struct inode *inode); 2390 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); 2391 void f2fs_evict_inode(struct inode *inode); 2392 void handle_failed_inode(struct inode *inode); 2393 2394 /* 2395 * namei.c 2396 */ 2397 struct dentry *f2fs_get_parent(struct dentry *child); 2398 2399 /* 2400 * dir.c 2401 */ 2402 void set_de_type(struct f2fs_dir_entry *de, umode_t mode); 2403 unsigned char get_de_type(struct f2fs_dir_entry *de); 2404 struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname, 2405 f2fs_hash_t namehash, int *max_slots, 2406 struct f2fs_dentry_ptr *d); 2407 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, 2408 unsigned int start_pos, struct fscrypt_str *fstr); 2409 void do_make_empty_dir(struct inode *inode, struct inode *parent, 2410 struct f2fs_dentry_ptr *d); 2411 struct page *init_inode_metadata(struct inode *inode, struct inode *dir, 2412 const struct qstr *new_name, 2413 const struct qstr *orig_name, struct 
page *dpage); 2414 void update_parent_metadata(struct inode *dir, struct inode *inode, 2415 unsigned int current_depth); 2416 int room_for_filename(const void *bitmap, int slots, int max_slots); 2417 void f2fs_drop_nlink(struct inode *dir, struct inode *inode); 2418 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, 2419 struct fscrypt_name *fname, struct page **res_page); 2420 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, 2421 const struct qstr *child, struct page **res_page); 2422 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); 2423 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, 2424 struct page **page); 2425 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, 2426 struct page *page, struct inode *inode); 2427 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, 2428 const struct qstr *name, f2fs_hash_t name_hash, 2429 unsigned int bit_pos); 2430 int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, 2431 const struct qstr *orig_name, 2432 struct inode *inode, nid_t ino, umode_t mode); 2433 int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname, 2434 struct inode *inode, nid_t ino, umode_t mode); 2435 int __f2fs_add_link(struct inode *dir, const struct qstr *name, 2436 struct inode *inode, nid_t ino, umode_t mode); 2437 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, 2438 struct inode *dir, struct inode *inode); 2439 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); 2440 bool f2fs_empty_dir(struct inode *dir); 2441 2442 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) 2443 { 2444 return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name, 2445 inode, inode->i_ino, inode->i_mode); 2446 } 2447 2448 /* 2449 * super.c 2450 */ 2451 int f2fs_inode_dirtied(struct inode *inode, bool sync); 2452 void f2fs_inode_synced(struct inode *inode); 2453 void f2fs_enable_quota_files(struct f2fs_sb_info *sbi); 2454 void f2fs_quota_off_umount(struct super_block *sb); 2455 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 2456 int f2fs_sync_fs(struct super_block *sb, int sync); 2457 extern __printf(3, 4) 2458 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); 2459 int sanity_check_ckpt(struct f2fs_sb_info *sbi); 2460 2461 /* 2462 * hash.c 2463 */ 2464 f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, 2465 struct fscrypt_name *fname); 2466 2467 /* 2468 * node.c 2469 */ 2470 struct dnode_of_data; 2471 struct node_info; 2472 2473 bool available_free_memory(struct f2fs_sb_info *sbi, int type); 2474 int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); 2475 bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); 2476 bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); 2477 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni); 2478 pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); 2479 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); 2480 int truncate_inode_blocks(struct inode *inode, pgoff_t from); 2481 int truncate_xattr_node(struct inode *inode, struct page *page); 2482 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino); 2483 int remove_inode_page(struct inode *inode); 2484 struct page *new_inode_page(struct inode *inode); 2485 struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs); 2486 void ra_node_page(struct 
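/*
 * ra_node_page() only kicks off asynchronous readahead of the node
 * page for a nid; a later get_node_page() then finds it up to date in
 * the cache.  Illustrative caller pattern (hypothetical):
 *
 *	ra_node_page(sbi, nid);
 *	... other work ...
 *	page = get_node_page(sbi, nid);
 */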
f2fs_sb_info *sbi, nid_t nid); 2487 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); 2488 struct page *get_node_page_ra(struct page *parent, int start); 2489 void move_node_page(struct page *node_page, int gc_type); 2490 int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, 2491 struct writeback_control *wbc, bool atomic); 2492 int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc, 2493 bool do_balance, enum iostat_type io_type); 2494 void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); 2495 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); 2496 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); 2497 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); 2498 int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); 2499 void recover_inline_xattr(struct inode *inode, struct page *page); 2500 int recover_xattr_data(struct inode *inode, struct page *page, 2501 block_t blkaddr); 2502 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); 2503 int restore_node_summary(struct f2fs_sb_info *sbi, 2504 unsigned int segno, struct f2fs_summary_block *sum); 2505 void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2506 int build_node_manager(struct f2fs_sb_info *sbi); 2507 void destroy_node_manager(struct f2fs_sb_info *sbi); 2508 int __init create_node_manager_caches(void); 2509 void destroy_node_manager_caches(void); 2510 2511 /* 2512 * segment.c 2513 */ 2514 bool need_SSR(struct f2fs_sb_info *sbi); 2515 void register_inmem_page(struct inode *inode, struct page *page); 2516 void drop_inmem_pages(struct inode *inode); 2517 void drop_inmem_page(struct inode *inode, struct page *page); 2518 int commit_inmem_pages(struct inode *inode); 2519 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); 2520 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi); 2521 int f2fs_issue_flush(struct f2fs_sb_info *sbi); 2522 int create_flush_cmd_control(struct f2fs_sb_info *sbi); 2523 void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); 2524 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); 2525 bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 2526 void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); 2527 void stop_discard_thread(struct f2fs_sb_info *sbi); 2528 void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount); 2529 void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2530 void release_discard_addrs(struct f2fs_sb_info *sbi); 2531 int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 2532 void allocate_new_segments(struct f2fs_sb_info *sbi); 2533 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 2534 bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2535 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); 2536 void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr); 2537 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page, 2538 enum iostat_type io_type); 2539 void write_node_page(unsigned int nid, struct f2fs_io_info *fio); 2540 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio); 2541 int rewrite_data_page(struct f2fs_io_info *fio); 2542 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, 2543 block_t old_blkaddr, block_t new_blkaddr, 2544 bool recover_curseg, bool recover_newaddr); 2545 void 
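/*
 * Roughly, f2fs_replace_block() rewires one data block from old_addr
 * to new_addr on behalf of recovery and block exchange: it updates the
 * dnode entry and the SIT/summary state of the segments involved;
 * recover_curseg asks it to restore the current allocation point
 * afterwards, and recover_newaddr to treat new_addr as newly valid.
 */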
f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, 2546 block_t old_addr, block_t new_addr, 2547 unsigned char version, bool recover_curseg, 2548 bool recover_newaddr); 2549 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, 2550 block_t old_blkaddr, block_t *new_blkaddr, 2551 struct f2fs_summary *sum, int type, 2552 struct f2fs_io_info *fio, bool add_list); 2553 void f2fs_wait_on_page_writeback(struct page *page, 2554 enum page_type type, bool ordered); 2555 void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr); 2556 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 2557 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); 2558 int lookup_journal_in_cursum(struct f2fs_journal *journal, int type, 2559 unsigned int val, int alloc); 2560 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2561 int build_segment_manager(struct f2fs_sb_info *sbi); 2562 void destroy_segment_manager(struct f2fs_sb_info *sbi); 2563 int __init create_segment_manager_caches(void); 2564 void destroy_segment_manager_caches(void); 2565 2566 /* 2567 * checkpoint.c 2568 */ 2569 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io); 2570 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 2571 struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); 2572 struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); 2573 bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); 2574 int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, 2575 int type, bool sync); 2576 void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index); 2577 long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, 2578 long nr_to_write, enum iostat_type io_type); 2579 void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 2580 void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); 2581 void release_ino_entry(struct f2fs_sb_info *sbi, bool all); 2582 bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); 2583 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi); 2584 int acquire_orphan_inode(struct f2fs_sb_info *sbi); 2585 void release_orphan_inode(struct f2fs_sb_info *sbi); 2586 void add_orphan_inode(struct inode *inode); 2587 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); 2588 int recover_orphan_inodes(struct f2fs_sb_info *sbi); 2589 int get_valid_checkpoint(struct f2fs_sb_info *sbi); 2590 void update_dirty_page(struct inode *inode, struct page *page); 2591 void remove_dirty_inode(struct inode *inode); 2592 int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type); 2593 int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2594 void init_ino_entry_info(struct f2fs_sb_info *sbi); 2595 int __init create_checkpoint_caches(void); 2596 void destroy_checkpoint_caches(void); 2597 2598 /* 2599 * data.c 2600 */ 2601 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); 2602 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, 2603 struct inode *inode, nid_t ino, pgoff_t idx, 2604 enum page_type type); 2605 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); 2606 int f2fs_submit_page_bio(struct f2fs_io_info *fio); 2607 int f2fs_submit_page_write(struct f2fs_io_info *fio); 2608 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 2609 block_t blk_addr, struct bio *bio); 2610 int 
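/*
 * On a multi-device volume all block addresses form one flat space;
 * f2fs_target_device_index() maps a global blkaddr back to the member
 * device holding it.  Illustrative example with hypothetical sizes:
 * given two devices of 0x1000 blocks each, blkaddr 0x1800 falls on
 * device index 1, at offset 0x800 within that device.
 */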
f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); 2611 void set_data_blkaddr(struct dnode_of_data *dn); 2612 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); 2613 int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); 2614 int reserve_new_block(struct dnode_of_data *dn); 2615 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index); 2616 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from); 2617 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); 2618 struct page *get_read_data_page(struct inode *inode, pgoff_t index, 2619 int op_flags, bool for_write); 2620 struct page *find_data_page(struct inode *inode, pgoff_t index); 2621 struct page *get_lock_data_page(struct inode *inode, pgoff_t index, 2622 bool for_write); 2623 struct page *get_new_data_page(struct inode *inode, 2624 struct page *ipage, pgoff_t index, bool new_i_size); 2625 int do_write_data_page(struct f2fs_io_info *fio); 2626 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, 2627 int create, int flag); 2628 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 2629 u64 start, u64 len); 2630 void f2fs_set_page_dirty_nobuffers(struct page *page); 2631 int __f2fs_write_data_pages(struct address_space *mapping, 2632 struct writeback_control *wbc, 2633 enum iostat_type io_type); 2634 void f2fs_invalidate_page(struct page *page, unsigned int offset, 2635 unsigned int length); 2636 int f2fs_release_page(struct page *page, gfp_t wait); 2637 #ifdef CONFIG_MIGRATION 2638 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, 2639 struct page *page, enum migrate_mode mode); 2640 #endif 2641 2642 /* 2643 * gc.c 2644 */ 2645 int start_gc_thread(struct f2fs_sb_info *sbi); 2646 void stop_gc_thread(struct f2fs_sb_info *sbi); 2647 block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode); 2648 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, 2649 unsigned int segno); 2650 void build_gc_manager(struct f2fs_sb_info *sbi); 2651 2652 /* 2653 * recovery.c 2654 */ 2655 int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); 2656 bool space_for_roll_forward(struct f2fs_sb_info *sbi); 2657 2658 /* 2659 * debug.c 2660 */ 2661 #ifdef CONFIG_F2FS_STAT_FS 2662 struct f2fs_stat_info { 2663 struct list_head stat_list; 2664 struct f2fs_sb_info *sbi; 2665 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; 2666 int main_area_segs, main_area_sections, main_area_zones; 2667 unsigned long long hit_largest, hit_cached, hit_rbtree; 2668 unsigned long long hit_total, total_ext; 2669 int ext_tree, zombie_tree, ext_node; 2670 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta; 2671 int inmem_pages; 2672 unsigned int ndirty_dirs, ndirty_files, ndirty_all; 2673 int nats, dirty_nats, sits, dirty_sits; 2674 int free_nids, avail_nids, alloc_nids; 2675 int total_count, utilization; 2676 int bg_gc, nr_wb_cp_data, nr_wb_data; 2677 int nr_flushing, nr_flushed, nr_discarding, nr_discarded; 2678 int nr_discard_cmd; 2679 unsigned int undiscard_blks; 2680 int inline_xattr, inline_inode, inline_dir, append, update, orphans; 2681 int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt; 2682 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; 2683 unsigned int bimodal, avg_vblocks; 2684 int util_free, util_valid, util_invalid; 2685 int rsvd_segs, overp_segs; 2686 int dirty_count, node_pages, meta_pages; 2687 int prefree_count, call_count, cp_count, 
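/*
 * cp_count counts every checkpoint, bg_cp_count those issued from the
 * background path; see stat_inc_cp_count()/stat_inc_bg_cp_count() below.
 */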
bg_cp_count; 2688 int tot_segs, node_segs, data_segs, free_segs, free_secs; 2689 int bg_node_segs, bg_data_segs; 2690 int tot_blks, data_blks, node_blks; 2691 int bg_data_blks, bg_node_blks; 2692 int curseg[NR_CURSEG_TYPE]; 2693 int cursec[NR_CURSEG_TYPE]; 2694 int curzone[NR_CURSEG_TYPE]; 2695 2696 unsigned int segment_count[2]; 2697 unsigned int block_count[2]; 2698 unsigned int inplace_count; 2699 unsigned long long base_mem, cache_mem, page_mem; 2700 }; 2701 2702 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) 2703 { 2704 return (struct f2fs_stat_info *)sbi->stat_info; 2705 } 2706 2707 #define stat_inc_cp_count(si) ((si)->cp_count++) 2708 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++) 2709 #define stat_inc_call_count(si) ((si)->call_count++) 2710 #define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++) 2711 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) 2712 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) 2713 #define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext)) 2714 #define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree)) 2715 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) 2716 #define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached)) 2717 #define stat_inc_inline_xattr(inode) \ 2718 do { \ 2719 if (f2fs_has_inline_xattr(inode)) \ 2720 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ 2721 } while (0) 2722 #define stat_dec_inline_xattr(inode) \ 2723 do { \ 2724 if (f2fs_has_inline_xattr(inode)) \ 2725 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ 2726 } while (0) 2727 #define stat_inc_inline_inode(inode) \ 2728 do { \ 2729 if (f2fs_has_inline_data(inode)) \ 2730 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ 2731 } while (0) 2732 #define stat_dec_inline_inode(inode) \ 2733 do { \ 2734 if (f2fs_has_inline_data(inode)) \ 2735 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ 2736 } while (0) 2737 #define stat_inc_inline_dir(inode) \ 2738 do { \ 2739 if (f2fs_has_inline_dentry(inode)) \ 2740 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ 2741 } while (0) 2742 #define stat_dec_inline_dir(inode) \ 2743 do { \ 2744 if (f2fs_has_inline_dentry(inode)) \ 2745 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ 2746 } while (0) 2747 #define stat_inc_seg_type(sbi, curseg) \ 2748 ((sbi)->segment_count[(curseg)->alloc_type]++) 2749 #define stat_inc_block_count(sbi, curseg) \ 2750 ((sbi)->block_count[(curseg)->alloc_type]++) 2751 #define stat_inc_inplace_blocks(sbi) \ 2752 (atomic_inc(&(sbi)->inplace_count)) 2753 #define stat_inc_atomic_write(inode) \ 2754 (atomic_inc(&F2FS_I_SB(inode)->aw_cnt)) 2755 #define stat_dec_atomic_write(inode) \ 2756 (atomic_dec(&F2FS_I_SB(inode)->aw_cnt)) 2757 #define stat_update_max_atomic_write(inode) \ 2758 do { \ 2759 int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt); \ 2760 int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ 2761 if (cur > max) \ 2762 atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ 2763 } while (0) 2764 #define stat_inc_volatile_write(inode) \ 2765 (atomic_inc(&F2FS_I_SB(inode)->vw_cnt)) 2766 #define stat_dec_volatile_write(inode) \ 2767 (atomic_dec(&F2FS_I_SB(inode)->vw_cnt)) 2768 #define stat_update_max_volatile_write(inode) \ 2769 do { \ 2770 int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \ 2771 int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \ 2772 if (cur > max) \ 2773 atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \ 2774 } while (0) 2775 #define stat_inc_seg_count(sbi, 
type, gc_type) \ 2776 do { \ 2777 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 2778 si->tot_segs++; \ 2779 if ((type) == SUM_TYPE_DATA) { \ 2780 si->data_segs++; \ 2781 si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \ 2782 } else { \ 2783 si->node_segs++; \ 2784 si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \ 2785 } \ 2786 } while (0) 2787 2788 #define stat_inc_tot_blk_count(si, blks) \ 2789 ((si)->tot_blks += (blks)) 2790 2791 #define stat_inc_data_blk_count(sbi, blks, gc_type) \ 2792 do { \ 2793 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 2794 stat_inc_tot_blk_count(si, blks); \ 2795 si->data_blks += (blks); \ 2796 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 2797 } while (0) 2798 2799 #define stat_inc_node_blk_count(sbi, blks, gc_type) \ 2800 do { \ 2801 struct f2fs_stat_info *si = F2FS_STAT(sbi); \ 2802 stat_inc_tot_blk_count(si, blks); \ 2803 si->node_blks += (blks); \ 2804 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ 2805 } while (0) 2806 2807 int f2fs_build_stats(struct f2fs_sb_info *sbi); 2808 void f2fs_destroy_stats(struct f2fs_sb_info *sbi); 2809 int __init f2fs_create_root_stats(void); 2810 void f2fs_destroy_root_stats(void); 2811 #else 2812 #define stat_inc_cp_count(si) do { } while (0) 2813 #define stat_inc_bg_cp_count(si) do { } while (0) 2814 #define stat_inc_call_count(si) do { } while (0) 2815 #define stat_inc_bggc_count(si) do { } while (0) 2816 #define stat_inc_dirty_inode(sbi, type) do { } while (0) 2817 #define stat_dec_dirty_inode(sbi, type) do { } while (0) 2818 #define stat_inc_total_hit(sb) do { } while (0) 2819 #define stat_inc_rbtree_node_hit(sb) do { } while (0) 2820 #define stat_inc_largest_node_hit(sbi) do { } while (0) 2821 #define stat_inc_cached_node_hit(sbi) do { } while (0) 2822 #define stat_inc_inline_xattr(inode) do { } while (0) 2823 #define stat_dec_inline_xattr(inode) do { } while (0) 2824 #define stat_inc_inline_inode(inode) do { } while (0) 2825 #define stat_dec_inline_inode(inode) do { } while (0) 2826 #define stat_inc_inline_dir(inode) do { } while (0) 2827 #define stat_dec_inline_dir(inode) do { } while (0) 2828 #define stat_inc_atomic_write(inode) do { } while (0) 2829 #define stat_dec_atomic_write(inode) do { } while (0) 2830 #define stat_update_max_atomic_write(inode) do { } while (0) 2831 #define stat_inc_volatile_write(inode) do { } while (0) 2832 #define stat_dec_volatile_write(inode) do { } while (0) 2833 #define stat_update_max_volatile_write(inode) do { } while (0) 2834 #define stat_inc_seg_type(sbi, curseg) do { } while (0) 2835 #define stat_inc_block_count(sbi, curseg) do { } while (0) 2836 #define stat_inc_inplace_blocks(sbi) do { } while (0) 2837 #define stat_inc_seg_count(sbi, type, gc_type) do { } while (0) 2838 #define stat_inc_tot_blk_count(si, blks) do { } while (0) 2839 #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) 2840 #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) 2841 2842 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 2843 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 2844 static inline int __init f2fs_create_root_stats(void) { return 0; } 2845 static inline void f2fs_destroy_root_stats(void) { } 2846 #endif 2847 2848 extern const struct file_operations f2fs_dir_operations; 2849 extern const struct file_operations f2fs_file_operations; 2850 extern const struct inode_operations f2fs_file_inode_operations; 2851 extern const struct address_space_operations f2fs_dblock_aops; 2852 extern const struct 
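/*
 * f2fs keeps three address spaces: f2fs_dblock_aops (above) for data
 * pages, f2fs_node_aops for node pages and f2fs_meta_aops for
 * checkpoint/NAT/SIT/SSA meta pages.
 */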
address_space_operations f2fs_node_aops; 2853 extern const struct address_space_operations f2fs_meta_aops; 2854 extern const struct inode_operations f2fs_dir_inode_operations; 2855 extern const struct inode_operations f2fs_symlink_inode_operations; 2856 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; 2857 extern const struct inode_operations f2fs_special_inode_operations; 2858 extern struct kmem_cache *inode_entry_slab; 2859 2860 /* 2861 * inline.c 2862 */ 2863 bool f2fs_may_inline_data(struct inode *inode); 2864 bool f2fs_may_inline_dentry(struct inode *inode); 2865 void read_inline_data(struct page *page, struct page *ipage); 2866 void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from); 2867 int f2fs_read_inline_data(struct inode *inode, struct page *page); 2868 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); 2869 int f2fs_convert_inline_inode(struct inode *inode); 2870 int f2fs_write_inline_data(struct inode *inode, struct page *page); 2871 bool recover_inline_data(struct inode *inode, struct page *npage); 2872 struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, 2873 struct fscrypt_name *fname, struct page **res_page); 2874 int make_empty_inline_dir(struct inode *inode, struct inode *parent, 2875 struct page *ipage); 2876 int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, 2877 const struct qstr *orig_name, 2878 struct inode *inode, nid_t ino, umode_t mode); 2879 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, 2880 struct inode *dir, struct inode *inode); 2881 bool f2fs_empty_inline_dir(struct inode *dir); 2882 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, 2883 struct fscrypt_str *fstr); 2884 int f2fs_inline_data_fiemap(struct inode *inode, 2885 struct fiemap_extent_info *fieinfo, 2886 __u64 start, __u64 len); 2887 2888 /* 2889 * shrinker.c 2890 */ 2891 unsigned long f2fs_shrink_count(struct shrinker *shrink, 2892 struct shrink_control *sc); 2893 unsigned long f2fs_shrink_scan(struct shrinker *shrink, 2894 struct shrink_control *sc); 2895 void f2fs_join_shrinker(struct f2fs_sb_info *sbi); 2896 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); 2897 2898 /* 2899 * extent_cache.c 2900 */ 2901 struct rb_entry *__lookup_rb_tree(struct rb_root *root, 2902 struct rb_entry *cached_re, unsigned int ofs); 2903 struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, 2904 struct rb_root *root, struct rb_node **parent, 2905 unsigned int ofs); 2906 struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root, 2907 struct rb_entry *cached_re, unsigned int ofs, 2908 struct rb_entry **prev_entry, struct rb_entry **next_entry, 2909 struct rb_node ***insert_p, struct rb_node **insert_parent, 2910 bool force); 2911 bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi, 2912 struct rb_root *root); 2913 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); 2914 bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext); 2915 void f2fs_drop_extent_tree(struct inode *inode); 2916 unsigned int f2fs_destroy_extent_node(struct inode *inode); 2917 void f2fs_destroy_extent_tree(struct inode *inode); 2918 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, 2919 struct extent_info *ei); 2920 void f2fs_update_extent_cache(struct dnode_of_data *dn); 2921 void f2fs_update_extent_cache_range(struct dnode_of_data *dn, 2922 pgoff_t fofs, block_t blkaddr, unsigned int len); 2923 void 
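/*
 * Illustrative lookup pattern for the extent cache API above
 * (hypothetical caller):
 *
 *	struct extent_info ei;
 *
 *	if (f2fs_lookup_extent_cache(inode, pgofs, &ei))
 *		blkaddr = ei.blk + pgofs - ei.fofs;	(cache hit)
 *	else
 *		... fall back to walking the dnode tree ...
 */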
init_extent_cache_info(struct f2fs_sb_info *sbi); 2924 int __init create_extent_cache(void); 2925 void destroy_extent_cache(void); 2926 2927 /* 2928 * sysfs.c 2929 */ 2930 int __init f2fs_init_sysfs(void); 2931 void f2fs_exit_sysfs(void); 2932 int f2fs_register_sysfs(struct f2fs_sb_info *sbi); 2933 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); 2934 2935 /* 2936 * crypto support 2937 */ 2938 static inline bool f2fs_encrypted_inode(struct inode *inode) 2939 { 2940 return file_is_encrypt(inode); 2941 } 2942 2943 static inline bool f2fs_encrypted_file(struct inode *inode) 2944 { 2945 return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode); 2946 } 2947 2948 static inline void f2fs_set_encrypted_inode(struct inode *inode) 2949 { 2950 #ifdef CONFIG_F2FS_FS_ENCRYPTION 2951 file_set_encrypt(inode); 2952 #endif 2953 } 2954 2955 static inline bool f2fs_bio_encrypted(struct bio *bio) 2956 { 2957 return bio->bi_private != NULL; 2958 } 2959 2960 static inline int f2fs_sb_has_crypto(struct super_block *sb) 2961 { 2962 return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT); 2963 } 2964 2965 static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb) 2966 { 2967 return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED); 2968 } 2969 2970 static inline int f2fs_sb_has_extra_attr(struct super_block *sb) 2971 { 2972 return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_EXTRA_ATTR); 2973 } 2974 2975 static inline int f2fs_sb_has_project_quota(struct super_block *sb) 2976 { 2977 return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_PRJQUOTA); 2978 } 2979 2980 static inline int f2fs_sb_has_inode_chksum(struct super_block *sb) 2981 { 2982 return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM); 2983 } 2984 2985 #ifdef CONFIG_BLK_DEV_ZONED 2986 static inline int get_blkz_type(struct f2fs_sb_info *sbi, 2987 struct block_device *bdev, block_t blkaddr) 2988 { 2989 unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz; 2990 int i; 2991 2992 for (i = 0; i < sbi->s_ndevs; i++) 2993 if (FDEV(i).bdev == bdev) 2994 return FDEV(i).blkz_type[zno]; 2995 return -EINVAL; 2996 } 2997 #endif 2998 2999 static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi) 3000 { 3001 struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev); 3002 3003 return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb); 3004 } 3005 3006 static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt) 3007 { 3008 clear_opt(sbi, ADAPTIVE); 3009 clear_opt(sbi, LFS); 3010 3011 switch (mt) { 3012 case F2FS_MOUNT_ADAPTIVE: 3013 set_opt(sbi, ADAPTIVE); 3014 break; 3015 case F2FS_MOUNT_LFS: 3016 set_opt(sbi, LFS); 3017 break; 3018 } 3019 } 3020 3021 static inline bool f2fs_may_encrypt(struct inode *inode) 3022 { 3023 #ifdef CONFIG_F2FS_FS_ENCRYPTION 3024 umode_t mode = inode->i_mode; 3025 3026 return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)); 3027 #else 3028 return 0; 3029 #endif 3030 } 3031 3032 #endif 3033
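/*
 * Illustrative mount-time usage of set_opt_mode() above (hypothetical
 * option-parsing caller):
 *
 *	set_opt_mode(sbi, F2FS_MOUNT_LFS);	 append-only logging
 *	set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);	 mixed in-place + append
 */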