/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			WARN_ON(1);					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
		}							\
	} while (0)
#endif
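
/*
 * Example (illustrative): f2fs_bug_on() asserts internal invariants.
 * With CONFIG_F2FS_CHECK_FS it panics via BUG_ON(); otherwise it only
 * warns once and flags the image so fsck.f2fs runs on the next mount:
 *
 *	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 */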

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
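
/*
 * Example (illustrative): inject_type is a bitmask over the fault enum
 * above, so enabling kmalloc and checkpoint faults looks like:
 *
 *	fi->inject_type = (1 << FAULT_KMALLOC) | (1 << FAULT_CHECKPOINT);
 *	if (IS_FAULT_SET(fi, FAULT_KMALLOC))
 *		...simulate an allocation failure...
 */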

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
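
/*
 * Example (illustrative): the helpers paste the F2FS_MOUNT_ prefix onto
 * the option name, so a mount-option parser typically does:
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		...start the discard thread...
 *	clear_opt(sbi, DISCARD);
 */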

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
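
/*
 * Example (illustrative): the signed-difference idiom keeps the
 * comparison correct even if the 64-bit checkpoint version ever
 * wrapped: ver_after(2, 1) is true, and ver_after(0, ULLONG_MAX) is
 * also true because (long long)(0 - ULLONG_MAX) == 1 > 0.
 */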

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned compress_log_size;		/* cluster log size */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
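
/*
 * Example (illustrative): feature bits live in the raw superblock as a
 * __le32, so the helpers byte-swap the mask once rather than the field:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		...the on-disk image supports per-file compression...
 */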

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
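
/*
 * Example (illustrative): pend_list[] buckets pending discards by
 * length, so plist_idx(1) == 0, plist_idx(16) == 15, and any request
 * of MAX_PLIST_NUM blocks or more falls into the last bucket:
 * plist_idx(1024) == MAX_PLIST_NUM - 1.
 */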

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */
	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used when candidates exist */
	unsigned int mid_interval;	/* used when the device is busy */
	unsigned int max_interval;	/* used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity at which discards ignore I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discards */
	atomic_t queued_discard;		/* # of queued discards */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
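
/*
 * Example (illustrative): callers reserve journal room before adding
 * an entry, e.g. in a NAT flush path:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		update_nats_in_cursum(journal, 1);
 */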

/*
 * f2fs-specific ioctl commands
 */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT		_IOWR(F2FS_IOCTL_MAGIC, 8,	\
						struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
						struct f2fs_move_range)
#define F2FS_IOC_FLUSH_DEVICE		_IOW(F2FS_IOCTL_MAGIC, 10,	\
						struct f2fs_flush_device)
#define F2FS_IOC_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,	\
						struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
#define F2FS_IOC_SET_PIN_FILE		_IOW(F2FS_IOCTL_MAGIC, 13, __u32)
#define F2FS_IOC_GET_PIN_FILE		_IOR(F2FS_IOCTL_MAGIC, 14, __u32)
#define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS		_IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#define F2FS_IOC_GET_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 17, __u64)
#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS				\
					_IOR(F2FS_IOCTL_MAGIC, 18, __u64)
#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS				\
					_IOR(F2FS_IOCTL_MAGIC, 19, __u64)
#define F2FS_IOC_SEC_TRIM_FILE		_IOW(F2FS_IOCTL_MAGIC, 20,	\
						struct f2fs_sectrim_range)
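
/*
 * Example (illustrative, userspace): the commands above are ordinary
 * ioctls, so pinning a file from an application looks roughly like:
 *
 *	__u32 pin = 1;
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_SET_PIN_FILE");
 */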

/*
 * Should be the same as XFS_IOC_GOINGDOWN.
 * Flags for the going-down operation used by F2FS_IOC_SHUTDOWN.
 */
#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down without syncing */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */

/*
 * Flags used by F2FS_IOC_SEC_TRIM_FILE
 */
#define F2FS_TRIM_FILE_DISCARD		0x1	/* send discard command */
#define F2FS_TRIM_FILE_ZEROOUT		0x2	/* zero out */
#define F2FS_TRIM_FILE_MASK		0x3

struct f2fs_gc_range {
	u32 sync;
	u64 start;
	u64 len;
};

struct f2fs_defragment {
	u64 start;
	u64 len;
};

struct f2fs_move_range {
	u32 dst_fd;		/* destination fd */
	u64 pos_in;		/* start position in src_fd */
	u64 pos_out;		/* start position in dst_fd */
	u64 len;		/* size to move */
};

struct f2fs_flush_device {
	u32 dev_num;		/* device number to flush */
	u32 segments;		/* # of segments to flush */
};

struct f2fs_sectrim_range {
	u64 start;
	u64 len;
	u64 flags;
};

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -	\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
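
/*
 * Worked example (assuming a default layout with no extra attributes:
 * CUR_ADDRS_PER_INODE == 923, 50 inline xattr slots, SIZE_OF_DIR_ENTRY
 * == 11, F2FS_SLOT_LEN == 8): MAX_INLINE_DATA == 4 * (923 - 50 - 1) ==
 * 3488 bytes, so NR_INLINE_DENTRY == 3488 * 8 / (19 * 8 + 1) == 182
 * entries, with a 23-byte bitmap and 7 reserved bytes.
 */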

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode or if the filesystem is
	 * doing an internal operation where usr_fname is also NULL.  In these
	 * cases we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
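
/*
 * Example (illustrative): make_dentry_ptr_inline() walks the packed
 * inline layout in order:
 *
 *	[dentry bitmap][reserved][f2fs_dir_entry array][filename slots]
 *
 * which is why d->dentry and d->filename are computed as byte offsets
 * from the bitmap base t.
 */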

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	unsigned int ofs;		/* start offset of the entry */
	unsigned int len;		/* length of the entry */
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_MAX,			/* max flag, never used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	pgoff_t ra_offset;		/* ongoing readahead offset */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	u64 i_compr_blocks;			/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
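
/*
 * Example (illustrative): two discards merge when they are logically
 * adjacent and the merged length stays within max_len, e.g.
 * {lstart 0, len 8} and {lstart 8, len 8} are mergeable into
 * {lstart 0, len 16}.
 */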

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
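
/*
 * Example (illustrative): extents merge only when both the file range
 * and the block range are contiguous: {fofs 0, blk 100, len 4} merges
 * with {fofs 4, blk 104, len 2} into {fofs 0, blk 100, len 6}.
 */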

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the NAT entry caches */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node ids */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for building free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
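
/*
 * Example (illustrative): callers seed the dnode before a lookup;
 * passing NULL pages and a zero nid lets f2fs_get_dnode_of_data()
 * resolve them from the file offset:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 */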

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, do not change these numbers in the code.
 * Instead, use the active_logs=x mount option to select 2, 4, or 6 logs
 * according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs: 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE,
	CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

/*
 * this value is set in a page as private data, which indicates that
 * the page is atomically written and is in the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)

#ifdef CONFIG_F2FS_IO_TRACE
#define IS_IO_TRACED_PAGE(page)			\
		(page_private(page) > 0 &&		\
		 page_private(page) < (unsigned long)PID_MAX_LIMIT)
#else
#define IS_IO_TRACED_PAGE(page) (0)
#endif

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

#define COMPRESS_DATA_RESERVED_SIZE		5
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	refcount_t ref;			/* reference count of raw pages */
};

/* decompress io context for read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages storing raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages storing compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	refcount_t ref;			/* reference count of compressed pages */
	bool failed;			/* indicate IO error during decompression */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE	((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
1391 
1392 struct f2fs_sb_info {
1393 	struct super_block *sb;			/* pointer to VFS super block */
1394 	struct proc_dir_entry *s_proc;		/* proc entry */
1395 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1396 	struct rw_semaphore sb_lock;		/* lock for raw super block */
1397 	int valid_super_block;			/* valid super block no */
1398 	unsigned long s_flag;				/* flags for sbi */
1399 	struct mutex writepages;		/* mutex for writepages() */
1400 #ifdef CONFIG_UNICODE
1401 	struct unicode_map *s_encoding;
1402 	__u16 s_encoding_flags;
1403 #endif
1404 
1405 #ifdef CONFIG_BLK_DEV_ZONED
1406 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1407 	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
1408 #endif
1409 
1410 	/* for node-related operations */
1411 	struct f2fs_nm_info *nm_info;		/* node manager */
1412 	struct inode *node_inode;		/* cache node blocks */
1413 
1414 	/* for segment-related operations */
1415 	struct f2fs_sm_info *sm_info;		/* segment manager */
1416 
1417 	/* for bio operations */
1418 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1419 	/* keep migration IO order for LFS mode */
1420 	struct rw_semaphore io_order_lock;
1421 	mempool_t *write_io_dummy;		/* Dummy pages */
1422 
1423 	/* for checkpoint */
1424 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1425 	int cur_cp_pack;			/* remain current cp pack */
1426 	spinlock_t cp_lock;			/* for flag in ckpt */
1427 	struct inode *meta_inode;		/* cache meta blocks */
1428 	struct mutex cp_mutex;			/* checkpoint procedure lock */
1429 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
1430 	struct rw_semaphore node_write;		/* locking node writes */
1431 	struct rw_semaphore node_change;	/* locking node change */
1432 	wait_queue_head_t cp_wait;
1433 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1434 	long interval_time[MAX_TIME];		/* to store thresholds */
1435 
1436 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1437 
1438 	spinlock_t fsync_node_lock;		/* for node entry lock */
1439 	struct list_head fsync_node_list;	/* node list head */
1440 	unsigned int fsync_seg_id;		/* sequence id */
1441 	unsigned int fsync_node_num;		/* number of node entries */
1442 
1443 	/* for orphan inode, use 0'th array */
1444 	unsigned int max_orphans;		/* max orphan inodes */
1445 
1446 	/* for inode management */
1447 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1448 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1449 	struct mutex flush_lock;		/* for flush exclusion */
1450 
1451 	/* for extent tree cache */
1452 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
1453 	struct mutex extent_tree_lock;	/* locking extent radix tree */
1454 	struct list_head extent_list;		/* lru list for shrinker */
1455 	spinlock_t extent_lock;			/* locking extent lru list */
1456 	atomic_t total_ext_tree;		/* extent tree count */
1457 	struct list_head zombie_list;		/* extent zombie tree list */
1458 	atomic_t total_zombie_tree;		/* extent zombie tree count */
1459 	atomic_t total_ext_node;		/* extent info count */
1460 
1461 	/* basic filesystem units */
1462 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1463 	unsigned int log_blocksize;		/* log2 block size */
1464 	unsigned int blocksize;			/* block size */
1465 	unsigned int root_ino_num;		/* root inode number*/
1466 	unsigned int node_ino_num;		/* node inode number*/
1467 	unsigned int meta_ino_num;		/* meta inode number*/
1468 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1469 	unsigned int blocks_per_seg;		/* blocks per segment */
1470 	unsigned int segs_per_sec;		/* segments per section */
1471 	unsigned int secs_per_zone;		/* sections per zone */
1472 	unsigned int total_sections;		/* total section count */
1473 	unsigned int total_node_count;		/* total node block count */
1474 	unsigned int total_valid_node_count;	/* valid node block count */
1475 	loff_t max_file_blocks;			/* max block index of file */
1476 	int dir_level;				/* directory level */
1477 	int readdir_ra;				/* readahead inode in readdir */
1478 
1479 	block_t user_block_count;		/* # of user blocks */
1480 	block_t total_valid_block_count;	/* # of valid blocks */
1481 	block_t discard_blks;			/* discard command candidats */
1482 	block_t last_valid_block_count;		/* for recovery */
1483 	block_t reserved_blocks;		/* configurable reserved blocks */
1484 	block_t current_reserved_blocks;	/* current reserved blocks */
1485 
1486 	/* Additional tracking for no checkpoint mode */
1487 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1488 
1489 	unsigned int nquota_files;		/* # of quota sysfile */
1490 	struct rw_semaphore quota_sem;		/* blocking cp for flags */
1491 
1492 	/* # of pages, see count_type */
1493 	atomic_t nr_pages[NR_COUNT_TYPE];
1494 	/* # of allocated blocks */
1495 	struct percpu_counter alloc_valid_block_count;
1496 
1497 	/* writeback control */
1498 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1499 
1500 	/* valid inode count */
1501 	struct percpu_counter total_valid_inode_count;
1502 
1503 	struct f2fs_mount_info mount_opt;	/* mount options */
1504 
1505 	/* for cleaning operations */
1506 	struct rw_semaphore gc_lock;		/*
1507 						 * semaphore for GC; avoids races
1508 						 * among GC threads and with CP
1509 						 */
1510 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1511 	unsigned int cur_victim_sec;		/* current victim section num */
1512 	unsigned int gc_mode;			/* current GC state */
1513 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1514 
1515 	/* for skip statistics */
1516 	unsigned int atomic_files;		/* # of opened atomic files */
1517 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
1518 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1519 
1520 	/* threshold for gc trials on pinned files */
1521 	u64 gc_pin_file_threshold;
1522 	struct rw_semaphore pin_sem;
1523 
1524 	/* maximum # of trials to find a victim segment for SSR and GC */
1525 	unsigned int max_victim_search;
1526 	/* migration granularity of garbage collection, unit: segment */
1527 	unsigned int migration_granularity;
1528 
1529 	/*
1530 	 * for stat information.
1531 	 * in the [2] arrays below, one slot is for LFS and the other for SSR.
1532 	 */
1533 #ifdef CONFIG_F2FS_STAT_FS
1534 	struct f2fs_stat_info *stat_info;	/* FS status information */
1535 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1536 	unsigned int segment_count[2];		/* # of allocated segments */
1537 	unsigned int block_count[2];		/* # of allocated blocks */
1538 	atomic_t inplace_count;		/* # of inplace update */
1539 	atomic64_t total_hit_ext;		/* # of lookup extent cache */
1540 	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
1541 	atomic64_t read_hit_largest;		/* # of hit largest extent node */
1542 	atomic64_t read_hit_cached;		/* # of hit cached extent node */
1543 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1544 	atomic_t inline_inode;			/* # of inline_data inodes */
1545 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1546 	atomic_t compr_inode;			/* # of compressed inodes */
1547 	atomic_t compr_blocks;			/* # of compressed blocks */
1548 	atomic_t vw_cnt;			/* # of volatile writes */
1549 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1550 	atomic_t max_vw_cnt;			/* max # of volatile writes */
1551 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1552 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1553 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1554 #endif
1555 	spinlock_t stat_lock;			/* lock for stat operations */
1556 
1557 	/* For app/fs IO statistics */
1558 	spinlock_t iostat_lock;
1559 	unsigned long long rw_iostat[NR_IO_TYPE];
1560 	unsigned long long prev_rw_iostat[NR_IO_TYPE];
1561 	bool iostat_enable;
1562 	unsigned long iostat_next_period;
1563 	unsigned int iostat_period_ms;
1564 
1565 	/* to attach REQ_META|REQ_FUA flags */
1566 	unsigned int data_io_flag;
1567 	unsigned int node_io_flag;
1568 
1569 	/* For sysfs support */
1570 	struct kobject s_kobj;
1571 	struct completion s_kobj_unregister;
1572 
1573 	/* For shrinker support */
1574 	struct list_head s_list;
1575 	int s_ndevs;				/* number of devices */
1576 	struct f2fs_dev_info *devs;		/* for device list */
1577 	unsigned int dirty_device;		/* for checkpoint data flush */
1578 	spinlock_t dev_lock;			/* protect dirty_device */
1579 	struct mutex umount_mutex;
1580 	unsigned int shrinker_run_no;
1581 
1582 	/* For write statistics */
1583 	u64 sectors_written_start;
1584 	u64 kbytes_written;
1585 
1586 	/* Reference to checksum algorithm driver via cryptoapi */
1587 	struct crypto_shash *s_chksum_driver;
1588 
1589 	/* Precomputed FS UUID checksum for seeding other checksums */
1590 	__u32 s_chksum_seed;
1591 
1592 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1593 
1594 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1595 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1596 };
1597 
1598 struct f2fs_private_dio {
1599 	struct inode *inode;
1600 	void *orig_private;
1601 	bio_end_io_t *orig_end_io;
1602 	bool write;
1603 };
1604 
1605 #ifdef CONFIG_F2FS_FAULT_INJECTION
1606 #define f2fs_show_injection_info(sbi, type)					\
1607 	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
1608 		KERN_INFO, sbi->sb->s_id,				\
1609 		f2fs_fault_name[type],					\
1610 		__func__, __builtin_return_address(0))
1611 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1612 {
1613 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1614 
1615 	if (!ffi->inject_rate)
1616 		return false;
1617 
1618 	if (!IS_FAULT_SET(ffi, type))
1619 		return false;
1620 
1621 	atomic_inc(&ffi->inject_ops);
1622 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1623 		atomic_set(&ffi->inject_ops, 0);
1624 		return true;
1625 	}
1626 	return false;
1627 }
1628 #else
1629 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1630 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1631 {
1632 	return false;
1633 }
1634 #endif
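
/*
 * Typical usage (illustrative; this exact pattern appears in the allocation
 * helpers later in this header): callers test time_to_inject() and, when it
 * fires, log the injection point before bailing out with a simulated failure:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */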
1635 
1636 /*
1637  * Test if the mounted volume is a multi-device volume.
1638  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1639  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1640  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1641  */
1642 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1643 {
1644 	return sbi->s_ndevs > 1;
1645 }
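
/*
 * Example, following the comment above: a volume built over several block
 * devices has s_ndevs >= 2, so f2fs_is_multi_device() returns true; a plain
 * single-disk volume leaves s_ndevs at 0 and a single zoned disk sets it to
 * 1, so both of those return false.
 */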
1646 
1647 /* For write statistics. Assumes a 512-byte sector size;
1648  * the return value is in kbytes. s is a struct f2fs_sb_info pointer.
1649  */
1650 #define BD_PART_WRITTEN(s)						 \
1651 (((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) -   \
1652 		(s)->sectors_written_start) >> 1)
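
/*
 * Worked example (assuming the 512-byte sectors noted above): if 2048 sectors
 * have been written to the device since sectors_written_start was sampled,
 * BD_PART_WRITTEN(sbi) == 2048 >> 1 == 1024 kbytes.
 */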
1653 
1654 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1655 {
1656 	unsigned long now = jiffies;
1657 
1658 	sbi->last_time[type] = now;
1659 
1660 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1661 	if (type == REQ_TIME) {
1662 		sbi->last_time[DISCARD_TIME] = now;
1663 		sbi->last_time[GC_TIME] = now;
1664 	}
1665 }
1666 
1667 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1668 {
1669 	unsigned long interval = sbi->interval_time[type] * HZ;
1670 
1671 	return time_after(jiffies, sbi->last_time[type] + interval);
1672 }
1673 
1674 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1675 						int type)
1676 {
1677 	unsigned long interval = sbi->interval_time[type] * HZ;
1678 	unsigned int wait_ms = 0;
1679 	long delta;
1680 
1681 	delta = (sbi->last_time[type] + interval) - jiffies;
1682 	if (delta > 0)
1683 		wait_ms = jiffies_to_msecs(delta);
1684 
1685 	return wait_ms;
1686 }
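
/*
 * Illustrative relation between the two helpers above: with
 * interval_time[CP_TIME] == 60 (seconds) and HZ == 100 (hypothetical values),
 * f2fs_time_over() becomes true 6000 jiffies after the last
 * f2fs_update_time(sbi, CP_TIME); until then f2fs_time_to_wait() reports the
 * remaining time in milliseconds.
 */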
1687 
1688 /*
1689  * Inline functions
1690  */
1691 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1692 			      const void *address, unsigned int length)
1693 {
1694 	struct {
1695 		struct shash_desc shash;
1696 		char ctx[4];
1697 	} desc;
1698 	int err;
1699 
1700 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1701 
1702 	desc.shash.tfm = sbi->s_chksum_driver;
1703 	*(u32 *)desc.ctx = crc;
1704 
1705 	err = crypto_shash_update(&desc.shash, address, length);
1706 	BUG_ON(err);
1707 
1708 	return *(u32 *)desc.ctx;
1709 }
1710 
1711 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1712 			   unsigned int length)
1713 {
1714 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1715 }
1716 
1717 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1718 				  void *buf, size_t buf_size)
1719 {
1720 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1721 }
1722 
1723 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1724 			      const void *address, unsigned int length)
1725 {
1726 	return __f2fs_crc32(sbi, crc, address, length);
1727 }
1728 
1729 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1730 {
1731 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1732 }
1733 
1734 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1735 {
1736 	return sb->s_fs_info;
1737 }
1738 
1739 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1740 {
1741 	return F2FS_SB(inode->i_sb);
1742 }
1743 
1744 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1745 {
1746 	return F2FS_I_SB(mapping->host);
1747 }
1748 
1749 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1750 {
1751 	return F2FS_M_SB(page_file_mapping(page));
1752 }
1753 
1754 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1755 {
1756 	return (struct f2fs_super_block *)(sbi->raw_super);
1757 }
1758 
1759 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1760 {
1761 	return (struct f2fs_checkpoint *)(sbi->ckpt);
1762 }
1763 
1764 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1765 {
1766 	return (struct f2fs_node *)page_address(page);
1767 }
1768 
1769 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1770 {
1771 	return &((struct f2fs_node *)page_address(page))->i;
1772 }
1773 
1774 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1775 {
1776 	return (struct f2fs_nm_info *)(sbi->nm_info);
1777 }
1778 
1779 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1780 {
1781 	return (struct f2fs_sm_info *)(sbi->sm_info);
1782 }
1783 
1784 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1785 {
1786 	return (struct sit_info *)(SM_I(sbi)->sit_info);
1787 }
1788 
1789 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1790 {
1791 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1792 }
1793 
1794 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1795 {
1796 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1797 }
1798 
1799 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1800 {
1801 	return sbi->meta_inode->i_mapping;
1802 }
1803 
1804 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
1805 {
1806 	return sbi->node_inode->i_mapping;
1807 }
1808 
1809 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
1810 {
1811 	return test_bit(type, &sbi->s_flag);
1812 }
1813 
1814 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1815 {
1816 	set_bit(type, &sbi->s_flag);
1817 }
1818 
1819 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1820 {
1821 	clear_bit(type, &sbi->s_flag);
1822 }
1823 
1824 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
1825 {
1826 	return le64_to_cpu(cp->checkpoint_ver);
1827 }
1828 
1829 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
1830 {
1831 	if (type < F2FS_MAX_QUOTAS)
1832 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
1833 	return 0;
1834 }
1835 
1836 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
1837 {
1838 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
1839 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
1840 }
1841 
1842 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1843 {
1844 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1845 
1846 	return ckpt_flags & f;
1847 }
1848 
1849 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1850 {
1851 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
1852 }
1853 
1854 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1855 {
1856 	unsigned int ckpt_flags;
1857 
1858 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1859 	ckpt_flags |= f;
1860 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1861 }
1862 
1863 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1864 {
1865 	unsigned long flags;
1866 
1867 	spin_lock_irqsave(&sbi->cp_lock, flags);
1868 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
1869 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
1870 }
1871 
1872 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1873 {
1874 	unsigned int ckpt_flags;
1875 
1876 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1877 	ckpt_flags &= (~f);
1878 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1879 }
1880 
1881 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1882 {
1883 	unsigned long flags;
1884 
1885 	spin_lock_irqsave(&sbi->cp_lock, flags);
1886 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
1887 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
1888 }
1889 
1890 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
1891 {
1892 	unsigned long flags;
1893 	unsigned char *nat_bits;
1894 
1895 	/*
1896 	 * In order to re-enable nat_bits we would need to trigger fsck.f2fs
1897 	 * via set_sbi_flag(sbi, SBI_NEED_FSCK), but that is too costly;
1898 	 * rely on a regular fsck or an unclean shutdown instead.
1899 	 */
1900 
1901 	if (lock)
1902 		spin_lock_irqsave(&sbi->cp_lock, flags);
1903 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
1904 	nat_bits = NM_I(sbi)->nat_bits;
1905 	NM_I(sbi)->nat_bits = NULL;
1906 	if (lock)
1907 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
1908 
1909 	kvfree(nat_bits);
1910 }
1911 
1912 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
1913 					struct cp_control *cpc)
1914 {
1915 	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
1916 
1917 	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
1918 }
1919 
1920 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
1921 {
1922 	down_read(&sbi->cp_rwsem);
1923 }
1924 
1925 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
1926 {
1927 	return down_read_trylock(&sbi->cp_rwsem);
1928 }
1929 
1930 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
1931 {
1932 	up_read(&sbi->cp_rwsem);
1933 }
1934 
1935 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
1936 {
1937 	down_write(&sbi->cp_rwsem);
1938 }
1939 
1940 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
1941 {
1942 	up_write(&sbi->cp_rwsem);
1943 }
1944 
1945 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
1946 {
1947 	int reason = CP_SYNC;
1948 
1949 	if (test_opt(sbi, FASTBOOT))
1950 		reason = CP_FASTBOOT;
1951 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
1952 		reason = CP_UMOUNT;
1953 	return reason;
1954 }
1955 
1956 static inline bool __remain_node_summaries(int reason)
1957 {
1958 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
1959 }
1960 
1961 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
1962 {
1963 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
1964 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
1965 }
1966 
1967 /*
1968  * Check whether the inode has blocks or not
1969  */
1970 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
1971 {
1972 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
1973 
1974 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
1975 }
1976 
1977 static inline bool f2fs_has_xattr_block(unsigned int ofs)
1978 {
1979 	return ofs == XATTR_NODE_OFFSET;
1980 }
1981 
1982 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
1983 					struct inode *inode, bool cap)
1984 {
1985 	if (!inode)
1986 		return true;
1987 	if (!test_opt(sbi, RESERVE_ROOT))
1988 		return false;
1989 	if (IS_NOQUOTA(inode))
1990 		return true;
1991 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
1992 		return true;
1993 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
1994 					in_group_p(F2FS_OPTION(sbi).s_resgid))
1995 		return true;
1996 	if (cap && capable(CAP_SYS_RESOURCE))
1997 		return true;
1998 	return false;
1999 }
2000 
2001 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2002 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2003 				 struct inode *inode, blkcnt_t *count)
2004 {
2005 	blkcnt_t diff = 0, release = 0;
2006 	block_t avail_user_block_count;
2007 	int ret;
2008 
2009 	ret = dquot_reserve_block(inode, *count);
2010 	if (ret)
2011 		return ret;
2012 
2013 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2014 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2015 		release = *count;
2016 		goto release_quota;
2017 	}
2018 
2019 	/*
2020 	 * increase this prior to the actual block count change so that
2021 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2022 	 */
2023 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2024 
2025 	spin_lock(&sbi->stat_lock);
2026 	sbi->total_valid_block_count += (block_t)(*count);
2027 	avail_user_block_count = sbi->user_block_count -
2028 					sbi->current_reserved_blocks;
2029 
2030 	if (!__allow_reserved_blocks(sbi, inode, true))
2031 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2032 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2033 		if (avail_user_block_count > sbi->unusable_block_count)
2034 			avail_user_block_count -= sbi->unusable_block_count;
2035 		else
2036 			avail_user_block_count = 0;
2037 	}
2038 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2039 		diff = sbi->total_valid_block_count - avail_user_block_count;
2040 		if (diff > *count)
2041 			diff = *count;
2042 		*count -= diff;
2043 		release = diff;
2044 		sbi->total_valid_block_count -= diff;
2045 		if (!*count) {
2046 			spin_unlock(&sbi->stat_lock);
2047 			goto enospc;
2048 		}
2049 	}
2050 	spin_unlock(&sbi->stat_lock);
2051 
2052 	if (unlikely(release)) {
2053 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2054 		dquot_release_reservation_block(inode, release);
2055 	}
2056 	f2fs_i_blocks_write(inode, *count, true, true);
2057 	return 0;
2058 
2059 enospc:
2060 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2061 release_quota:
2062 	dquot_release_reservation_block(inode, release);
2063 	return -ENOSPC;
2064 }
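
/*
 * Usage note (derived from the logic above): on a nearly full filesystem
 * inc_valid_block_count() may trim *count down to what could actually be
 * reserved and still return 0; it returns -ENOSPC only when not a single
 * block of the request fits.  Callers should therefore re-read *count after
 * a successful call.
 */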
2065 
2066 __printf(2, 3)
2067 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2068 
2069 #define f2fs_err(sbi, fmt, ...)						\
2070 	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2071 #define f2fs_warn(sbi, fmt, ...)					\
2072 	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2073 #define f2fs_notice(sbi, fmt, ...)					\
2074 	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2075 #define f2fs_info(sbi, fmt, ...)					\
2076 	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2077 #define f2fs_debug(sbi, fmt, ...)					\
2078 	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2079 
2080 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2081 						struct inode *inode,
2082 						block_t count)
2083 {
2084 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2085 
2086 	spin_lock(&sbi->stat_lock);
2087 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2088 	sbi->total_valid_block_count -= (block_t)count;
2089 	if (sbi->reserved_blocks &&
2090 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2091 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2092 					sbi->current_reserved_blocks + count);
2093 	spin_unlock(&sbi->stat_lock);
2094 	if (unlikely(inode->i_blocks < sectors)) {
2095 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2096 			  inode->i_ino,
2097 			  (unsigned long long)inode->i_blocks,
2098 			  (unsigned long long)sectors);
2099 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2100 		return;
2101 	}
2102 	f2fs_i_blocks_write(inode, count, false, true);
2103 }
2104 
2105 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2106 {
2107 	atomic_inc(&sbi->nr_pages[count_type]);
2108 
2109 	if (count_type == F2FS_DIRTY_DENTS ||
2110 			count_type == F2FS_DIRTY_NODES ||
2111 			count_type == F2FS_DIRTY_META ||
2112 			count_type == F2FS_DIRTY_QDATA ||
2113 			count_type == F2FS_DIRTY_IMETA)
2114 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2115 }
2116 
2117 static inline void inode_inc_dirty_pages(struct inode *inode)
2118 {
2119 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2120 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2121 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2122 	if (IS_NOQUOTA(inode))
2123 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2124 }
2125 
2126 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2127 {
2128 	atomic_dec(&sbi->nr_pages[count_type]);
2129 }
2130 
2131 static inline void inode_dec_dirty_pages(struct inode *inode)
2132 {
2133 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2134 			!S_ISLNK(inode->i_mode))
2135 		return;
2136 
2137 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2138 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2139 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2140 	if (IS_NOQUOTA(inode))
2141 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2142 }
2143 
2144 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2145 {
2146 	return atomic_read(&sbi->nr_pages[count_type]);
2147 }
2148 
2149 static inline int get_dirty_pages(struct inode *inode)
2150 {
2151 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2152 }
2153 
2154 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2155 {
2156 	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2157 	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2158 						sbi->log_blocks_per_seg;
2159 
2160 	return segs / sbi->segs_per_sec;
2161 }
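
/*
 * Worked example (illustrative geometry): with blocks_per_seg == 512
 * (log_blocks_per_seg == 9) and segs_per_sec == 1, 600 dirty pages round up
 * to (600 + 512 - 1) >> 9 == 2 segments, i.e. 2 sections.
 */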
2162 
2163 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2164 {
2165 	return sbi->total_valid_block_count;
2166 }
2167 
2168 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2169 {
2170 	return sbi->discard_blks;
2171 }
2172 
2173 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2174 {
2175 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2176 
2177 	/* return NAT or SIT bitmap */
2178 	if (flag == NAT_BITMAP)
2179 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2180 	else if (flag == SIT_BITMAP)
2181 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2182 
2183 	return 0;
2184 }
2185 
2186 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2187 {
2188 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2189 }
2190 
2191 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2192 {
2193 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2194 	int offset;
2195 
2196 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2197 		offset = (flag == SIT_BITMAP) ?
2198 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2199 		/*
2200 		 * if the large_nat_bitmap feature is enabled, skip the leading
2201 		 * checksum field that protects all nat/sit bitmaps.
2202 		 */
2203 		return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32);
2204 	}
2205 
2206 	if (__cp_payload(sbi) > 0) {
2207 		if (flag == NAT_BITMAP)
2208 			return &ckpt->sit_nat_version_bitmap;
2209 		else
2210 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2211 	} else {
2212 		offset = (flag == NAT_BITMAP) ?
2213 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2214 		return &ckpt->sit_nat_version_bitmap + offset;
2215 	}
2216 }
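
/*
 * Layout implied by the branches above (sketch): with large_nat_bitmap both
 * bitmaps live in sit_nat_version_bitmap after a leading __le32 reserved for
 * checksum protection; with a non-zero cp_payload the NAT bitmap sits in
 * sit_nat_version_bitmap and the SIT bitmap in the following block; otherwise
 * the SIT bitmap comes first and the NAT bitmap starts
 * sit_ver_bitmap_bytesize bytes after it.
 */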
2217 
2218 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2219 {
2220 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2221 
2222 	if (sbi->cur_cp_pack == 2)
2223 		start_addr += sbi->blocks_per_seg;
2224 	return start_addr;
2225 }
2226 
2227 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2228 {
2229 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2230 
2231 	if (sbi->cur_cp_pack == 1)
2232 		start_addr += sbi->blocks_per_seg;
2233 	return start_addr;
2234 }
2235 
2236 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2237 {
2238 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2239 }
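
/*
 * The two checkpoint packs occupy consecutive segments starting at
 * cp_blkaddr: pack 1 begins at cp_blkaddr and pack 2 one segment later, as
 * computed by __start_cp_addr()/__start_cp_next_addr() above;
 * __set_cp_next_pack() simply flips which pack the next checkpoint targets.
 */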
2240 
2241 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2242 {
2243 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2244 }
2245 
2246 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2247 					struct inode *inode, bool is_inode)
2248 {
2249 	block_t	valid_block_count;
2250 	unsigned int valid_node_count, user_block_count;
2251 	int err;
2252 
2253 	if (is_inode) {
2254 		if (inode) {
2255 			err = dquot_alloc_inode(inode);
2256 			if (err)
2257 				return err;
2258 		}
2259 	} else {
2260 		err = dquot_reserve_block(inode, 1);
2261 		if (err)
2262 			return err;
2263 	}
2264 
2265 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2266 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2267 		goto enospc;
2268 	}
2269 
2270 	spin_lock(&sbi->stat_lock);
2271 
2272 	valid_block_count = sbi->total_valid_block_count +
2273 					sbi->current_reserved_blocks + 1;
2274 
2275 	if (!__allow_reserved_blocks(sbi, inode, false))
2276 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2277 	user_block_count = sbi->user_block_count;
2278 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2279 		user_block_count -= sbi->unusable_block_count;
2280 
2281 	if (unlikely(valid_block_count > user_block_count)) {
2282 		spin_unlock(&sbi->stat_lock);
2283 		goto enospc;
2284 	}
2285 
2286 	valid_node_count = sbi->total_valid_node_count + 1;
2287 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2288 		spin_unlock(&sbi->stat_lock);
2289 		goto enospc;
2290 	}
2291 
2292 	sbi->total_valid_node_count++;
2293 	sbi->total_valid_block_count++;
2294 	spin_unlock(&sbi->stat_lock);
2295 
2296 	if (inode) {
2297 		if (is_inode)
2298 			f2fs_mark_inode_dirty_sync(inode, true);
2299 		else
2300 			f2fs_i_blocks_write(inode, 1, true, true);
2301 	}
2302 
2303 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2304 	return 0;
2305 
2306 enospc:
2307 	if (is_inode) {
2308 		if (inode)
2309 			dquot_free_inode(inode);
2310 	} else {
2311 		dquot_release_reservation_block(inode, 1);
2312 	}
2313 	return -ENOSPC;
2314 }
2315 
2316 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2317 					struct inode *inode, bool is_inode)
2318 {
2319 	spin_lock(&sbi->stat_lock);
2320 
2321 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2322 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2323 
2324 	sbi->total_valid_node_count--;
2325 	sbi->total_valid_block_count--;
2326 	if (sbi->reserved_blocks &&
2327 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2328 		sbi->current_reserved_blocks++;
2329 
2330 	spin_unlock(&sbi->stat_lock);
2331 
2332 	if (is_inode) {
2333 		dquot_free_inode(inode);
2334 	} else {
2335 		if (unlikely(inode->i_blocks == 0)) {
2336 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2337 				  inode->i_ino,
2338 				  (unsigned long long)inode->i_blocks);
2339 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2340 			return;
2341 		}
2342 		f2fs_i_blocks_write(inode, 1, false, true);
2343 	}
2344 }
2345 
2346 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2347 {
2348 	return sbi->total_valid_node_count;
2349 }
2350 
2351 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2352 {
2353 	percpu_counter_inc(&sbi->total_valid_inode_count);
2354 }
2355 
2356 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2357 {
2358 	percpu_counter_dec(&sbi->total_valid_inode_count);
2359 }
2360 
2361 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2362 {
2363 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2364 }
2365 
2366 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2367 						pgoff_t index, bool for_write)
2368 {
2369 	struct page *page;
2370 
2371 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2372 		if (!for_write)
2373 			page = find_get_page_flags(mapping, index,
2374 							FGP_LOCK | FGP_ACCESSED);
2375 		else
2376 			page = find_lock_page(mapping, index);
2377 		if (page)
2378 			return page;
2379 
2380 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2381 			f2fs_show_injection_info(F2FS_M_SB(mapping),
2382 							FAULT_PAGE_ALLOC);
2383 			return NULL;
2384 		}
2385 	}
2386 
2387 	if (!for_write)
2388 		return grab_cache_page(mapping, index);
2389 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2390 }
2391 
2392 static inline struct page *f2fs_pagecache_get_page(
2393 				struct address_space *mapping, pgoff_t index,
2394 				int fgp_flags, gfp_t gfp_mask)
2395 {
2396 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2397 		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2398 		return NULL;
2399 	}
2400 
2401 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2402 }
2403 
2404 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2405 {
2406 	char *src_kaddr = kmap(src);
2407 	char *dst_kaddr = kmap(dst);
2408 
2409 	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2410 	kunmap(dst);
2411 	kunmap(src);
2412 }
2413 
2414 static inline void f2fs_put_page(struct page *page, int unlock)
2415 {
2416 	if (!page)
2417 		return;
2418 
2419 	if (unlock) {
2420 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2421 		unlock_page(page);
2422 	}
2423 	put_page(page);
2424 }
2425 
2426 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2427 {
2428 	if (dn->node_page)
2429 		f2fs_put_page(dn->node_page, 1);
2430 	if (dn->inode_page && dn->node_page != dn->inode_page)
2431 		f2fs_put_page(dn->inode_page, 0);
2432 	dn->node_page = NULL;
2433 	dn->inode_page = NULL;
2434 }
2435 
2436 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2437 					size_t size)
2438 {
2439 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2440 }
2441 
2442 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2443 						gfp_t flags)
2444 {
2445 	void *entry;
2446 
2447 	entry = kmem_cache_alloc(cachep, flags);
2448 	if (!entry)
2449 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2450 	return entry;
2451 }
2452 
2453 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2454 {
2455 	if (sbi->gc_mode == GC_URGENT_HIGH)
2456 		return true;
2457 
2458 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2459 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2460 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2461 		get_pages(sbi, F2FS_DIO_READ) ||
2462 		get_pages(sbi, F2FS_DIO_WRITE))
2463 		return false;
2464 
2465 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2466 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2467 		return false;
2468 
2469 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2470 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2471 		return false;
2472 
2473 	if (sbi->gc_mode == GC_URGENT_LOW &&
2474 			(type == DISCARD_TIME || type == GC_TIME))
2475 		return true;
2476 
2477 	return f2fs_time_over(sbi, type);
2478 }
2479 
2480 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2481 				unsigned long index, void *item)
2482 {
2483 	while (radix_tree_insert(root, index, item))
2484 		cond_resched();
2485 }
2486 
2487 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2488 
2489 static inline bool IS_INODE(struct page *page)
2490 {
2491 	struct f2fs_node *p = F2FS_NODE(page);
2492 
2493 	return RAW_IS_INODE(p);
2494 }
2495 
2496 static inline int offset_in_addr(struct f2fs_inode *i)
2497 {
2498 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2499 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2500 }
2501 
2502 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2503 {
2504 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2505 }
2506 
2507 static inline int f2fs_has_extra_attr(struct inode *inode);
2508 static inline block_t data_blkaddr(struct inode *inode,
2509 			struct page *node_page, unsigned int offset)
2510 {
2511 	struct f2fs_node *raw_node;
2512 	__le32 *addr_array;
2513 	int base = 0;
2514 	bool is_inode = IS_INODE(node_page);
2515 
2516 	raw_node = F2FS_NODE(node_page);
2517 
2518 	if (is_inode) {
2519 		if (!inode)
2520 			/* from GC path only */
2521 			base = offset_in_addr(&raw_node->i);
2522 		else if (f2fs_has_extra_attr(inode))
2523 			base = get_extra_isize(inode);
2524 	}
2525 
2526 	addr_array = blkaddr_in_node(raw_node);
2527 	return le32_to_cpu(addr_array[base + offset]);
2528 }
2529 
2530 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2531 {
2532 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2533 }
2534 
2535 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2536 {
2537 	int mask;
2538 
2539 	addr += (nr >> 3);
2540 	mask = 1 << (7 - (nr & 0x07));
2541 	return mask & *addr;
2542 }
2543 
2544 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2545 {
2546 	int mask;
2547 
2548 	addr += (nr >> 3);
2549 	mask = 1 << (7 - (nr & 0x07));
2550 	*addr |= mask;
2551 }
2552 
2553 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2554 {
2555 	int mask;
2556 
2557 	addr += (nr >> 3);
2558 	mask = 1 << (7 - (nr & 0x07));
2559 	*addr &= ~mask;
2560 }
2561 
2562 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2563 {
2564 	int mask;
2565 	int ret;
2566 
2567 	addr += (nr >> 3);
2568 	mask = 1 << (7 - (nr & 0x07));
2569 	ret = mask & *addr;
2570 	*addr |= mask;
2571 	return ret;
2572 }
2573 
2574 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2575 {
2576 	int mask;
2577 	int ret;
2578 
2579 	addr += (nr >> 3);
2580 	mask = 1 << (7 - (nr & 0x07));
2581 	ret = mask & *addr;
2582 	*addr &= ~mask;
2583 	return ret;
2584 }
2585 
2586 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2587 {
2588 	int mask;
2589 
2590 	addr += (nr >> 3);
2591 	mask = 1 << (7 - (nr & 0x07));
2592 	*addr ^= mask;
2593 }
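
/*
 * Note on the bit helpers above: bits are numbered MSB-first within each
 * byte.  For example, f2fs_set_bit(0, addr) sets mask 0x80 in addr[0], while
 * f2fs_set_bit(9, addr) sets 1 << (7 - (9 & 7)) == 0x40 in addr[1].
 */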
2594 
2595 /*
2596  * On-disk inode flags (f2fs_inode::i_flags)
2597  */
2598 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
2599 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
2600 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
2601 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
2602 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
2603 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
2604 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
2605 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
2606 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
2607 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
2608 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
2609 
2610 /* Flags that should be inherited by new inodes from their parent. */
2611 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2612 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2613 			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2614 
2615 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2616 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2617 				F2FS_CASEFOLD_FL))
2618 
2619 /* Flags that are appropriate for file types other than directories and regular files. */
2620 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2621 
2622 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2623 {
2624 	if (S_ISDIR(mode))
2625 		return flags;
2626 	else if (S_ISREG(mode))
2627 		return flags & F2FS_REG_FLMASK;
2628 	else
2629 		return flags & F2FS_OTHER_FLMASK;
2630 }
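
/*
 * Example: directories keep every flag, a regular file drops the
 * directory-only bits (F2FS_DIRSYNC_FL, F2FS_PROJINHERIT_FL,
 * F2FS_CASEFOLD_FL), and any other file type (symlink, fifo, device node)
 * retains only F2FS_NODUMP_FL and F2FS_NOATIME_FL.
 */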
2631 
2632 static inline void __mark_inode_dirty_flag(struct inode *inode,
2633 						int flag, bool set)
2634 {
2635 	switch (flag) {
2636 	case FI_INLINE_XATTR:
2637 	case FI_INLINE_DATA:
2638 	case FI_INLINE_DENTRY:
2639 	case FI_NEW_INODE:
2640 		if (set)
2641 			return;
2642 		fallthrough;
2643 	case FI_DATA_EXIST:
2644 	case FI_INLINE_DOTS:
2645 	case FI_PIN_FILE:
2646 		f2fs_mark_inode_dirty_sync(inode, true);
2647 	}
2648 }
2649 
2650 static inline void set_inode_flag(struct inode *inode, int flag)
2651 {
2652 	set_bit(flag, F2FS_I(inode)->flags);
2653 	__mark_inode_dirty_flag(inode, flag, true);
2654 }
2655 
2656 static inline int is_inode_flag_set(struct inode *inode, int flag)
2657 {
2658 	return test_bit(flag, F2FS_I(inode)->flags);
2659 }
2660 
2661 static inline void clear_inode_flag(struct inode *inode, int flag)
2662 {
2663 	clear_bit(flag, F2FS_I(inode)->flags);
2664 	__mark_inode_dirty_flag(inode, flag, false);
2665 }
2666 
2667 static inline bool f2fs_verity_in_progress(struct inode *inode)
2668 {
2669 	return IS_ENABLED(CONFIG_FS_VERITY) &&
2670 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2671 }
2672 
2673 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2674 {
2675 	F2FS_I(inode)->i_acl_mode = mode;
2676 	set_inode_flag(inode, FI_ACL_MODE);
2677 	f2fs_mark_inode_dirty_sync(inode, false);
2678 }
2679 
2680 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2681 {
2682 	if (inc)
2683 		inc_nlink(inode);
2684 	else
2685 		drop_nlink(inode);
2686 	f2fs_mark_inode_dirty_sync(inode, true);
2687 }
2688 
2689 static inline void f2fs_i_blocks_write(struct inode *inode,
2690 					block_t diff, bool add, bool claim)
2691 {
2692 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2693 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2694 
2695 	/* add == true with claim == true must be paired with a prior dquot_reserve_block() */
2696 	if (add) {
2697 		if (claim)
2698 			dquot_claim_block(inode, diff);
2699 		else
2700 			dquot_alloc_block_nofail(inode, diff);
2701 	} else {
2702 		dquot_free_block(inode, diff);
2703 	}
2704 
2705 	f2fs_mark_inode_dirty_sync(inode, true);
2706 	if (clean || recover)
2707 		set_inode_flag(inode, FI_AUTO_RECOVER);
2708 }
2709 
2710 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
2711 {
2712 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2713 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2714 
2715 	if (i_size_read(inode) == i_size)
2716 		return;
2717 
2718 	i_size_write(inode, i_size);
2719 	f2fs_mark_inode_dirty_sync(inode, true);
2720 	if (clean || recover)
2721 		set_inode_flag(inode, FI_AUTO_RECOVER);
2722 }
2723 
2724 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
2725 {
2726 	F2FS_I(inode)->i_current_depth = depth;
2727 	f2fs_mark_inode_dirty_sync(inode, true);
2728 }
2729 
2730 static inline void f2fs_i_gc_failures_write(struct inode *inode,
2731 					unsigned int count)
2732 {
2733 	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
2734 	f2fs_mark_inode_dirty_sync(inode, true);
2735 }
2736 
2737 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
2738 {
2739 	F2FS_I(inode)->i_xattr_nid = xnid;
2740 	f2fs_mark_inode_dirty_sync(inode, true);
2741 }
2742 
2743 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
2744 {
2745 	F2FS_I(inode)->i_pino = pino;
2746 	f2fs_mark_inode_dirty_sync(inode, true);
2747 }
2748 
2749 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
2750 {
2751 	struct f2fs_inode_info *fi = F2FS_I(inode);
2752 
2753 	if (ri->i_inline & F2FS_INLINE_XATTR)
2754 		set_bit(FI_INLINE_XATTR, fi->flags);
2755 	if (ri->i_inline & F2FS_INLINE_DATA)
2756 		set_bit(FI_INLINE_DATA, fi->flags);
2757 	if (ri->i_inline & F2FS_INLINE_DENTRY)
2758 		set_bit(FI_INLINE_DENTRY, fi->flags);
2759 	if (ri->i_inline & F2FS_DATA_EXIST)
2760 		set_bit(FI_DATA_EXIST, fi->flags);
2761 	if (ri->i_inline & F2FS_INLINE_DOTS)
2762 		set_bit(FI_INLINE_DOTS, fi->flags);
2763 	if (ri->i_inline & F2FS_EXTRA_ATTR)
2764 		set_bit(FI_EXTRA_ATTR, fi->flags);
2765 	if (ri->i_inline & F2FS_PIN_FILE)
2766 		set_bit(FI_PIN_FILE, fi->flags);
2767 }
2768 
2769 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
2770 {
2771 	ri->i_inline = 0;
2772 
2773 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
2774 		ri->i_inline |= F2FS_INLINE_XATTR;
2775 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
2776 		ri->i_inline |= F2FS_INLINE_DATA;
2777 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
2778 		ri->i_inline |= F2FS_INLINE_DENTRY;
2779 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
2780 		ri->i_inline |= F2FS_DATA_EXIST;
2781 	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
2782 		ri->i_inline |= F2FS_INLINE_DOTS;
2783 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
2784 		ri->i_inline |= F2FS_EXTRA_ATTR;
2785 	if (is_inode_flag_set(inode, FI_PIN_FILE))
2786 		ri->i_inline |= F2FS_PIN_FILE;
2787 }
2788 
2789 static inline int f2fs_has_extra_attr(struct inode *inode)
2790 {
2791 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
2792 }
2793 
2794 static inline int f2fs_has_inline_xattr(struct inode *inode)
2795 {
2796 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
2797 }
2798 
2799 static inline int f2fs_compressed_file(struct inode *inode)
2800 {
2801 	return S_ISREG(inode->i_mode) &&
2802 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
2803 }
2804 
2805 static inline unsigned int addrs_per_inode(struct inode *inode)
2806 {
2807 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
2808 				get_inline_xattr_addrs(inode);
2809 
2810 	if (!f2fs_compressed_file(inode))
2811 		return addrs;
2812 	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
2813 }
2814 
2815 static inline unsigned int addrs_per_block(struct inode *inode)
2816 {
2817 	if (!f2fs_compressed_file(inode))
2818 		return DEF_ADDRS_PER_BLOCK;
2819 	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
2820 }
2821 
2822 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
2823 {
2824 	struct f2fs_inode *ri = F2FS_INODE(page);
2825 
2826 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
2827 					get_inline_xattr_addrs(inode)]);
2828 }
2829 
2830 static inline int inline_xattr_size(struct inode *inode)
2831 {
2832 	if (f2fs_has_inline_xattr(inode))
2833 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
2834 	return 0;
2835 }
2836 
2837 static inline int f2fs_has_inline_data(struct inode *inode)
2838 {
2839 	return is_inode_flag_set(inode, FI_INLINE_DATA);
2840 }
2841 
2842 static inline int f2fs_exist_data(struct inode *inode)
2843 {
2844 	return is_inode_flag_set(inode, FI_DATA_EXIST);
2845 }
2846 
2847 static inline int f2fs_has_inline_dots(struct inode *inode)
2848 {
2849 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
2850 }
2851 
2852 static inline int f2fs_is_mmap_file(struct inode *inode)
2853 {
2854 	return is_inode_flag_set(inode, FI_MMAP_FILE);
2855 }
2856 
2857 static inline bool f2fs_is_pinned_file(struct inode *inode)
2858 {
2859 	return is_inode_flag_set(inode, FI_PIN_FILE);
2860 }
2861 
2862 static inline bool f2fs_is_atomic_file(struct inode *inode)
2863 {
2864 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
2865 }
2866 
2867 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
2868 {
2869 	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
2870 }
2871 
2872 static inline bool f2fs_is_volatile_file(struct inode *inode)
2873 {
2874 	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
2875 }
2876 
2877 static inline bool f2fs_is_first_block_written(struct inode *inode)
2878 {
2879 	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
2880 }
2881 
2882 static inline bool f2fs_is_drop_cache(struct inode *inode)
2883 {
2884 	return is_inode_flag_set(inode, FI_DROP_CACHE);
2885 }
2886 
2887 static inline void *inline_data_addr(struct inode *inode, struct page *page)
2888 {
2889 	struct f2fs_inode *ri = F2FS_INODE(page);
2890 	int extra_size = get_extra_isize(inode);
2891 
2892 	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
2893 }
2894 
2895 static inline int f2fs_has_inline_dentry(struct inode *inode)
2896 {
2897 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
2898 }
2899 
2900 static inline int is_file(struct inode *inode, int type)
2901 {
2902 	return F2FS_I(inode)->i_advise & type;
2903 }
2904 
2905 static inline void set_file(struct inode *inode, int type)
2906 {
2907 	F2FS_I(inode)->i_advise |= type;
2908 	f2fs_mark_inode_dirty_sync(inode, true);
2909 }
2910 
2911 static inline void clear_file(struct inode *inode, int type)
2912 {
2913 	F2FS_I(inode)->i_advise &= ~type;
2914 	f2fs_mark_inode_dirty_sync(inode, true);
2915 }
2916 
2917 static inline bool f2fs_is_time_consistent(struct inode *inode)
2918 {
2919 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
2920 		return false;
2921 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
2922 		return false;
2923 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
2924 		return false;
2925 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
2926 						&F2FS_I(inode)->i_crtime))
2927 		return false;
2928 	return true;
2929 }
2930 
2931 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
2932 {
2933 	bool ret;
2934 
2935 	if (dsync) {
2936 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2937 
2938 		spin_lock(&sbi->inode_lock[DIRTY_META]);
2939 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
2940 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
2941 		return ret;
2942 	}
2943 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
2944 			file_keep_isize(inode) ||
2945 			i_size_read(inode) & ~PAGE_MASK)
2946 		return false;
2947 
2948 	if (!f2fs_is_time_consistent(inode))
2949 		return false;
2950 
2951 	spin_lock(&F2FS_I(inode)->i_size_lock);
2952 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
2953 	spin_unlock(&F2FS_I(inode)->i_size_lock);
2954 
2955 	return ret;
2956 }
2957 
2958 static inline bool f2fs_readonly(struct super_block *sb)
2959 {
2960 	return sb_rdonly(sb);
2961 }
2962 
2963 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
2964 {
2965 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
2966 }
2967 
2968 static inline bool is_dot_dotdot(const u8 *name, size_t len)
2969 {
2970 	if (len == 1 && name[0] == '.')
2971 		return true;
2972 
2973 	if (len == 2 && name[0] == '.' && name[1] == '.')
2974 		return true;
2975 
2976 	return false;
2977 }
2978 
2979 static inline bool f2fs_may_extent_tree(struct inode *inode)
2980 {
2981 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2982 
2983 	if (!test_opt(sbi, EXTENT_CACHE) ||
2984 			is_inode_flag_set(inode, FI_NO_EXTENT) ||
2985 			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
2986 		return false;
2987 
2988 	/*
2989 	 * for files recovered during mount, do not create extents
2990 	 * if the shrinker is not registered yet.
2991 	 */
2992 	if (list_empty(&sbi->s_list))
2993 		return false;
2994 
2995 	return S_ISREG(inode->i_mode);
2996 }
2997 
2998 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
2999 					size_t size, gfp_t flags)
3000 {
3001 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
3002 		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
3003 		return NULL;
3004 	}
3005 
3006 	return kmalloc(size, flags);
3007 }
3008 
3009 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3010 					size_t size, gfp_t flags)
3011 {
3012 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3013 }
3014 
3015 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3016 					size_t size, gfp_t flags)
3017 {
3018 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3019 		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3020 		return NULL;
3021 	}
3022 
3023 	return kvmalloc(size, flags);
3024 }
3025 
3026 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3027 					size_t size, gfp_t flags)
3028 {
3029 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3030 }
3031 
3032 static inline int get_extra_isize(struct inode *inode)
3033 {
3034 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3035 }
3036 
3037 static inline int get_inline_xattr_addrs(struct inode *inode)
3038 {
3039 	return F2FS_I(inode)->i_inline_xattr_size;
3040 }
3041 
3042 #define f2fs_get_inode_mode(i) \
3043 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3044 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3045 
3046 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3047 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3048 	offsetof(struct f2fs_inode, i_extra_isize))	\
3049 
3050 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3051 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3052 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3053 		sizeof((f2fs_inode)->field))			\
3054 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
3055 
3056 #define DEFAULT_IOSTAT_PERIOD_MS	3000
3057 #define MIN_IOSTAT_PERIOD_MS		100
3058 /* maximum period of iostat tracing is 1 day */
3059 #define MAX_IOSTAT_PERIOD_MS		86400000
3060 
3061 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
3062 {
3063 	int i;
3064 
3065 	spin_lock(&sbi->iostat_lock);
3066 	for (i = 0; i < NR_IO_TYPE; i++) {
3067 		sbi->rw_iostat[i] = 0;
3068 		sbi->prev_rw_iostat[i] = 0;
3069 	}
3070 	spin_unlock(&sbi->iostat_lock);
3071 }
3072 
3073 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
3074 
3075 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
3076 			enum iostat_type type, unsigned long long io_bytes)
3077 {
3078 	if (!sbi->iostat_enable)
3079 		return;
3080 	spin_lock(&sbi->iostat_lock);
3081 	sbi->rw_iostat[type] += io_bytes;
3082 
3083 	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
3084 		sbi->rw_iostat[APP_BUFFERED_IO] =
3085 			sbi->rw_iostat[APP_WRITE_IO] -
3086 			sbi->rw_iostat[APP_DIRECT_IO];
3087 
3088 	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
3089 		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
3090 			sbi->rw_iostat[APP_READ_IO] -
3091 			sbi->rw_iostat[APP_DIRECT_READ_IO];
3092 	spin_unlock(&sbi->iostat_lock);
3093 
3094 	f2fs_record_iostat(sbi);
3095 }
3096 
3097 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
3098 
3099 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3100 
3101 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3102 					block_t blkaddr, int type);
3103 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3104 					block_t blkaddr, int type)
3105 {
3106 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3107 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3108 			 blkaddr, type);
3109 		f2fs_bug_on(sbi, 1);
3110 	}
3111 }
3112 
3113 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3114 {
3115 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3116 			blkaddr == COMPRESS_ADDR)
3117 		return false;
3118 	return true;
3119 }
3120 
3121 static inline void f2fs_set_page_private(struct page *page,
3122 						unsigned long data)
3123 {
3124 	if (PagePrivate(page))
3125 		return;
3126 
3127 	attach_page_private(page, (void *)data);
3128 }
3129 
3130 static inline void f2fs_clear_page_private(struct page *page)
3131 {
3132 	detach_page_private(page);
3133 }
3134 
3135 /*
3136  * file.c
3137  */
3138 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3139 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3140 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3141 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3142 int f2fs_truncate(struct inode *inode);
3143 int f2fs_getattr(const struct path *path, struct kstat *stat,
3144 			u32 request_mask, unsigned int flags);
3145 int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
3146 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3147 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3148 int f2fs_precache_extents(struct inode *inode);
3149 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3150 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3151 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3152 int f2fs_pin_file_control(struct inode *inode, bool inc);
3153 
3154 /*
3155  * inode.c
3156  */
3157 void f2fs_set_inode_flags(struct inode *inode);
3158 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3159 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3160 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3161 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3162 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3163 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3164 void f2fs_update_inode_page(struct inode *inode);
3165 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3166 void f2fs_evict_inode(struct inode *inode);
3167 void f2fs_handle_failed_inode(struct inode *inode);
3168 
3169 /*
3170  * namei.c
3171  */
3172 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3173 							bool hot, bool set);
3174 struct dentry *f2fs_get_parent(struct dentry *child);
3175 
3176 /*
3177  * dir.c
3178  */
3179 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3180 int f2fs_init_casefolded_name(const struct inode *dir,
3181 			      struct f2fs_filename *fname);
3182 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3183 			int lookup, struct f2fs_filename *fname);
3184 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3185 			struct f2fs_filename *fname);
3186 void f2fs_free_filename(struct f2fs_filename *fname);
3187 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3188 			const struct f2fs_filename *fname, int *max_slots);
3189 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3190 			unsigned int start_pos, struct fscrypt_str *fstr);
3191 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3192 			struct f2fs_dentry_ptr *d);
3193 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3194 			const struct f2fs_filename *fname, struct page *dpage);
3195 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3196 			unsigned int current_depth);
3197 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3198 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3199 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3200 					 const struct f2fs_filename *fname,
3201 					 struct page **res_page);
3202 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3203 			const struct qstr *child, struct page **res_page);
3204 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3205 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3206 			struct page **page);
3207 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3208 			struct page *page, struct inode *inode);
3209 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3210 			  const struct f2fs_filename *fname);
3211 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3212 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3213 			unsigned int bit_pos);
3214 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3215 			struct inode *inode, nid_t ino, umode_t mode);
3216 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3217 			struct inode *inode, nid_t ino, umode_t mode);
3218 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3219 			struct inode *inode, nid_t ino, umode_t mode);
3220 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3221 			struct inode *dir, struct inode *inode);
3222 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3223 bool f2fs_empty_dir(struct inode *dir);
3224 
3225 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3226 {
3227 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3228 				inode, inode->i_ino, inode->i_mode);
3229 }
3230 
3231 /*
3232  * super.c
3233  */
3234 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3235 void f2fs_inode_synced(struct inode *inode);
3236 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3237 int f2fs_quota_sync(struct super_block *sb, int type);
3238 void f2fs_quota_off_umount(struct super_block *sb);
3239 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3240 int f2fs_sync_fs(struct super_block *sb, int sync);
3241 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3242 
3243 /*
3244  * hash.c
3245  */
3246 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3247 
3248 /*
3249  * node.c
3250  */
3251 struct dnode_of_data;
3252 struct node_info;
3253 
3254 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3255 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3256 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3257 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3258 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3259 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3260 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3261 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3262 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3263 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3264 						struct node_info *ni);
3265 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3266 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3267 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3268 int f2fs_truncate_xattr_node(struct inode *inode);
3269 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3270 					unsigned int seq_id);
3271 int f2fs_remove_inode_page(struct inode *inode);
3272 struct page *f2fs_new_inode_page(struct inode *inode);
3273 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3274 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3275 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3276 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3277 int f2fs_move_node_page(struct page *node_page, int gc_type);
3278 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3279 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3280 			struct writeback_control *wbc, bool atomic,
3281 			unsigned int *seq_id);
3282 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3283 			struct writeback_control *wbc,
3284 			bool do_balance, enum iostat_type io_type);
3285 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3286 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3287 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3288 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3289 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3290 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3291 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3292 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3293 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3294 			unsigned int segno, struct f2fs_summary_block *sum);
3295 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3296 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3297 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3298 int __init f2fs_create_node_manager_caches(void);
3299 void f2fs_destroy_node_manager_caches(void);
3300 
3301 /*
3302  * segment.c
3303  */
3304 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3305 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3306 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3307 void f2fs_drop_inmem_pages(struct inode *inode);
3308 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3309 int f2fs_commit_inmem_pages(struct inode *inode);
3310 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3311 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3312 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3313 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3314 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3315 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3316 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3317 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3318 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3319 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3320 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3321 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3322 					struct cp_control *cpc);
3323 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3324 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3325 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3326 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3327 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3328 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3329 					unsigned int start, unsigned int end);
3330 void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
3331 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3332 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3333 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3334 					struct cp_control *cpc);
3335 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3336 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3337 					block_t blk_addr);
3338 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3339 						enum iostat_type io_type);
3340 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3341 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3342 			struct f2fs_io_info *fio);
3343 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3344 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3345 			block_t old_blkaddr, block_t new_blkaddr,
3346 			bool recover_curseg, bool recover_newaddr);
3347 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3348 			block_t old_addr, block_t new_addr,
3349 			unsigned char version, bool recover_curseg,
3350 			bool recover_newaddr);
3351 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3352 			block_t old_blkaddr, block_t *new_blkaddr,
3353 			struct f2fs_summary *sum, int type,
3354 			struct f2fs_io_info *fio);
3355 void f2fs_wait_on_page_writeback(struct page *page,
3356 			enum page_type type, bool ordered, bool locked);
3357 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3358 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3359 								block_t len);
3360 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3361 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3362 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3363 			unsigned int val, int alloc);
3364 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3365 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3366 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3367 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3368 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3369 int __init f2fs_create_segment_manager_caches(void);
3370 void f2fs_destroy_segment_manager_caches(void);
3371 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3372 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3373 			enum page_type type, enum temp_type temp);
3374 
3375 /*
3376  * checkpoint.c
3377  */
3378 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
3379 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3380 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3381 struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
3382 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3383 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3384 					block_t blkaddr, int type);
3385 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3386 			int type, bool sync);
3387 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
3388 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3389 			long nr_to_write, enum iostat_type io_type);
3390 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3391 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3392 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3393 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3394 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3395 					unsigned int devidx, int type);
3396 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3397 					unsigned int devidx, int type);
3398 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3399 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3400 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3401 void f2fs_add_orphan_inode(struct inode *inode);
3402 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3403 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3404 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3405 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3406 void f2fs_remove_dirty_inode(struct inode *inode);
3407 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3408 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3409 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3410 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3411 int __init f2fs_create_checkpoint_caches(void);
3412 void f2fs_destroy_checkpoint_caches(void);
3413 
3414 /*
3415  * data.c
3416  */
3417 int __init f2fs_init_bioset(void);
3418 void f2fs_destroy_bioset(void);
3419 struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
3420 int f2fs_init_bio_entry_cache(void);
3421 void f2fs_destroy_bio_entry_cache(void);
3422 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3423 				struct bio *bio, enum page_type type);
3424 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3425 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3426 				struct inode *inode, struct page *page,
3427 				nid_t ino, enum page_type type);
3428 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3429 					struct bio **bio, struct page *page);
3430 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3431 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3432 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3433 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3434 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3435 			block_t blk_addr, struct bio *bio);
3436 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3437 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3438 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3439 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3440 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3441 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3442 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
3443 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3444 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3445 			int op_flags, bool for_write);
3446 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3447 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3448 			bool for_write);
3449 struct page *f2fs_get_new_data_page(struct inode *inode,
3450 			struct page *ipage, pgoff_t index, bool new_i_size);
3451 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3452 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3453 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3454 			int create, int flag);
3455 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3456 			u64 start, u64 len);
3457 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3458 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3459 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3460 int f2fs_write_single_data_page(struct page *page, int *submitted,
3461 				struct bio **bio, sector_t *last_block,
3462 				struct writeback_control *wbc,
3463 				enum iostat_type io_type,
3464 				int compr_blocks);
3465 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3466 			unsigned int length);
3467 int f2fs_release_page(struct page *page, gfp_t wait);
3468 #ifdef CONFIG_MIGRATION
3469 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3470 			struct page *page, enum migrate_mode mode);
3471 #endif
3472 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3473 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3474 int f2fs_init_post_read_processing(void);
3475 void f2fs_destroy_post_read_processing(void);
3476 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3477 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3478 
3479 /*
3480  * gc.c
3481  */
3482 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3483 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3484 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3485 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
3486 			unsigned int segno);
3487 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3488 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3489 
3490 /*
3491  * recovery.c
3492  */
3493 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3494 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3495 
3496 /*
3497  * debug.c
3498  */
3499 #ifdef CONFIG_F2FS_STAT_FS
3500 struct f2fs_stat_info {
3501 	struct list_head stat_list;
3502 	struct f2fs_sb_info *sbi;
3503 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3504 	int main_area_segs, main_area_sections, main_area_zones;
3505 	unsigned long long hit_largest, hit_cached, hit_rbtree;
3506 	unsigned long long hit_total, total_ext;
3507 	int ext_tree, zombie_tree, ext_node;
3508 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3509 	int ndirty_data, ndirty_qdata;
3510 	int inmem_pages;
3511 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3512 	int nats, dirty_nats, sits, dirty_sits;
3513 	int free_nids, avail_nids, alloc_nids;
3514 	int total_count, utilization;
3515 	int bg_gc, nr_wb_cp_data, nr_wb_data;
3516 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3517 	int nr_dio_read, nr_dio_write;
3518 	unsigned int io_skip_bggc, other_skip_bggc;
3519 	int nr_flushing, nr_flushed, flush_list_empty;
3520 	int nr_discarding, nr_discarded;
3521 	int nr_discard_cmd;
3522 	unsigned int undiscard_blks;
3523 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3524 	int compr_inode, compr_blocks;
3525 	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3526 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3527 	unsigned int bimodal, avg_vblocks;
3528 	int util_free, util_valid, util_invalid;
3529 	int rsvd_segs, overp_segs;
3530 	int dirty_count, node_pages, meta_pages;
3531 	int prefree_count, call_count, cp_count, bg_cp_count;
3532 	int tot_segs, node_segs, data_segs, free_segs, free_secs;
3533 	int bg_node_segs, bg_data_segs;
3534 	int tot_blks, data_blks, node_blks;
3535 	int bg_data_blks, bg_node_blks;
3536 	unsigned long long skipped_atomic_files[2];
3537 	int curseg[NR_CURSEG_TYPE];
3538 	int cursec[NR_CURSEG_TYPE];
3539 	int curzone[NR_CURSEG_TYPE];
3540 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3541 	unsigned int full_seg[NR_CURSEG_TYPE];
3542 	unsigned int valid_blks[NR_CURSEG_TYPE];
3543 
3544 	unsigned int meta_count[META_MAX];
3545 	unsigned int segment_count[2];
3546 	unsigned int block_count[2];
3547 	unsigned int inplace_count;
3548 	unsigned long long base_mem, cache_mem, page_mem;
3549 };
3550 
3551 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3552 {
3553 	return (struct f2fs_stat_info *)sbi->stat_info;
3554 }
3555 
3556 #define stat_inc_cp_count(si)		((si)->cp_count++)
3557 #define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
3558 #define stat_inc_call_count(si)		((si)->call_count++)
3559 #define stat_inc_bggc_count(si)		((si)->bg_gc++)
3560 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
3561 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
3562 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
3563 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
3564 #define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
3565 #define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
3566 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
3567 #define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
3568 #define stat_inc_inline_xattr(inode)					\
3569 	do {								\
3570 		if (f2fs_has_inline_xattr(inode))			\
3571 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
3572 	} while (0)
3573 #define stat_dec_inline_xattr(inode)					\
3574 	do {								\
3575 		if (f2fs_has_inline_xattr(inode))			\
3576 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
3577 	} while (0)
3578 #define stat_inc_inline_inode(inode)					\
3579 	do {								\
3580 		if (f2fs_has_inline_data(inode))			\
3581 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
3582 	} while (0)
3583 #define stat_dec_inline_inode(inode)					\
3584 	do {								\
3585 		if (f2fs_has_inline_data(inode))			\
3586 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
3587 	} while (0)
3588 #define stat_inc_inline_dir(inode)					\
3589 	do {								\
3590 		if (f2fs_has_inline_dentry(inode))			\
3591 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
3592 	} while (0)
3593 #define stat_dec_inline_dir(inode)					\
3594 	do {								\
3595 		if (f2fs_has_inline_dentry(inode))			\
3596 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
3597 	} while (0)
3598 #define stat_inc_compr_inode(inode)					\
3599 	do {								\
3600 		if (f2fs_compressed_file(inode))			\
3601 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
3602 	} while (0)
3603 #define stat_dec_compr_inode(inode)					\
3604 	do {								\
3605 		if (f2fs_compressed_file(inode))			\
3606 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
3607 	} while (0)
3608 #define stat_add_compr_blocks(inode, blocks)				\
3609 		(atomic_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3610 #define stat_sub_compr_blocks(inode, blocks)				\
3611 		(atomic_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3612 #define stat_inc_meta_count(sbi, blkaddr)				\
3613 	do {								\
3614 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
3615 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
3616 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
3617 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
3618 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
3619 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
3620 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
3621 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
3622 	} while (0)
3623 #define stat_inc_seg_type(sbi, curseg)					\
3624 		((sbi)->segment_count[(curseg)->alloc_type]++)
3625 #define stat_inc_block_count(sbi, curseg)				\
3626 		((sbi)->block_count[(curseg)->alloc_type]++)
3627 #define stat_inc_inplace_blocks(sbi)					\
3628 		(atomic_inc(&(sbi)->inplace_count))
3629 #define stat_update_max_atomic_write(inode)				\
3630 	do {								\
3631 		int cur = F2FS_I_SB(inode)->atomic_files;	\
3632 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
3633 		if (cur > max)						\
3634 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
3635 	} while (0)
3636 #define stat_inc_volatile_write(inode)					\
3637 		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3638 #define stat_dec_volatile_write(inode)					\
3639 		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
3640 #define stat_update_max_volatile_write(inode)				\
3641 	do {								\
3642 		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
3643 		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
3644 		if (cur > max)						\
3645 			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
3646 	} while (0)
3647 #define stat_inc_seg_count(sbi, type, gc_type)				\
3648 	do {								\
3649 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3650 		si->tot_segs++;						\
3651 		if ((type) == SUM_TYPE_DATA) {				\
3652 			si->data_segs++;				\
3653 			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
3654 		} else {						\
3655 			si->node_segs++;				\
3656 			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
3657 		}							\
3658 	} while (0)
3659 
3660 #define stat_inc_tot_blk_count(si, blks)				\
3661 	((si)->tot_blks += (blks))
3662 
3663 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
3664 	do {								\
3665 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3666 		stat_inc_tot_blk_count(si, blks);			\
3667 		si->data_blks += (blks);				\
3668 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3669 	} while (0)
3670 
3671 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
3672 	do {								\
3673 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3674 		stat_inc_tot_blk_count(si, blks);			\
3675 		si->node_blks += (blks);				\
3676 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3677 	} while (0)
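
/*
 * Illustrative sketch (not part of the upstream header): GC-style callers
 * are expected to pair these counters; after migrating a data segment in
 * background GC one might see (nr_moved is a hypothetical local):
 *
 *	stat_inc_seg_count(sbi, SUM_TYPE_DATA, BG_GC);
 *	stat_inc_data_blk_count(sbi, nr_moved, BG_GC);
 */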
3678 
3679 int f2fs_build_stats(struct f2fs_sb_info *sbi);
3680 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
3681 void __init f2fs_create_root_stats(void);
3682 void f2fs_destroy_root_stats(void);
3683 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
3684 #else
3685 #define stat_inc_cp_count(si)				do { } while (0)
3686 #define stat_inc_bg_cp_count(si)			do { } while (0)
3687 #define stat_inc_call_count(si)				do { } while (0)
3688 #define stat_inc_bggc_count(si)				do { } while (0)
3689 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
3690 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
3691 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
3692 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
3693 #define stat_inc_total_hit(sbi)				do { } while (0)
3694 #define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
3695 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
3696 #define stat_inc_cached_node_hit(sbi)			do { } while (0)
3697 #define stat_inc_inline_xattr(inode)			do { } while (0)
3698 #define stat_dec_inline_xattr(inode)			do { } while (0)
3699 #define stat_inc_inline_inode(inode)			do { } while (0)
3700 #define stat_dec_inline_inode(inode)			do { } while (0)
3701 #define stat_inc_inline_dir(inode)			do { } while (0)
3702 #define stat_dec_inline_dir(inode)			do { } while (0)
3703 #define stat_inc_compr_inode(inode)			do { } while (0)
3704 #define stat_dec_compr_inode(inode)			do { } while (0)
3705 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
3706 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
3707 #define stat_inc_atomic_write(inode)			do { } while (0)
3708 #define stat_dec_atomic_write(inode)			do { } while (0)
3709 #define stat_update_max_atomic_write(inode)		do { } while (0)
3710 #define stat_inc_volatile_write(inode)			do { } while (0)
3711 #define stat_dec_volatile_write(inode)			do { } while (0)
3712 #define stat_update_max_volatile_write(inode)		do { } while (0)
3713 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
3714 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
3715 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
3716 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
3717 #define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
3718 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
3719 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
3720 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
3721 
3722 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
3723 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
3724 static inline void __init f2fs_create_root_stats(void) { }
3725 static inline void f2fs_destroy_root_stats(void) { }
3726 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
3727 #endif
3728 
3729 extern const struct file_operations f2fs_dir_operations;
3730 #ifdef CONFIG_UNICODE
3731 extern const struct dentry_operations f2fs_dentry_ops;
3732 #endif
3733 extern const struct file_operations f2fs_file_operations;
3734 extern const struct inode_operations f2fs_file_inode_operations;
3735 extern const struct address_space_operations f2fs_dblock_aops;
3736 extern const struct address_space_operations f2fs_node_aops;
3737 extern const struct address_space_operations f2fs_meta_aops;
3738 extern const struct inode_operations f2fs_dir_inode_operations;
3739 extern const struct inode_operations f2fs_symlink_inode_operations;
3740 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
3741 extern const struct inode_operations f2fs_special_inode_operations;
3742 extern struct kmem_cache *f2fs_inode_entry_slab;
3743 
3744 /*
3745  * inline.c
3746  */
3747 bool f2fs_may_inline_data(struct inode *inode);
3748 bool f2fs_may_inline_dentry(struct inode *inode);
3749 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
3750 void f2fs_truncate_inline_inode(struct inode *inode,
3751 						struct page *ipage, u64 from);
3752 int f2fs_read_inline_data(struct inode *inode, struct page *page);
3753 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
3754 int f2fs_convert_inline_inode(struct inode *inode);
3755 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
3756 int f2fs_write_inline_data(struct inode *inode, struct page *page);
3757 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
3758 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
3759 					const struct f2fs_filename *fname,
3760 					struct page **res_page);
3761 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
3762 			struct page *ipage);
3763 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
3764 			struct inode *inode, nid_t ino, umode_t mode);
3765 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
3766 				struct page *page, struct inode *dir,
3767 				struct inode *inode);
3768 bool f2fs_empty_inline_dir(struct inode *dir);
3769 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
3770 			struct fscrypt_str *fstr);
3771 int f2fs_inline_data_fiemap(struct inode *inode,
3772 			struct fiemap_extent_info *fieinfo,
3773 			__u64 start, __u64 len);
3774 
3775 /*
3776  * shrinker.c
3777  */
3778 unsigned long f2fs_shrink_count(struct shrinker *shrink,
3779 			struct shrink_control *sc);
3780 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
3781 			struct shrink_control *sc);
3782 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
3783 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
3784 
3785 /*
3786  * extent_cache.c
3787  */
3788 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
3789 				struct rb_entry *cached_re, unsigned int ofs);
3790 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
3791 				struct rb_root_cached *root,
3792 				struct rb_node **parent,
3793 				unsigned int ofs, bool *leftmost);
3794 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
3795 		struct rb_entry *cached_re, unsigned int ofs,
3796 		struct rb_entry **prev_entry, struct rb_entry **next_entry,
3797 		struct rb_node ***insert_p, struct rb_node **insert_parent,
3798 		bool force, bool *leftmost);
3799 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
3800 						struct rb_root_cached *root);
3801 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
3802 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
3803 void f2fs_drop_extent_tree(struct inode *inode);
3804 unsigned int f2fs_destroy_extent_node(struct inode *inode);
3805 void f2fs_destroy_extent_tree(struct inode *inode);
3806 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
3807 			struct extent_info *ei);
3808 void f2fs_update_extent_cache(struct dnode_of_data *dn);
3809 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
3810 			pgoff_t fofs, block_t blkaddr, unsigned int len);
3811 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
3812 int __init f2fs_create_extent_cache(void);
3813 void f2fs_destroy_extent_cache(void);
3814 
3815 /*
3816  * sysfs.c
3817  */
3818 int __init f2fs_init_sysfs(void);
3819 void f2fs_exit_sysfs(void);
3820 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
3821 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
3822 
3823 /* verity.c */
3824 extern const struct fsverity_operations f2fs_verityops;
3825 
3826 /*
3827  * crypto support
3828  */
3829 static inline bool f2fs_encrypted_file(struct inode *inode)
3830 {
3831 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
3832 }
3833 
3834 static inline void f2fs_set_encrypted_inode(struct inode *inode)
3835 {
3836 #ifdef CONFIG_FS_ENCRYPTION
3837 	file_set_encrypt(inode);
3838 	f2fs_set_inode_flags(inode);
3839 #endif
3840 }
3841 
3842 /*
3843  * Returns true if reads of the inode's data need a postprocessing step,
3844  * such as decryption, authenticity verification, or decompression.
3845  */
3846 static inline bool f2fs_post_read_required(struct inode *inode)
3847 {
3848 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
3849 		f2fs_compressed_file(inode);
3850 }
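
/*
 * Illustrative sketch (not part of the upstream header): read paths are
 * expected to branch on this before submitting a bio, roughly:
 *
 *	if (f2fs_post_read_required(inode))
 *		set up a post-read context so the data is decrypted,
 *		verified and/or decompressed once the bio completes;
 *	else
 *		use the page directly when the read finishes;
 */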
3851 
3852 /*
3853  * compress.c
3854  */
3855 #ifdef CONFIG_F2FS_FS_COMPRESSION
3856 bool f2fs_is_compressed_page(struct page *page);
3857 struct page *f2fs_compress_control_page(struct page *page);
3858 int f2fs_prepare_compress_overwrite(struct inode *inode,
3859 			struct page **pagep, pgoff_t index, void **fsdata);
3860 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
3861 					pgoff_t index, unsigned copied);
3862 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
3863 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
3864 bool f2fs_is_compress_backend_ready(struct inode *inode);
3865 int f2fs_init_compress_mempool(void);
3866 void f2fs_destroy_compress_mempool(void);
3867 void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
3868 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
3869 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
3870 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
3871 int f2fs_write_multi_pages(struct compress_ctx *cc,
3872 						int *submitted,
3873 						struct writeback_control *wbc,
3874 						enum iostat_type io_type);
3875 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
3876 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
3877 				unsigned nr_pages, sector_t *last_block_in_bio,
3878 				bool is_readahead, bool for_write);
3879 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
3880 void f2fs_free_dic(struct decompress_io_ctx *dic);
3881 void f2fs_decompress_end_io(struct page **rpages,
3882 			unsigned int cluster_size, bool err, bool verity);
3883 int f2fs_init_compress_ctx(struct compress_ctx *cc);
3884 void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
3885 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
3886 #else
3887 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
3888 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
3889 {
3890 	if (!f2fs_compressed_file(inode))
3891 		return true;
3892 	/* compression support is not compiled in */
3893 	return false;
3894 }
3895 static inline struct page *f2fs_compress_control_page(struct page *page)
3896 {
3897 	WARN_ON_ONCE(1);
3898 	return ERR_PTR(-EINVAL);
3899 }
3900 static inline int f2fs_init_compress_mempool(void) { return 0; }
3901 static inline void f2fs_destroy_compress_mempool(void) { }
3902 #endif
3903 
3904 static inline void set_compress_context(struct inode *inode)
3905 {
3906 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3907 
3908 	F2FS_I(inode)->i_compress_algorithm =
3909 			F2FS_OPTION(sbi).compress_algorithm;
3910 	F2FS_I(inode)->i_log_cluster_size =
3911 			F2FS_OPTION(sbi).compress_log_size;
3912 	F2FS_I(inode)->i_cluster_size =
3913 			1 << F2FS_I(inode)->i_log_cluster_size;
3914 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
3915 	set_inode_flag(inode, FI_COMPRESSED_FILE);
3916 	stat_inc_compr_inode(inode);
3917 	f2fs_mark_inode_dirty_sync(inode, true);
3918 }
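
/*
 * Illustrative sketch (not part of the upstream header): a creation or
 * attribute-setting path would typically guard this with the predicates
 * declared elsewhere in this header, e.g.
 *
 *	if (f2fs_sb_has_compression(sbi) && f2fs_may_compress(inode))
 *		set_compress_context(inode);
 */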
3919 
3920 static inline u64 f2fs_disable_compressed_file(struct inode *inode)
3921 {
3922 	struct f2fs_inode_info *fi = F2FS_I(inode);
3923 
3924 	if (!f2fs_compressed_file(inode))
3925 		return 0;
3926 	if (S_ISREG(inode->i_mode)) {
3927 		if (get_dirty_pages(inode))
3928 			return 1;
3929 		if (fi->i_compr_blocks)
3930 			return fi->i_compr_blocks;
3931 	}
3932 
3933 	fi->i_flags &= ~F2FS_COMPR_FL;
3934 	stat_dec_compr_inode(inode);
3935 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
3936 	f2fs_mark_inode_dirty_sync(inode, true);
3937 	return 0;
3938 }
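
/*
 * Illustrative sketch (not part of the upstream header): a non-zero return
 * means the compressed state could not be cleared, either because dirty
 * pages are outstanding (1) or because compressed blocks are still
 * accounted (fi->i_compr_blocks).  A caller would typically treat that as
 * "busy", e.g.
 *
 *	if (f2fs_disable_compressed_file(inode))
 *		return -EBUSY;	(hypothetical error choice)
 */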
3939 
3940 #define F2FS_FEATURE_FUNCS(name, flagname) \
3941 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
3942 { \
3943 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
3944 }
3945 
3946 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
3947 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
3948 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
3949 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
3950 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
3951 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
3952 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
3953 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
3954 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
3955 F2FS_FEATURE_FUNCS(verity, VERITY);
3956 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
3957 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
3958 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
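
/*
 * Illustrative expansion (not part of the upstream header): each
 * F2FS_FEATURE_FUNCS() line above generates a small predicate, e.g.
 * F2FS_FEATURE_FUNCS(compression, COMPRESSION) becomes
 *
 *	static inline int f2fs_sb_has_compression(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION);
 *	}
 */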
3959 
3960 #ifdef CONFIG_BLK_DEV_ZONED
3961 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
3962 				    block_t blkaddr)
3963 {
3964 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
3965 
3966 	return test_bit(zno, FDEV(devi).blkz_seq);
3967 }
3968 #endif
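
/*
 * Worked example (editorial, not part of the upstream header): the zone
 * lookup above is plain shift arithmetic.  With 512 blocks per zone
 * (log_blocks_per_blkz == 9), block address 1536 maps to zone
 * 1536 >> 9 == 3, and the bit test reports whether zone 3 must be
 * written sequentially.
 */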
3969 
3970 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
3971 {
3972 	return f2fs_sb_has_blkzoned(sbi);
3973 }
3974 
3975 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
3976 {
3977 	return blk_queue_discard(bdev_get_queue(bdev)) ||
3978 	       bdev_is_zoned(bdev);
3979 }
3980 
3981 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
3982 {
3983 	int i;
3984 
3985 	if (!f2fs_is_multi_device(sbi))
3986 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
3987 
3988 	for (i = 0; i < sbi->s_ndevs; i++)
3989 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
3990 			return true;
3991 	return false;
3992 }
3993 
3994 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
3995 {
3996 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
3997 					f2fs_hw_should_discard(sbi);
3998 }
3999 
4000 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4001 {
4002 	int i;
4003 
4004 	if (!f2fs_is_multi_device(sbi))
4005 		return bdev_read_only(sbi->sb->s_bdev);
4006 
4007 	for (i = 0; i < sbi->s_ndevs; i++)
4008 		if (bdev_read_only(FDEV(i).bdev))
4009 			return true;
4010 	return false;
4011 }
4012 
4013 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4014 {
4015 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4016 }
4017 
4018 static inline bool f2fs_may_compress(struct inode *inode)
4019 {
4020 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4021 				f2fs_is_atomic_file(inode) ||
4022 				f2fs_is_volatile_file(inode))
4023 		return false;
4024 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4025 }
4026 
4027 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4028 						u64 blocks, bool add)
4029 {
4030 	int diff = F2FS_I(inode)->i_cluster_size - blocks;
4031 
4032 	/* don't update i_compr_blocks if saved blocks were released */
4033 	if (!add && !F2FS_I(inode)->i_compr_blocks)
4034 		return;
4035 
4036 	if (add) {
4037 		F2FS_I(inode)->i_compr_blocks += diff;
4038 		stat_add_compr_blocks(inode, diff);
4039 	} else {
4040 		F2FS_I(inode)->i_compr_blocks -= diff;
4041 		stat_sub_compr_blocks(inode, diff);
4042 	}
4043 	f2fs_mark_inode_dirty_sync(inode, true);
4044 }
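
/*
 * Worked example (editorial, not part of the upstream header): "diff" is
 * the number of blocks saved by compression in one cluster.  With a
 * cluster size of 16 blocks, a cluster that compressed down to 4 blocks
 * gives diff = 16 - 4 = 12, so i_compr_blocks grows (add == true) or
 * shrinks (add == false) by 12 and the inode is marked dirty.
 */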
4045 
4046 static inline int block_unaligned_IO(struct inode *inode,
4047 				struct kiocb *iocb, struct iov_iter *iter)
4048 {
4049 	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
4050 	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
4051 	loff_t offset = iocb->ki_pos;
4052 	unsigned long align = offset | iov_iter_alignment(iter);
4053 
4054 	return align & blocksize_mask;
4055 }
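
/*
 * Worked example (editorial, not part of the upstream header): with 4KiB
 * blocks (i_blkbits == 12) the mask is 0xfff, so a direct write at
 * ki_pos == 8192 with a 4KiB-aligned iovec yields 0 (aligned), while
 * ki_pos == 4608 yields 4608 & 0xfff == 512, i.e. unaligned.
 */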
4056 
4057 static inline int allow_outplace_dio(struct inode *inode,
4058 				struct kiocb *iocb, struct iov_iter *iter)
4059 {
4060 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4061 	int rw = iov_iter_rw(iter);
4062 
4063 	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
4064 				!block_unaligned_IO(inode, iocb, iter));
4065 }
4066 
4067 static inline bool f2fs_force_buffered_io(struct inode *inode,
4068 				struct kiocb *iocb, struct iov_iter *iter)
4069 {
4070 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4071 	int rw = iov_iter_rw(iter);
4072 
4073 	if (f2fs_post_read_required(inode))
4074 		return true;
4075 	if (f2fs_is_multi_device(sbi))
4076 		return true;
4077 	/*
4078 	 * For a zoned block device, fall back from direct IO to buffered IO
4079 	 * so that all IOs can be serialized by the log-structured write path.
4080 	 */
4081 	if (f2fs_sb_has_blkzoned(sbi))
4082 		return true;
4083 	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
4084 		if (block_unaligned_IO(inode, iocb, iter))
4085 			return true;
4086 		if (F2FS_IO_ALIGNED(sbi))
4087 			return true;
4088 	}
4089 	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
4090 					!IS_SWAPFILE(inode))
4091 		return true;
4092 
4093 	return false;
4094 }
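
/*
 * Illustrative sketch (not part of the upstream header): the direct-IO
 * entry points are expected to consult this before choosing a path,
 * along the lines of
 *
 *	if (f2fs_force_buffered_io(inode, iocb, iter))
 *		fall back to the buffered read/write path;
 *	else
 *		proceed with blockdev direct IO;
 */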
4095 
4096 #ifdef CONFIG_F2FS_FAULT_INJECTION
4097 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4098 							unsigned int type);
4099 #else
4100 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
4101 #endif
4102 
4103 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4104 {
4105 #ifdef CONFIG_QUOTA
4106 	if (f2fs_sb_has_quota_ino(sbi))
4107 		return true;
4108 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4109 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4110 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4111 		return true;
4112 #endif
4113 	return false;
4114 }
4115 
4116 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4117 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4118 
4119 #endif /* _LINUX_F2FS_H */
4120