/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

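/*
 * Illustrative usage (not part of the original header): callers assert
 * internal invariants with f2fs_bug_on().  On CONFIG_F2FS_CHECK_FS builds
 * a violation panics via BUG_ON(); on production builds it only warns and
 * marks the filesystem for fsck, e.g.:
 *
 *	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 */
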
enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete: bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

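/*
 * Sketch of how the fault info is consulted (modeled loosely on f2fs's
 * time_to_inject(); simplified, for illustration only).  A fault fires
 * once every inject_rate qualifying operations of an enabled type:
 *
 *	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 *
 *	if (IS_FAULT_SET(ffi, FAULT_KMALLOC) &&
 *	    atomic_inc_return(&ffi->inject_ops) >= ffi->inject_rate) {
 *		atomic_set(&ffi->inject_ops, 0);
 *		return true;	/* inject a kmalloc failure */
 *	}
 */
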
/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

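/*
 * Illustrative usage: the option name is token-pasted onto F2FS_MOUNT_,
 * so mount-option handling reads naturally, e.g.:
 *
 *	set_opt(sbi, DISCARD);			// sets F2FS_MOUNT_DISCARD
 *	if (test_opt(sbi, INLINE_DATA))
 *		clear_opt(sbi, INLINE_DATA);
 */
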
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

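/*
 * Worked example: the signed subtraction makes the comparison safe across
 * u64 wraparound, in the same spirit as time_after().  With a = 1 and
 * b = ULLONG_MAX, (a - b) wraps to 2, and (long long)2 > 0, so
 * ver_after(1, ULLONG_MAX) is true: version 1 is "after" the wrapped
 * ULLONG_MAX.
 */
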
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* Controls which write hints are passed down to the block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* compress extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* nocompress extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))

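/*
 * Illustrative check: the on-disk feature word is little-endian, so the
 * helpers convert the mask once with cpu_to_le32(), e.g.:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT))
 *		... encryption feature was enabled at mkfs time ...
 */
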
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))

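/*
 * Worked example: pending discards are bucketed by length, so
 * plist_idx(1) == 0, plist_idx(16) == 15, and any request of
 * MAX_PLIST_NUM (512) blocks or more lands in the last bucket, e.g.
 * plist_idx(4096) == 511.  Callers are expected to pass blk_num >= 1.
 */
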
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used when candidates exist */
	unsigned int mid_interval;	/* used when device is busy */
	unsigned int max_interval;	/* used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity at which discard ignores I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -	\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

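/*
 * Worked example (illustrative, assuming an inode with no extra attribute
 * space, the common 923 block-address slots, and the default 50
 * inline-xattr slots):
 *
 *	MAX_INLINE_DATA  = 4 * (923 - 50 - 1)            = 3488 bytes
 *	NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 entries
 *	bitmap size      = DIV_ROUND_UP(182, 8)          = 23 bytes
 *	reserved         = 3488 - (19 * 182 + 23)        = 7 bytes
 *
 * where SIZE_OF_DIR_ENTRY is 11 and F2FS_SLOT_LEN is 8 on-disk bytes.
 */
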
/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

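/*
 * The pointer arithmetic above implies the following inline dentry layout
 * (illustrative):
 *
 *	+--------+----------+------------------+-----------------+
 *	| bitmap | reserved | dentry metadata  | filename slots  |
 *	+--------+----------+------------------+-----------------+
 *	^t       ^t + bitmap_size              ^... + SIZE_OF_DIR_ENTRY * entry_cnt
 */
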
/*
 * F2FS stores each file's xattrs in one dedicated node block, using -1 as
 * its node offset to distinguish it from index node blocks.  The top
 * OFFSET_BIT_SHIFT bits of the offset are used to mark the node block, so
 * they are masked off here.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
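/*
 * Worked example of the mask: shifting (unsigned int)-1 left and then back
 * right by OFFSET_BIT_SHIFT clears the top OFFSET_BIT_SHIFT bits.  If, say,
 * OFFSET_BIT_SHIFT were 3 (hypothetical value for illustration),
 * XATTR_NODE_OFFSET would be 0x1fffffff: the all-ones offset with the
 * three marker bits zeroed.
 */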
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry count for read IO or flush */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bit key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int c_len;		/* physical extent length of compressed blocks */
#endif
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note, however, that f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* used below internally by f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ei->c_len = 0;
#endif
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (back->c_len && back->len != back->c_len)
		return false;
	if (front->c_len && front->len != front->c_len)
		return false;
#endif
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

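/*
 * Worked example: two extents merge only when both the file range and the
 * block range are contiguous.  With back = {fofs 0, len 8, blk 100} and
 * front = {fofs 8, len 4, blk 108}, __is_extent_mergeable() is true and
 * the merged extent is {fofs 0, len 12, blk 100}; if front started at
 * blk 200 instead, the file offsets would still touch but the extents
 * would not merge.
 */
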
extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}

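/*
 * Typical caller pattern (illustrative; f2fs_get_dnode_of_data() and
 * f2fs_put_dnode() are declared later in this header):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	...
 *	f2fs_put_dnode(&dn);
 */
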
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, the mount option active_logs=x lets you use 2, 4, or 6 logs
 * according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs: 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent logs */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block addresses */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem logs */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;  /* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,		/* use both lfs/ssr allocation */
	FS_MODE_LFS,			/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,		/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,		/* block fragmentation mode */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* posix semantics, but without issuing a barrier */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 2	PAGE_PRIVATE_DUMMY_WRITE
 * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 4	PAGE_PRIVATE_INLINE_INODE
 * bit 5	PAGE_PRIVATE_REF_RESOURCE
 * bit 6-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is being migrated */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};

#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
static inline bool page_private_##name(struct page *page) \
{ \
	return PagePrivate(page) && \
		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_SET_FUNC(name, flagname) \
static inline void set_page_private_##name(struct page *page) \
{ \
	if (!PagePrivate(page)) { \
		get_page(page); \
		SetPagePrivate(page); \
		set_page_private(page, 0); \
	} \
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
}

#define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
static inline void clear_page_private_##name(struct page *page) \
{ \
	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
		set_page_private(page, 0); \
		if (PagePrivate(page)) { \
			ClearPagePrivate(page); \
			put_page(page); \
		} \
	} \
}

PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
	unsigned long data = page_private(page);

	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
		return 0;
	return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
	if (!PagePrivate(page)) {
		get_page(page);
		SetPagePrivate(page);
		set_page_private(page, 0);
	}
	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
	page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
		set_page_private(page, 0);
		if (PagePrivate(page)) {
			ClearPagePrivate(page);
			put_page(page);
		}
	}
}

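/*
 * Worked example of Layout A: with PAGE_PRIVATE_MAX == 6, calling
 * set_page_private_data(page, 5) stores (5 << 6) | 1 = 321 in
 * page.private: bit 0 marks the word as non-pointer data and the payload
 * lives above the flag bits, so get_page_private_data() returns 5.
 */
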
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define	COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

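/*
 * Worked size check: the header occupies 4 (clen) + 4 (chksum) + 16
 * (reserved) = 24 bytes, so COMPRESS_HEADER_SIZE is 24 and the compressed
 * payload of a cluster starts 24 bytes into the first cpage.
 */
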
1475 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
1476 
1477 #define	COMPRESS_LEVEL_OFFSET	8
1478 
1479 /* compress context */
1480 struct compress_ctx {
1481 	struct inode *inode;		/* inode the context belong to */
1482 	pgoff_t cluster_idx;		/* cluster index number */
1483 	unsigned int cluster_size;	/* page count in cluster */
1484 	unsigned int log_cluster_size;	/* log of cluster size */
1485 	struct page **rpages;		/* pages store raw data in cluster */
1486 	unsigned int nr_rpages;		/* total page number in rpages */
1487 	struct page **cpages;		/* pages store compressed data in cluster */
1488 	unsigned int nr_cpages;		/* total page number in cpages */
1489 	unsigned int valid_nr_cpages;	/* valid page number in cpages */
1490 	void *rbuf;			/* virtual mapped address on rpages */
1491 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1492 	size_t rlen;			/* valid data length in rbuf */
1493 	size_t clen;			/* valid data length in cbuf */
1494 	void *private;			/* payload buffer for specified compression algorithm */
1495 	void *private2;			/* extra payload buffer */
1496 };
1497 
1498 /* compress context for write IO path */
1499 struct compress_io_ctx {
1500 	u32 magic;			/* magic number to indicate page is compressed */
1501 	struct inode *inode;		/* inode the context belong to */
1502 	struct page **rpages;		/* pages store raw data in cluster */
1503 	unsigned int nr_rpages;		/* total page number in rpages */
1504 	atomic_t pending_pages;		/* in-flight compressed page count */
1505 };
1506 
1507 /* Context for decompressing one cluster on the read IO path */
1508 struct decompress_io_ctx {
1509 	u32 magic;			/* magic number to indicate page is compressed */
1510 	struct inode *inode;		/* inode the context belong to */
1511 	pgoff_t cluster_idx;		/* cluster index number */
1512 	unsigned int cluster_size;	/* page count in cluster */
1513 	unsigned int log_cluster_size;	/* log of cluster size */
1514 	struct page **rpages;		/* pages store raw data in cluster */
1515 	unsigned int nr_rpages;		/* total page number in rpages */
1516 	struct page **cpages;		/* pages store compressed data in cluster */
1517 	unsigned int nr_cpages;		/* total page number in cpages */
1518 	struct page **tpages;		/* temp pages to pad holes in cluster */
1519 	void *rbuf;			/* virtual mapped address on rpages */
1520 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1521 	size_t rlen;			/* valid data length in rbuf */
1522 	size_t clen;			/* valid data length in cbuf */
1523 
1524 	/*
1525 	 * The number of compressed pages remaining to be read in this cluster.
1526 	 * This is initially nr_cpages.  It is decremented by 1 each time a page
1527 	 * has been read (or failed to be read).  When it reaches 0, the cluster
1528 	 * is decompressed (or an error is reported).
1529 	 *
1530 	 * If an error occurs before all the pages have been submitted for I/O,
1531 	 * then this will never reach 0.  In this case the I/O submitter is
1532 	 * responsible for calling f2fs_decompress_end_io() instead.
1533 	 */
1534 	atomic_t remaining_pages;
1535 
1536 	/*
1537 	 * Number of references to this decompress_io_ctx.
1538 	 *
1539 	 * One reference is held for I/O completion.  This reference is dropped
1540 	 * after the pagecache pages are updated and unlocked -- either after
1541 	 * decompression (and verity if enabled), or after an error.
1542 	 *
1543 	 * In addition, each compressed page holds a reference while it is in a
1544 	 * bio.  These references are necessary to prevent compressed pages from
1545 	 * being freed while they are still in a bio.
1546 	 */
1547 	refcount_t refcnt;
1548 
1549 	bool failed;			/* IO error occurred before decompression? */
1550 	bool need_verity;		/* need fs-verity verification after decompression? */
1551 	void *private;			/* payload buffer for specified decompression algorithm */
1552 	void *private2;			/* extra payload buffer */
1553 	struct work_struct verity_work;	/* work to verify the decompressed pages */
1554 };
1555 
1556 #define NULL_CLUSTER			((unsigned int)(~0))
1557 #define MIN_COMPRESS_LOG_SIZE		2
1558 #define MAX_COMPRESS_LOG_SIZE		8
1559 #define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
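
/*
 * Illustrative sketch: with 4KB pages, the supported log_cluster_size
 * range [MIN_COMPRESS_LOG_SIZE, MAX_COMPRESS_LOG_SIZE] maps to clusters
 * of 4..256 pages, i.e. compression windows of 16KB..1MB:
 *
 *	cluster_pages = 1 << log_cluster_size;
 *	window_bytes  = MAX_COMPRESS_WINDOW_SIZE(log_cluster_size);
 */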
1560 
1561 struct f2fs_sb_info {
1562 	struct super_block *sb;			/* pointer to VFS super block */
1563 	struct proc_dir_entry *s_proc;		/* proc entry */
1564 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1565 	struct rw_semaphore sb_lock;		/* lock for raw super block */
1566 	int valid_super_block;			/* valid super block no */
1567 	unsigned long s_flag;				/* flags for sbi */
1568 	struct mutex writepages;		/* mutex for writepages() */
1569 
1570 #ifdef CONFIG_BLK_DEV_ZONED
1571 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1572 	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
1573 #endif
1574 
1575 	/* for node-related operations */
1576 	struct f2fs_nm_info *nm_info;		/* node manager */
1577 	struct inode *node_inode;		/* cache node blocks */
1578 
1579 	/* for segment-related operations */
1580 	struct f2fs_sm_info *sm_info;		/* segment manager */
1581 
1582 	/* for bio operations */
1583 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1584 	/* keep migration IO order for LFS mode */
1585 	struct rw_semaphore io_order_lock;
1586 	mempool_t *write_io_dummy;		/* Dummy pages */
1587 
1588 	/* for checkpoint */
1589 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1590 	int cur_cp_pack;			/* current cp pack (1 or 2) */
1591 	spinlock_t cp_lock;			/* for flag in ckpt */
1592 	struct inode *meta_inode;		/* cache meta blocks */
1593 	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
1594 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
1595 	struct rw_semaphore node_write;		/* locking node writes */
1596 	struct rw_semaphore node_change;	/* locking node change */
1597 	wait_queue_head_t cp_wait;
1598 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1599 	long interval_time[MAX_TIME];		/* to store thresholds */
1600 	struct ckpt_req_control cprc_info;	/* for checkpoint request control */
1601 
1602 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1603 
1604 	spinlock_t fsync_node_lock;		/* for node entry lock */
1605 	struct list_head fsync_node_list;	/* node list head */
1606 	unsigned int fsync_seg_id;		/* sequence id */
1607 	unsigned int fsync_node_num;		/* number of node entries */
1608 
1609 	/* for orphan inodes, use the 0'th array */
1610 	unsigned int max_orphans;		/* max orphan inodes */
1611 
1612 	/* for inode management */
1613 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1614 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1615 	struct mutex flush_lock;		/* for flush exclusion */
1616 
1617 	/* for extent tree cache */
1618 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
1619 	struct mutex extent_tree_lock;	/* locking extent radix tree */
1620 	struct list_head extent_list;		/* lru list for shrinker */
1621 	spinlock_t extent_lock;			/* locking extent lru list */
1622 	atomic_t total_ext_tree;		/* extent tree count */
1623 	struct list_head zombie_list;		/* extent zombie tree list */
1624 	atomic_t total_zombie_tree;		/* extent zombie tree count */
1625 	atomic_t total_ext_node;		/* extent info count */
1626 
1627 	/* basic filesystem units */
1628 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1629 	unsigned int log_blocksize;		/* log2 block size */
1630 	unsigned int blocksize;			/* block size */
1631 	unsigned int root_ino_num;		/* root inode number */
1632 	unsigned int node_ino_num;		/* node inode number */
1633 	unsigned int meta_ino_num;		/* meta inode number */
1634 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1635 	unsigned int blocks_per_seg;		/* blocks per segment */
1636 	unsigned int segs_per_sec;		/* segments per section */
1637 	unsigned int secs_per_zone;		/* sections per zone */
1638 	unsigned int total_sections;		/* total section count */
1639 	unsigned int total_node_count;		/* total node block count */
1640 	unsigned int total_valid_node_count;	/* valid node block count */
1641 	int dir_level;				/* directory level */
1642 	int readdir_ra;				/* readahead inode in readdir */
1643 	u64 max_io_bytes;			/* max io bytes to merge IOs */
1644 
1645 	block_t user_block_count;		/* # of user blocks */
1646 	block_t total_valid_block_count;	/* # of valid blocks */
1647 	block_t discard_blks;			/* discard command candidates */
1648 	block_t last_valid_block_count;		/* for recovery */
1649 	block_t reserved_blocks;		/* configurable reserved blocks */
1650 	block_t current_reserved_blocks;	/* current reserved blocks */
1651 
1652 	/* Additional tracking for no checkpoint mode */
1653 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1654 
1655 	unsigned int nquota_files;		/* # of quota sysfiles */
1656 	struct rw_semaphore quota_sem;		/* blocking cp for flags */
1657 
1658 	/* # of pages, see count_type */
1659 	atomic_t nr_pages[NR_COUNT_TYPE];
1660 	/* # of allocated blocks */
1661 	struct percpu_counter alloc_valid_block_count;
1662 
1663 	/* writeback control */
1664 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1665 
1666 	/* valid inode count */
1667 	struct percpu_counter total_valid_inode_count;
1668 
1669 	struct f2fs_mount_info mount_opt;	/* mount options */
1670 
1671 	/* for cleaning operations */
1672 	struct rw_semaphore gc_lock;		/*
1673 						 * semaphore for GC; avoids races
1674 						 * between GCs and between GC and CP
1675 						 */
1676 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1677 	struct atgc_management am;		/* atgc management */
1678 	unsigned int cur_victim_sec;		/* current victim section num */
1679 	unsigned int gc_mode;			/* current GC state */
1680 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1681 
1682 	/* for skip statistic */
1683 	unsigned int atomic_files;		/* # of opened atomic files */
1684 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
1685 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1686 
1687 	/* threshold for gc trials on pinned files */
1688 	u64 gc_pin_file_threshold;
1689 	struct rw_semaphore pin_sem;
1690 
1691 	/* maximum # of trials to find a victim segment for SSR and GC */
1692 	unsigned int max_victim_search;
1693 	/* migration granularity of garbage collection, unit: segment */
1694 	unsigned int migration_granularity;
1695 
1696 	/*
1697 	 * for stat information: in the two-element arrays below, one slot
1698 	 * is for the LFS mode and the other is for the SSR mode.
1699 	 */
1700 #ifdef CONFIG_F2FS_STAT_FS
1701 	struct f2fs_stat_info *stat_info;	/* FS status information */
1702 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1703 	unsigned int segment_count[2];		/* # of allocated segments */
1704 	unsigned int block_count[2];		/* # of allocated blocks */
1705 	atomic_t inplace_count;		/* # of inplace update */
1706 	atomic64_t total_hit_ext;		/* # of lookup extent cache */
1707 	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
1708 	atomic64_t read_hit_largest;		/* # of hit largest extent node */
1709 	atomic64_t read_hit_cached;		/* # of hit cached extent node */
1710 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1711 	atomic_t inline_inode;			/* # of inline_data inodes */
1712 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1713 	atomic_t compr_inode;			/* # of compressed inodes */
1714 	atomic64_t compr_blocks;		/* # of compressed blocks */
1715 	atomic_t vw_cnt;			/* # of volatile writes */
1716 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1717 	atomic_t max_vw_cnt;			/* max # of volatile writes */
1718 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1719 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1720 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1721 #endif
1722 	spinlock_t stat_lock;			/* lock for stat operations */
1723 
1724 	/* to attach REQ_META|REQ_FUA flags */
1725 	unsigned int data_io_flag;
1726 	unsigned int node_io_flag;
1727 
1728 	/* For sysfs support */
1729 	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
1730 	struct completion s_kobj_unregister;
1731 
1732 	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
1733 	struct completion s_stat_kobj_unregister;
1734 
1735 	struct kobject s_feature_list_kobj;		/* /sys/fs/f2fs/<devname>/feature_list */
1736 	struct completion s_feature_list_kobj_unregister;
1737 
1738 	/* For shrinker support */
1739 	struct list_head s_list;
1740 	struct mutex umount_mutex;
1741 	unsigned int shrinker_run_no;
1742 
1743 	/* For multi devices */
1744 	int s_ndevs;				/* number of devices */
1745 	struct f2fs_dev_info *devs;		/* for device list */
1746 	unsigned int dirty_device;		/* for checkpoint data flush */
1747 	spinlock_t dev_lock;			/* protect dirty_device */
1748 	bool aligned_blksize;			/* all devices have the same logical blksize */
1749 
1750 	/* For write statistics */
1751 	u64 sectors_written_start;
1752 	u64 kbytes_written;
1753 
1754 	/* Reference to checksum algorithm driver via cryptoapi */
1755 	struct crypto_shash *s_chksum_driver;
1756 
1757 	/* Precomputed FS UUID checksum for seeding other checksums */
1758 	__u32 s_chksum_seed;
1759 
1760 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1761 
1762 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1763 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1764 
1765 	/* For reclaimed segs statistics per each GC mode */
1766 	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
1767 	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */
1768 
1769 	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */
1770 
1771 	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
1772 	int max_fragment_hole;			/* max hole size for block fragmentation mode */
1773 
1774 #ifdef CONFIG_F2FS_FS_COMPRESSION
1775 	struct kmem_cache *page_array_slab;	/* page array entry */
1776 	unsigned int page_array_slab_size;	/* default page array slab size */
1777 
1778 	/* For runtime compression statistics */
1779 	u64 compr_written_block;
1780 	u64 compr_saved_block;
1781 	u32 compr_new_inode;
1782 
1783 	/* For compressed block cache */
1784 	struct inode *compress_inode;		/* cache compressed blocks */
1785 	unsigned int compress_percent;		/* cache page percentage */
1786 	unsigned int compress_watermark;	/* cache page watermark */
1787 	atomic_t compress_page_hit;		/* cache hit count */
1788 #endif
1789 
1790 #ifdef CONFIG_F2FS_IOSTAT
1791 	/* For app/fs IO statistics */
1792 	spinlock_t iostat_lock;
1793 	unsigned long long rw_iostat[NR_IO_TYPE];
1794 	unsigned long long prev_rw_iostat[NR_IO_TYPE];
1795 	bool iostat_enable;
1796 	unsigned long iostat_next_period;
1797 	unsigned int iostat_period_ms;
1798 
1799 	/* For io latency related statistics info in one iostat period */
1800 	spinlock_t iostat_lat_lock;
1801 	struct iostat_lat_info *iostat_io_lat;
1802 #endif
1803 };
1804 
1805 struct f2fs_private_dio {
1806 	struct inode *inode;
1807 	void *orig_private;
1808 	bio_end_io_t *orig_end_io;
1809 	bool write;
1810 };
1811 
1812 #ifdef CONFIG_F2FS_FAULT_INJECTION
1813 #define f2fs_show_injection_info(sbi, type)					\
1814 	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
1815 		KERN_INFO, sbi->sb->s_id,				\
1816 		f2fs_fault_name[type],					\
1817 		__func__, __builtin_return_address(0))
1818 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1819 {
1820 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1821 
1822 	if (!ffi->inject_rate)
1823 		return false;
1824 
1825 	if (!IS_FAULT_SET(ffi, type))
1826 		return false;
1827 
1828 	atomic_inc(&ffi->inject_ops);
1829 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1830 		atomic_set(&ffi->inject_ops, 0);
1831 		return true;
1832 	}
1833 	return false;
1834 }
1835 #else
1836 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1837 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1838 {
1839 	return false;
1840 }
1841 #endif
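
/*
 * Illustrative usage sketch, mirroring the pattern used by the wrappers
 * later in this file: check time_to_inject() before the real operation
 * and fail the same way the operation would fail naturally.
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 *	return kmalloc(size, flags);
 */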
1842 
1843 /*
1844  * Test if the mounted volume is a multi-device volume.
1845  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1846  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1847  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1848  */
1849 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1850 {
1851 	return sbi->s_ndevs > 1;
1852 }
1853 
1854 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1855 {
1856 	unsigned long now = jiffies;
1857 
1858 	sbi->last_time[type] = now;
1859 
1860 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1861 	if (type == REQ_TIME) {
1862 		sbi->last_time[DISCARD_TIME] = now;
1863 		sbi->last_time[GC_TIME] = now;
1864 	}
1865 }
1866 
1867 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1868 {
1869 	unsigned long interval = sbi->interval_time[type] * HZ;
1870 
1871 	return time_after(jiffies, sbi->last_time[type] + interval);
1872 }
1873 
1874 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1875 						int type)
1876 {
1877 	unsigned long interval = sbi->interval_time[type] * HZ;
1878 	unsigned int wait_ms = 0;
1879 	long delta;
1880 
1881 	delta = (sbi->last_time[type] + interval) - jiffies;
1882 	if (delta > 0)
1883 		wait_ms = jiffies_to_msecs(delta);
1884 
1885 	return wait_ms;
1886 }
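
/*
 * Illustrative sketch, assuming a background thread such as the GC
 * thread: f2fs_update_time() stamps the last activity, f2fs_time_over()
 * checks whether the configured interval has elapsed, and
 * f2fs_time_to_wait() says how long to sleep until it does.
 *
 *	if (f2fs_time_over(sbi, GC_TIME))
 *		do_background_work(sbi);	(hypothetical helper)
 *	else
 *		msleep(f2fs_time_to_wait(sbi, GC_TIME));
 */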
1887 
1888 /*
1889  * Inline functions
1890  */
1891 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1892 			      const void *address, unsigned int length)
1893 {
1894 	struct {
1895 		struct shash_desc shash;
1896 		char ctx[4];
1897 	} desc;
1898 	int err;
1899 
1900 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1901 
1902 	desc.shash.tfm = sbi->s_chksum_driver;
1903 	*(u32 *)desc.ctx = crc;
1904 
1905 	err = crypto_shash_update(&desc.shash, address, length);
1906 	BUG_ON(err);
1907 
1908 	return *(u32 *)desc.ctx;
1909 }
1910 
1911 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1912 			   unsigned int length)
1913 {
1914 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1915 }
1916 
1917 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1918 				  void *buf, size_t buf_size)
1919 {
1920 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1921 }
1922 
1923 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1924 			      const void *address, unsigned int length)
1925 {
1926 	return __f2fs_crc32(sbi, crc, address, length);
1927 }
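
/*
 * Illustrative sketch: metadata verification typically computes the CRC
 * of a block and compares it against the stored value; f2fs_chksum()
 * continues a running checksum, commonly seeded with sbi->s_chksum_seed.
 *
 *	u32 crc = f2fs_crc32(sbi, buf, buf_size);
 *	bool ok = f2fs_crc_valid(sbi, stored_crc, buf, buf_size);
 */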
1928 
1929 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1930 {
1931 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1932 }
1933 
1934 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1935 {
1936 	return sb->s_fs_info;
1937 }
1938 
1939 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1940 {
1941 	return F2FS_SB(inode->i_sb);
1942 }
1943 
1944 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1945 {
1946 	return F2FS_I_SB(mapping->host);
1947 }
1948 
1949 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1950 {
1951 	return F2FS_M_SB(page_file_mapping(page));
1952 }
1953 
1954 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1955 {
1956 	return (struct f2fs_super_block *)(sbi->raw_super);
1957 }
1958 
1959 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1960 {
1961 	return (struct f2fs_checkpoint *)(sbi->ckpt);
1962 }
1963 
1964 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1965 {
1966 	return (struct f2fs_node *)page_address(page);
1967 }
1968 
1969 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1970 {
1971 	return &((struct f2fs_node *)page_address(page))->i;
1972 }
1973 
1974 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1975 {
1976 	return (struct f2fs_nm_info *)(sbi->nm_info);
1977 }
1978 
1979 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1980 {
1981 	return (struct f2fs_sm_info *)(sbi->sm_info);
1982 }
1983 
1984 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1985 {
1986 	return (struct sit_info *)(SM_I(sbi)->sit_info);
1987 }
1988 
1989 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1990 {
1991 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1992 }
1993 
1994 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1995 {
1996 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1997 }
1998 
1999 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
2000 {
2001 	return sbi->meta_inode->i_mapping;
2002 }
2003 
2004 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2005 {
2006 	return sbi->node_inode->i_mapping;
2007 }
2008 
2009 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2010 {
2011 	return test_bit(type, &sbi->s_flag);
2012 }
2013 
2014 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2015 {
2016 	set_bit(type, &sbi->s_flag);
2017 }
2018 
2019 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2020 {
2021 	clear_bit(type, &sbi->s_flag);
2022 }
2023 
2024 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2025 {
2026 	return le64_to_cpu(cp->checkpoint_ver);
2027 }
2028 
2029 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2030 {
2031 	if (type < F2FS_MAX_QUOTAS)
2032 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2033 	return 0;
2034 }
2035 
2036 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2037 {
2038 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2039 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2040 }
2041 
2042 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2043 {
2044 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2045 
2046 	return ckpt_flags & f;
2047 }
2048 
2049 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2050 {
2051 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2052 }
2053 
2054 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2055 {
2056 	unsigned int ckpt_flags;
2057 
2058 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2059 	ckpt_flags |= f;
2060 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2061 }
2062 
2063 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2064 {
2065 	unsigned long flags;
2066 
2067 	spin_lock_irqsave(&sbi->cp_lock, flags);
2068 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
2069 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2070 }
2071 
2072 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2073 {
2074 	unsigned int ckpt_flags;
2075 
2076 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2077 	ckpt_flags &= (~f);
2078 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2079 }
2080 
2081 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2082 {
2083 	unsigned long flags;
2084 
2085 	spin_lock_irqsave(&sbi->cp_lock, flags);
2086 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
2087 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2088 }
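
/*
 * Illustrative sketch: callers use the locked wrappers so the checkpoint
 * flags are updated consistently under cp_lock, e.g. (hypothetical call
 * site; CP_ORPHAN_PRESENT_FLAG is a real checkpoint flag):
 *
 *	set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
 *	...
 *	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
 *		recover_orphans();
 */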
2089 
2090 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2091 {
2092 	down_read(&sbi->cp_rwsem);
2093 }
2094 
2095 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2096 {
2097 	return down_read_trylock(&sbi->cp_rwsem);
2098 }
2099 
2100 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2101 {
2102 	up_read(&sbi->cp_rwsem);
2103 }
2104 
2105 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2106 {
2107 	down_write(&sbi->cp_rwsem);
2108 }
2109 
2110 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2111 {
2112 	up_write(&sbi->cp_rwsem);
2113 }
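
/*
 * Illustrative sketch of the protocol above: filesystem operations that
 * must not race with checkpoint take cp_rwsem shared, while the
 * checkpoint path takes it exclusive via f2fs_lock_all().
 *
 *	f2fs_lock_op(sbi);
 *	... allocate or modify blocks ...
 *	f2fs_unlock_op(sbi);
 */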
2114 
2115 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2116 {
2117 	int reason = CP_SYNC;
2118 
2119 	if (test_opt(sbi, FASTBOOT))
2120 		reason = CP_FASTBOOT;
2121 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2122 		reason = CP_UMOUNT;
2123 	return reason;
2124 }
2125 
2126 static inline bool __remain_node_summaries(int reason)
2127 {
2128 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
2129 }
2130 
2131 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2132 {
2133 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2134 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2135 }
2136 
2137 /*
2138  * Check whether the inode has blocks or not
2139  */
2140 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2141 {
2142 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2143 
2144 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2145 }
2146 
2147 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2148 {
2149 	return ofs == XATTR_NODE_OFFSET;
2150 }
2151 
2152 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2153 					struct inode *inode, bool cap)
2154 {
2155 	if (!inode)
2156 		return true;
2157 	if (!test_opt(sbi, RESERVE_ROOT))
2158 		return false;
2159 	if (IS_NOQUOTA(inode))
2160 		return true;
2161 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2162 		return true;
2163 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2164 					in_group_p(F2FS_OPTION(sbi).s_resgid))
2165 		return true;
2166 	if (cap && capable(CAP_SYS_RESOURCE))
2167 		return true;
2168 	return false;
2169 }
2170 
2171 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2172 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2173 				 struct inode *inode, blkcnt_t *count)
2174 {
2175 	blkcnt_t diff = 0, release = 0;
2176 	block_t avail_user_block_count;
2177 	int ret;
2178 
2179 	ret = dquot_reserve_block(inode, *count);
2180 	if (ret)
2181 		return ret;
2182 
2183 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2184 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2185 		release = *count;
2186 		goto release_quota;
2187 	}
2188 
2189 	/*
2190 	 * let's increase this prior to the actual block count change so that
2191 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2192 	 */
2193 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2194 
2195 	spin_lock(&sbi->stat_lock);
2196 	sbi->total_valid_block_count += (block_t)(*count);
2197 	avail_user_block_count = sbi->user_block_count -
2198 					sbi->current_reserved_blocks;
2199 
2200 	if (!__allow_reserved_blocks(sbi, inode, true))
2201 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2202 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2203 		if (avail_user_block_count > sbi->unusable_block_count)
2204 			avail_user_block_count -= sbi->unusable_block_count;
2205 		else
2206 			avail_user_block_count = 0;
2207 	}
2208 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2209 		diff = sbi->total_valid_block_count - avail_user_block_count;
2210 		if (diff > *count)
2211 			diff = *count;
2212 		*count -= diff;
2213 		release = diff;
2214 		sbi->total_valid_block_count -= diff;
2215 		if (!*count) {
2216 			spin_unlock(&sbi->stat_lock);
2217 			goto enospc;
2218 		}
2219 	}
2220 	spin_unlock(&sbi->stat_lock);
2221 
2222 	if (unlikely(release)) {
2223 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2224 		dquot_release_reservation_block(inode, release);
2225 	}
2226 	f2fs_i_blocks_write(inode, *count, true, true);
2227 	return 0;
2228 
2229 enospc:
2230 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2231 release_quota:
2232 	dquot_release_reservation_block(inode, release);
2233 	return -ENOSPC;
2234 }
2235 
2236 __printf(2, 3)
2237 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2238 
2239 #define f2fs_err(sbi, fmt, ...)						\
2240 	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2241 #define f2fs_warn(sbi, fmt, ...)					\
2242 	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2243 #define f2fs_notice(sbi, fmt, ...)					\
2244 	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2245 #define f2fs_info(sbi, fmt, ...)					\
2246 	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2247 #define f2fs_debug(sbi, fmt, ...)					\
2248 	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2249 
2250 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2251 						struct inode *inode,
2252 						block_t count)
2253 {
2254 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2255 
2256 	spin_lock(&sbi->stat_lock);
2257 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2258 	sbi->total_valid_block_count -= (block_t)count;
2259 	if (sbi->reserved_blocks &&
2260 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2261 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2262 					sbi->current_reserved_blocks + count);
2263 	spin_unlock(&sbi->stat_lock);
2264 	if (unlikely(inode->i_blocks < sectors)) {
2265 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2266 			  inode->i_ino,
2267 			  (unsigned long long)inode->i_blocks,
2268 			  (unsigned long long)sectors);
2269 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2270 		return;
2271 	}
2272 	f2fs_i_blocks_write(inode, count, false, true);
2273 }
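
/*
 * Illustrative sketch: inc_valid_block_count() pairs with
 * dec_valid_block_count().  On near-ENOSPC conditions *count may be
 * trimmed down, so callers must use the value written back:
 *
 *	blkcnt_t count = nr_wanted;	(nr_wanted is hypothetical)
 *	if (inc_valid_block_count(sbi, inode, &count))
 *		return -ENOSPC;
 *	... allocate "count" blocks; on failure or free ...
 *	dec_valid_block_count(sbi, inode, count);
 */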
2274 
2275 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2276 {
2277 	atomic_inc(&sbi->nr_pages[count_type]);
2278 
2279 	if (count_type == F2FS_DIRTY_DENTS ||
2280 			count_type == F2FS_DIRTY_NODES ||
2281 			count_type == F2FS_DIRTY_META ||
2282 			count_type == F2FS_DIRTY_QDATA ||
2283 			count_type == F2FS_DIRTY_IMETA)
2284 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2285 }
2286 
2287 static inline void inode_inc_dirty_pages(struct inode *inode)
2288 {
2289 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2290 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2291 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2292 	if (IS_NOQUOTA(inode))
2293 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2294 }
2295 
2296 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2297 {
2298 	atomic_dec(&sbi->nr_pages[count_type]);
2299 }
2300 
2301 static inline void inode_dec_dirty_pages(struct inode *inode)
2302 {
2303 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2304 			!S_ISLNK(inode->i_mode))
2305 		return;
2306 
2307 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2308 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2309 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2310 	if (IS_NOQUOTA(inode))
2311 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2312 }
2313 
2314 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2315 {
2316 	return atomic_read(&sbi->nr_pages[count_type]);
2317 }
2318 
2319 static inline int get_dirty_pages(struct inode *inode)
2320 {
2321 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2322 }
2323 
2324 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2325 {
2326 	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2327 	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2328 						sbi->log_blocks_per_seg;
2329 
2330 	return segs / sbi->segs_per_sec;
2331 }
2332 
2333 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2334 {
2335 	return sbi->total_valid_block_count;
2336 }
2337 
2338 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2339 {
2340 	return sbi->discard_blks;
2341 }
2342 
2343 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2344 {
2345 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2346 
2347 	/* return NAT or SIT bitmap size in bytes */
2348 	if (flag == NAT_BITMAP)
2349 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2350 	else if (flag == SIT_BITMAP)
2351 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2352 
2353 	return 0;
2354 }
2355 
2356 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2357 {
2358 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2359 }
2360 
2361 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2362 {
2363 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2364 	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2365 	int offset;
2366 
2367 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2368 		offset = (flag == SIT_BITMAP) ?
2369 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2370 		/*
2371 		 * if the large_nat_bitmap feature is enabled, skip the leading
2372 		 * checksum that protects all nat/sit bitmaps.
2373 		 */
2374 		return tmp_ptr + offset + sizeof(__le32);
2375 	}
2376 
2377 	if (__cp_payload(sbi) > 0) {
2378 		if (flag == NAT_BITMAP)
2379 			return &ckpt->sit_nat_version_bitmap;
2380 		else
2381 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2382 	} else {
2383 		offset = (flag == NAT_BITMAP) ?
2384 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2385 		return tmp_ptr + offset;
2386 	}
2387 }
2388 
2389 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2390 {
2391 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2392 
2393 	if (sbi->cur_cp_pack == 2)
2394 		start_addr += sbi->blocks_per_seg;
2395 	return start_addr;
2396 }
2397 
2398 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2399 {
2400 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2401 
2402 	if (sbi->cur_cp_pack == 1)
2403 		start_addr += sbi->blocks_per_seg;
2404 	return start_addr;
2405 }
2406 
2407 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2408 {
2409 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2410 }
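
/*
 * Illustrative sketch: checkpoints ping-pong between two packs located
 * one segment apart, so pack 1 starts at cp_blkaddr and pack 2 at
 * cp_blkaddr + blocks_per_seg; __set_cp_next_pack() flips cur_cp_pack
 * between 1 and 2 after each successful checkpoint.
 */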
2411 
2412 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2413 {
2414 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2415 }
2416 
2417 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2418 					struct inode *inode, bool is_inode)
2419 {
2420 	block_t	valid_block_count;
2421 	unsigned int valid_node_count, user_block_count;
2422 	int err;
2423 
2424 	if (is_inode) {
2425 		if (inode) {
2426 			err = dquot_alloc_inode(inode);
2427 			if (err)
2428 				return err;
2429 		}
2430 	} else {
2431 		err = dquot_reserve_block(inode, 1);
2432 		if (err)
2433 			return err;
2434 	}
2435 
2436 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2437 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2438 		goto enospc;
2439 	}
2440 
2441 	spin_lock(&sbi->stat_lock);
2442 
2443 	valid_block_count = sbi->total_valid_block_count +
2444 					sbi->current_reserved_blocks + 1;
2445 
2446 	if (!__allow_reserved_blocks(sbi, inode, false))
2447 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2448 	user_block_count = sbi->user_block_count;
2449 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2450 		user_block_count -= sbi->unusable_block_count;
2451 
2452 	if (unlikely(valid_block_count > user_block_count)) {
2453 		spin_unlock(&sbi->stat_lock);
2454 		goto enospc;
2455 	}
2456 
2457 	valid_node_count = sbi->total_valid_node_count + 1;
2458 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2459 		spin_unlock(&sbi->stat_lock);
2460 		goto enospc;
2461 	}
2462 
2463 	sbi->total_valid_node_count++;
2464 	sbi->total_valid_block_count++;
2465 	spin_unlock(&sbi->stat_lock);
2466 
2467 	if (inode) {
2468 		if (is_inode)
2469 			f2fs_mark_inode_dirty_sync(inode, true);
2470 		else
2471 			f2fs_i_blocks_write(inode, 1, true, true);
2472 	}
2473 
2474 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2475 	return 0;
2476 
2477 enospc:
2478 	if (is_inode) {
2479 		if (inode)
2480 			dquot_free_inode(inode);
2481 	} else {
2482 		dquot_release_reservation_block(inode, 1);
2483 	}
2484 	return -ENOSPC;
2485 }
2486 
2487 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2488 					struct inode *inode, bool is_inode)
2489 {
2490 	spin_lock(&sbi->stat_lock);
2491 
2492 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2493 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2494 
2495 	sbi->total_valid_node_count--;
2496 	sbi->total_valid_block_count--;
2497 	if (sbi->reserved_blocks &&
2498 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2499 		sbi->current_reserved_blocks++;
2500 
2501 	spin_unlock(&sbi->stat_lock);
2502 
2503 	if (is_inode) {
2504 		dquot_free_inode(inode);
2505 	} else {
2506 		if (unlikely(inode->i_blocks == 0)) {
2507 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2508 				  inode->i_ino,
2509 				  (unsigned long long)inode->i_blocks);
2510 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2511 			return;
2512 		}
2513 		f2fs_i_blocks_write(inode, 1, false, true);
2514 	}
2515 }
2516 
2517 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2518 {
2519 	return sbi->total_valid_node_count;
2520 }
2521 
2522 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2523 {
2524 	percpu_counter_inc(&sbi->total_valid_inode_count);
2525 }
2526 
2527 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2528 {
2529 	percpu_counter_dec(&sbi->total_valid_inode_count);
2530 }
2531 
2532 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2533 {
2534 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2535 }
2536 
2537 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2538 						pgoff_t index, bool for_write)
2539 {
2540 	struct page *page;
2541 
2542 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2543 		if (!for_write)
2544 			page = find_get_page_flags(mapping, index,
2545 							FGP_LOCK | FGP_ACCESSED);
2546 		else
2547 			page = find_lock_page(mapping, index);
2548 		if (page)
2549 			return page;
2550 
2551 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2552 			f2fs_show_injection_info(F2FS_M_SB(mapping),
2553 							FAULT_PAGE_ALLOC);
2554 			return NULL;
2555 		}
2556 	}
2557 
2558 	if (!for_write)
2559 		return grab_cache_page(mapping, index);
2560 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2561 }
2562 
2563 static inline struct page *f2fs_pagecache_get_page(
2564 				struct address_space *mapping, pgoff_t index,
2565 				int fgp_flags, gfp_t gfp_mask)
2566 {
2567 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2568 		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2569 		return NULL;
2570 	}
2571 
2572 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2573 }
2574 
2575 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2576 {
2577 	char *src_kaddr = kmap(src);
2578 	char *dst_kaddr = kmap(dst);
2579 
2580 	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2581 	kunmap(dst);
2582 	kunmap(src);
2583 }
2584 
2585 static inline void f2fs_put_page(struct page *page, int unlock)
2586 {
2587 	if (!page)
2588 		return;
2589 
2590 	if (unlock) {
2591 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2592 		unlock_page(page);
2593 	}
2594 	put_page(page);
2595 }
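
/*
 * Illustrative sketch: pages from f2fs_grab_cache_page() come back
 * locked with an elevated refcount, so the usual epilogue is
 * f2fs_put_page(page, 1) to unlock and drop, or f2fs_put_page(page, 0)
 * if the page was unlocked earlier.
 *
 *	page = f2fs_grab_cache_page(mapping, index, false);
 *	if (!page)
 *		return -ENOMEM;
 *	... use page ...
 *	f2fs_put_page(page, 1);
 */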
2596 
2597 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2598 {
2599 	if (dn->node_page)
2600 		f2fs_put_page(dn->node_page, 1);
2601 	if (dn->inode_page && dn->node_page != dn->inode_page)
2602 		f2fs_put_page(dn->inode_page, 0);
2603 	dn->node_page = NULL;
2604 	dn->inode_page = NULL;
2605 }
2606 
2607 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2608 					size_t size)
2609 {
2610 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2611 }
2612 
2613 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2614 						gfp_t flags)
2615 {
2616 	void *entry;
2617 
2618 	entry = kmem_cache_alloc(cachep, flags);
2619 	if (!entry)
2620 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2621 	return entry;
2622 }
2623 
2624 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2625 			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2626 {
2627 	if (nofail)
2628 		return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2629 
2630 	if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
2631 		f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
2632 		return NULL;
2633 	}
2634 
2635 	return kmem_cache_alloc(cachep, flags);
2636 }
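
/*
 * Illustrative sketch: nofail callers must tolerate the implied
 * __GFP_NOFAIL retry; all other callers must handle NULL, which can
 * also come from FAULT_SLAB_ALLOC injection.
 *
 *	entry = f2fs_kmem_cache_alloc(cachep, GFP_NOFS, false, sbi);
 *	if (!entry)
 *		return -ENOMEM;
 */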
2637 
2638 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2639 {
2640 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2641 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2642 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2643 		get_pages(sbi, F2FS_DIO_READ) ||
2644 		get_pages(sbi, F2FS_DIO_WRITE))
2645 		return true;
2646 
2647 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2648 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2649 		return true;
2650 
2651 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2652 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2653 		return true;
2654 	return false;
2655 }
2656 
2657 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2658 {
2659 	if (sbi->gc_mode == GC_URGENT_HIGH)
2660 		return true;
2661 
2662 	if (is_inflight_io(sbi, type))
2663 		return false;
2664 
2665 	if (sbi->gc_mode == GC_URGENT_LOW &&
2666 			(type == DISCARD_TIME || type == GC_TIME))
2667 		return true;
2668 
2669 	return f2fs_time_over(sbi, type);
2670 }
2671 
2672 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2673 				unsigned long index, void *item)
2674 {
2675 	while (radix_tree_insert(root, index, item))
2676 		cond_resched();
2677 }
2678 
2679 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2680 
2681 static inline bool IS_INODE(struct page *page)
2682 {
2683 	struct f2fs_node *p = F2FS_NODE(page);
2684 
2685 	return RAW_IS_INODE(p);
2686 }
2687 
2688 static inline int offset_in_addr(struct f2fs_inode *i)
2689 {
2690 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2691 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2692 }
2693 
2694 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2695 {
2696 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2697 }
2698 
2699 static inline int f2fs_has_extra_attr(struct inode *inode);
2700 static inline block_t data_blkaddr(struct inode *inode,
2701 			struct page *node_page, unsigned int offset)
2702 {
2703 	struct f2fs_node *raw_node;
2704 	__le32 *addr_array;
2705 	int base = 0;
2706 	bool is_inode = IS_INODE(node_page);
2707 
2708 	raw_node = F2FS_NODE(node_page);
2709 
2710 	if (is_inode) {
2711 		if (!inode)
2712 			/* from GC path only */
2713 			base = offset_in_addr(&raw_node->i);
2714 		else if (f2fs_has_extra_attr(inode))
2715 			base = get_extra_isize(inode);
2716 	}
2717 
2718 	addr_array = blkaddr_in_node(raw_node);
2719 	return le32_to_cpu(addr_array[base + offset]);
2720 }
2721 
2722 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2723 {
2724 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2725 }
2726 
2727 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2728 {
2729 	int mask;
2730 
2731 	addr += (nr >> 3);
2732 	mask = 1 << (7 - (nr & 0x07));
2733 	return mask & *addr;
2734 }
2735 
2736 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2737 {
2738 	int mask;
2739 
2740 	addr += (nr >> 3);
2741 	mask = 1 << (7 - (nr & 0x07));
2742 	*addr |= mask;
2743 }
2744 
2745 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2746 {
2747 	int mask;
2748 
2749 	addr += (nr >> 3);
2750 	mask = 1 << (7 - (nr & 0x07));
2751 	*addr &= ~mask;
2752 }
2753 
2754 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2755 {
2756 	int mask;
2757 	int ret;
2758 
2759 	addr += (nr >> 3);
2760 	mask = 1 << (7 - (nr & 0x07));
2761 	ret = mask & *addr;
2762 	*addr |= mask;
2763 	return ret;
2764 }
2765 
2766 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2767 {
2768 	int mask;
2769 	int ret;
2770 
2771 	addr += (nr >> 3);
2772 	mask = 1 << (7 - (nr & 0x07));
2773 	ret = mask & *addr;
2774 	*addr &= ~mask;
2775 	return ret;
2776 }
2777 
2778 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2779 {
2780 	int mask;
2781 
2782 	addr += (nr >> 3);
2783 	mask = 1 << (7 - (nr & 0x07));
2784 	*addr ^= mask;
2785 }
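
/*
 * Illustrative sketch: unlike the kernel's test_bit()/set_bit(), the
 * helpers above use big-endian bit order within each byte (bit 0 is the
 * 0x80 bit of addr[0]), matching the on-disk bitmap layout.
 *
 *	char map[1] = { 0 };
 *	f2fs_set_bit(0, map);		map[0] is now 0x80
 *	f2fs_test_bit(7, map);		returns 0 (tests the 0x01 bit)
 */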
2786 
2787 /*
2788  * On-disk inode flags (f2fs_inode::i_flags)
2789  */
2790 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
2791 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
2792 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
2793 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
2794 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
2795 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
2796 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
2797 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
2798 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
2799 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
2800 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
2801 
2802 /* Flags that should be inherited by new inodes from their parent. */
2803 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2804 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2805 			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2806 
2807 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2808 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2809 				F2FS_CASEFOLD_FL))
2810 
2811 /* Flags that are appropriate for non-directories/regular files. */
2812 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2813 
2814 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2815 {
2816 	if (S_ISDIR(mode))
2817 		return flags;
2818 	else if (S_ISREG(mode))
2819 		return flags & F2FS_REG_FLMASK;
2820 	else
2821 		return flags & F2FS_OTHER_FLMASK;
2822 }
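
/*
 * Illustrative sketch: when creating an inode, inherited parent flags
 * are masked by the new file's type, roughly:
 *
 *	fi->i_flags = f2fs_mask_flags(mode,
 *			F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 */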
2823 
2824 static inline void __mark_inode_dirty_flag(struct inode *inode,
2825 						int flag, bool set)
2826 {
2827 	switch (flag) {
2828 	case FI_INLINE_XATTR:
2829 	case FI_INLINE_DATA:
2830 	case FI_INLINE_DENTRY:
2831 	case FI_NEW_INODE:
2832 		if (set)
2833 			return;
2834 		fallthrough;
2835 	case FI_DATA_EXIST:
2836 	case FI_INLINE_DOTS:
2837 	case FI_PIN_FILE:
2838 	case FI_COMPRESS_RELEASED:
2839 		f2fs_mark_inode_dirty_sync(inode, true);
2840 	}
2841 }
2842 
2843 static inline void set_inode_flag(struct inode *inode, int flag)
2844 {
2845 	set_bit(flag, F2FS_I(inode)->flags);
2846 	__mark_inode_dirty_flag(inode, flag, true);
2847 }
2848 
2849 static inline int is_inode_flag_set(struct inode *inode, int flag)
2850 {
2851 	return test_bit(flag, F2FS_I(inode)->flags);
2852 }
2853 
2854 static inline void clear_inode_flag(struct inode *inode, int flag)
2855 {
2856 	clear_bit(flag, F2FS_I(inode)->flags);
2857 	__mark_inode_dirty_flag(inode, flag, false);
2858 }
2859 
2860 static inline bool f2fs_verity_in_progress(struct inode *inode)
2861 {
2862 	return IS_ENABLED(CONFIG_FS_VERITY) &&
2863 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2864 }
2865 
2866 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2867 {
2868 	F2FS_I(inode)->i_acl_mode = mode;
2869 	set_inode_flag(inode, FI_ACL_MODE);
2870 	f2fs_mark_inode_dirty_sync(inode, false);
2871 }
2872 
2873 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2874 {
2875 	if (inc)
2876 		inc_nlink(inode);
2877 	else
2878 		drop_nlink(inode);
2879 	f2fs_mark_inode_dirty_sync(inode, true);
2880 }
2881 
2882 static inline void f2fs_i_blocks_write(struct inode *inode,
2883 					block_t diff, bool add, bool claim)
2884 {
2885 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2886 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2887 
2888 	/* add == true && claim == true must pair with a prior dquot_reserve_block() */
2889 	if (add) {
2890 		if (claim)
2891 			dquot_claim_block(inode, diff);
2892 		else
2893 			dquot_alloc_block_nofail(inode, diff);
2894 	} else {
2895 		dquot_free_block(inode, diff);
2896 	}
2897 
2898 	f2fs_mark_inode_dirty_sync(inode, true);
2899 	if (clean || recover)
2900 		set_inode_flag(inode, FI_AUTO_RECOVER);
2901 }
2902 
2903 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
2904 {
2905 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2906 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2907 
2908 	if (i_size_read(inode) == i_size)
2909 		return;
2910 
2911 	i_size_write(inode, i_size);
2912 	f2fs_mark_inode_dirty_sync(inode, true);
2913 	if (clean || recover)
2914 		set_inode_flag(inode, FI_AUTO_RECOVER);
2915 }
2916 
2917 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
2918 {
2919 	F2FS_I(inode)->i_current_depth = depth;
2920 	f2fs_mark_inode_dirty_sync(inode, true);
2921 }
2922 
2923 static inline void f2fs_i_gc_failures_write(struct inode *inode,
2924 					unsigned int count)
2925 {
2926 	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
2927 	f2fs_mark_inode_dirty_sync(inode, true);
2928 }
2929 
2930 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
2931 {
2932 	F2FS_I(inode)->i_xattr_nid = xnid;
2933 	f2fs_mark_inode_dirty_sync(inode, true);
2934 }
2935 
2936 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
2937 {
2938 	F2FS_I(inode)->i_pino = pino;
2939 	f2fs_mark_inode_dirty_sync(inode, true);
2940 }
2941 
2942 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
2943 {
2944 	struct f2fs_inode_info *fi = F2FS_I(inode);
2945 
2946 	if (ri->i_inline & F2FS_INLINE_XATTR)
2947 		set_bit(FI_INLINE_XATTR, fi->flags);
2948 	if (ri->i_inline & F2FS_INLINE_DATA)
2949 		set_bit(FI_INLINE_DATA, fi->flags);
2950 	if (ri->i_inline & F2FS_INLINE_DENTRY)
2951 		set_bit(FI_INLINE_DENTRY, fi->flags);
2952 	if (ri->i_inline & F2FS_DATA_EXIST)
2953 		set_bit(FI_DATA_EXIST, fi->flags);
2954 	if (ri->i_inline & F2FS_INLINE_DOTS)
2955 		set_bit(FI_INLINE_DOTS, fi->flags);
2956 	if (ri->i_inline & F2FS_EXTRA_ATTR)
2957 		set_bit(FI_EXTRA_ATTR, fi->flags);
2958 	if (ri->i_inline & F2FS_PIN_FILE)
2959 		set_bit(FI_PIN_FILE, fi->flags);
2960 	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
2961 		set_bit(FI_COMPRESS_RELEASED, fi->flags);
2962 }
2963 
2964 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
2965 {
2966 	ri->i_inline = 0;
2967 
2968 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
2969 		ri->i_inline |= F2FS_INLINE_XATTR;
2970 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
2971 		ri->i_inline |= F2FS_INLINE_DATA;
2972 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
2973 		ri->i_inline |= F2FS_INLINE_DENTRY;
2974 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
2975 		ri->i_inline |= F2FS_DATA_EXIST;
2976 	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
2977 		ri->i_inline |= F2FS_INLINE_DOTS;
2978 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
2979 		ri->i_inline |= F2FS_EXTRA_ATTR;
2980 	if (is_inode_flag_set(inode, FI_PIN_FILE))
2981 		ri->i_inline |= F2FS_PIN_FILE;
2982 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
2983 		ri->i_inline |= F2FS_COMPRESS_RELEASED;
2984 }
2985 
2986 static inline int f2fs_has_extra_attr(struct inode *inode)
2987 {
2988 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
2989 }
2990 
2991 static inline int f2fs_has_inline_xattr(struct inode *inode)
2992 {
2993 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
2994 }
2995 
2996 static inline int f2fs_compressed_file(struct inode *inode)
2997 {
2998 	return S_ISREG(inode->i_mode) &&
2999 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3000 }
3001 
3002 static inline bool f2fs_need_compress_data(struct inode *inode)
3003 {
3004 	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3005 
3006 	if (!f2fs_compressed_file(inode))
3007 		return false;
3008 
3009 	if (compress_mode == COMPR_MODE_FS)
3010 		return true;
3011 	else if (compress_mode == COMPR_MODE_USER &&
3012 			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3013 		return true;
3014 
3015 	return false;
3016 }
3017 
3018 static inline unsigned int addrs_per_inode(struct inode *inode)
3019 {
3020 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
3021 				get_inline_xattr_addrs(inode);
3022 
3023 	if (!f2fs_compressed_file(inode))
3024 		return addrs;
3025 	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3026 }
3027 
3028 static inline unsigned int addrs_per_block(struct inode *inode)
3029 {
3030 	if (!f2fs_compressed_file(inode))
3031 		return DEF_ADDRS_PER_BLOCK;
3032 	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
3033 }
3034 
3035 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
3036 {
3037 	struct f2fs_inode *ri = F2FS_INODE(page);
3038 
3039 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3040 					get_inline_xattr_addrs(inode)]);
3041 }
3042 
3043 static inline int inline_xattr_size(struct inode *inode)
3044 {
3045 	if (f2fs_has_inline_xattr(inode))
3046 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
3047 	return 0;
3048 }
3049 
3050 static inline int f2fs_has_inline_data(struct inode *inode)
3051 {
3052 	return is_inode_flag_set(inode, FI_INLINE_DATA);
3053 }
3054 
3055 static inline int f2fs_exist_data(struct inode *inode)
3056 {
3057 	return is_inode_flag_set(inode, FI_DATA_EXIST);
3058 }
3059 
3060 static inline int f2fs_has_inline_dots(struct inode *inode)
3061 {
3062 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
3063 }
3064 
3065 static inline int f2fs_is_mmap_file(struct inode *inode)
3066 {
3067 	return is_inode_flag_set(inode, FI_MMAP_FILE);
3068 }
3069 
3070 static inline bool f2fs_is_pinned_file(struct inode *inode)
3071 {
3072 	return is_inode_flag_set(inode, FI_PIN_FILE);
3073 }
3074 
3075 static inline bool f2fs_is_atomic_file(struct inode *inode)
3076 {
3077 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3078 }
3079 
3080 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
3081 {
3082 	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
3083 }
3084 
3085 static inline bool f2fs_is_volatile_file(struct inode *inode)
3086 {
3087 	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
3088 }
3089 
3090 static inline bool f2fs_is_first_block_written(struct inode *inode)
3091 {
3092 	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
3093 }
3094 
3095 static inline bool f2fs_is_drop_cache(struct inode *inode)
3096 {
3097 	return is_inode_flag_set(inode, FI_DROP_CACHE);
3098 }
3099 
3100 static inline void *inline_data_addr(struct inode *inode, struct page *page)
3101 {
3102 	struct f2fs_inode *ri = F2FS_INODE(page);
3103 	int extra_size = get_extra_isize(inode);
3104 
3105 	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
3106 }
3107 
3108 static inline int f2fs_has_inline_dentry(struct inode *inode)
3109 {
3110 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3111 }
3112 
3113 static inline int is_file(struct inode *inode, int type)
3114 {
3115 	return F2FS_I(inode)->i_advise & type;
3116 }
3117 
3118 static inline void set_file(struct inode *inode, int type)
3119 {
3120 	F2FS_I(inode)->i_advise |= type;
3121 	f2fs_mark_inode_dirty_sync(inode, true);
3122 }
3123 
3124 static inline void clear_file(struct inode *inode, int type)
3125 {
3126 	F2FS_I(inode)->i_advise &= ~type;
3127 	f2fs_mark_inode_dirty_sync(inode, true);
3128 }
3129 
3130 static inline bool f2fs_is_time_consistent(struct inode *inode)
3131 {
3132 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
3133 		return false;
3134 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
3135 		return false;
3136 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
3137 		return false;
3138 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
3139 						&F2FS_I(inode)->i_crtime))
3140 		return false;
3141 	return true;
3142 }
3143 
3144 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3145 {
3146 	bool ret;
3147 
3148 	if (dsync) {
3149 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3150 
3151 		spin_lock(&sbi->inode_lock[DIRTY_META]);
3152 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
3153 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
3154 		return ret;
3155 	}
3156 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3157 			file_keep_isize(inode) ||
3158 			i_size_read(inode) & ~PAGE_MASK)
3159 		return false;
3160 
3161 	if (!f2fs_is_time_consistent(inode))
3162 		return false;
3163 
3164 	spin_lock(&F2FS_I(inode)->i_size_lock);
3165 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3166 	spin_unlock(&F2FS_I(inode)->i_size_lock);
3167 
3168 	return ret;
3169 }
3170 
3171 static inline bool f2fs_readonly(struct super_block *sb)
3172 {
3173 	return sb_rdonly(sb);
3174 }
3175 
3176 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3177 {
3178 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3179 }
3180 
3181 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3182 {
3183 	if (len == 1 && name[0] == '.')
3184 		return true;
3185 
3186 	if (len == 2 && name[0] == '.' && name[1] == '.')
3187 		return true;
3188 
3189 	return false;
3190 }
3191 
3192 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3193 					size_t size, gfp_t flags)
3194 {
3195 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
3196 		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
3197 		return NULL;
3198 	}
3199 
3200 	return kmalloc(size, flags);
3201 }
3202 
3203 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3204 					size_t size, gfp_t flags)
3205 {
3206 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3207 }
3208 
3209 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3210 					size_t size, gfp_t flags)
3211 {
3212 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3213 		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3214 		return NULL;
3215 	}
3216 
3217 	return kvmalloc(size, flags);
3218 }
3219 
3220 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3221 					size_t size, gfp_t flags)
3222 {
3223 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3224 }
3225 
3226 static inline int get_extra_isize(struct inode *inode)
3227 {
3228 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3229 }
3230 
3231 static inline int get_inline_xattr_addrs(struct inode *inode)
3232 {
3233 	return F2FS_I(inode)->i_inline_xattr_size;
3234 }
3235 
3236 #define f2fs_get_inode_mode(i) \
3237 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3238 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3239 
3240 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3241 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3242 	offsetof(struct f2fs_inode, i_extra_isize))	\
3243 
3244 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3245 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3246 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3247 		sizeof((f2fs_inode)->field))			\
3248 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
3249 
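/*
 * Illustrative sketch: before reading an extended attribute field, code
 * checks that this on-disk inode was formatted with enough extra space,
 * e.g. for the real i_projid field:
 *
 *	if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 *		... i_projid is valid in this inode ...
 */
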
3250 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
3251 
3252 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3253 
3254 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3255 					block_t blkaddr, int type);
3256 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3257 					block_t blkaddr, int type)
3258 {
3259 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3260 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3261 			 blkaddr, type);
3262 		f2fs_bug_on(sbi, 1);
3263 	}
3264 }
3265 
3266 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3267 {
3268 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3269 			blkaddr == COMPRESS_ADDR)
3270 		return false;
3271 	return true;
3272 }
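
/*
 * Sketch of the intended pairing (illustrative): __is_valid_data_blkaddr()
 * filters out the special NEW_ADDR/NULL_ADDR/COMPRESS_ADDR markers, and
 * verify_blkaddr() then bug-checks that a real address lies in the area
 * expected for its type:
 *
 *	if (__is_valid_data_blkaddr(blkaddr))
 *		verify_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE);
 */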
3273 
3274 /*
3275  * file.c
3276  */
3277 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3278 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3279 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3280 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3281 int f2fs_truncate(struct inode *inode);
3282 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
3283 		 struct kstat *stat, u32 request_mask, unsigned int flags);
3284 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
3285 		 struct iattr *attr);
3286 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3287 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3288 int f2fs_precache_extents(struct inode *inode);
3289 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
3290 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3291 		      struct dentry *dentry, struct fileattr *fa);
3292 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3293 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3294 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3295 int f2fs_pin_file_control(struct inode *inode, bool inc);
3296 
3297 /*
3298  * inode.c
3299  */
3300 void f2fs_set_inode_flags(struct inode *inode);
3301 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3302 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3303 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3304 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3305 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3306 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3307 void f2fs_update_inode_page(struct inode *inode);
3308 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3309 void f2fs_evict_inode(struct inode *inode);
3310 void f2fs_handle_failed_inode(struct inode *inode);
3311 
3312 /*
3313  * namei.c
3314  */
3315 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3316 							bool hot, bool set);
3317 struct dentry *f2fs_get_parent(struct dentry *child);
3318 
3319 /*
3320  * dir.c
3321  */
3322 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3323 int f2fs_init_casefolded_name(const struct inode *dir,
3324 			      struct f2fs_filename *fname);
3325 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3326 			int lookup, struct f2fs_filename *fname);
3327 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3328 			struct f2fs_filename *fname);
3329 void f2fs_free_filename(struct f2fs_filename *fname);
3330 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3331 			const struct f2fs_filename *fname, int *max_slots);
3332 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3333 			unsigned int start_pos, struct fscrypt_str *fstr);
3334 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3335 			struct f2fs_dentry_ptr *d);
3336 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3337 			const struct f2fs_filename *fname, struct page *dpage);
3338 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3339 			unsigned int current_depth);
3340 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3341 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3342 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3343 					 const struct f2fs_filename *fname,
3344 					 struct page **res_page);
3345 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3346 			const struct qstr *child, struct page **res_page);
3347 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3348 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3349 			struct page **page);
3350 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3351 			struct page *page, struct inode *inode);
3352 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3353 			  const struct f2fs_filename *fname);
3354 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3355 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3356 			unsigned int bit_pos);
3357 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3358 			struct inode *inode, nid_t ino, umode_t mode);
3359 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3360 			struct inode *inode, nid_t ino, umode_t mode);
3361 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3362 			struct inode *inode, nid_t ino, umode_t mode);
3363 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3364 			struct inode *dir, struct inode *inode);
3365 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3366 bool f2fs_empty_dir(struct inode *dir);
3367 
3368 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3369 {
3370 	if (fscrypt_is_nokey_name(dentry))
3371 		return -ENOKEY;
3372 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3373 				inode, inode->i_ino, inode->i_mode);
3374 }
3375 
3376 /*
3377  * super.c
3378  */
3379 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3380 void f2fs_inode_synced(struct inode *inode);
3381 int f2fs_dquot_initialize(struct inode *inode);
3382 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3383 int f2fs_quota_sync(struct super_block *sb, int type);
3384 loff_t max_file_blocks(struct inode *inode);
3385 void f2fs_quota_off_umount(struct super_block *sb);
3386 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3387 int f2fs_sync_fs(struct super_block *sb, int sync);
3388 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3389 
3390 /*
3391  * hash.c
3392  */
3393 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3394 
3395 /*
3396  * node.c
3397  */
3398 struct node_info;
3399 
3400 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3401 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3402 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3403 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3404 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3405 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3406 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3407 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3408 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3409 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3410 						struct node_info *ni);
3411 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3412 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3413 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3414 int f2fs_truncate_xattr_node(struct inode *inode);
3415 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3416 					unsigned int seq_id);
3417 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
3418 int f2fs_remove_inode_page(struct inode *inode);
3419 struct page *f2fs_new_inode_page(struct inode *inode);
3420 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3421 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3422 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3423 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3424 int f2fs_move_node_page(struct page *node_page, int gc_type);
3425 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3426 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3427 			struct writeback_control *wbc, bool atomic,
3428 			unsigned int *seq_id);
3429 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3430 			struct writeback_control *wbc,
3431 			bool do_balance, enum iostat_type io_type);
3432 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3433 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3434 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3435 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3436 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3437 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3438 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3439 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3440 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3441 			unsigned int segno, struct f2fs_summary_block *sum);
3442 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
3443 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3444 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3445 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3446 int __init f2fs_create_node_manager_caches(void);
3447 void f2fs_destroy_node_manager_caches(void);
3448 
3449 /*
3450  * segment.c
3451  */
3452 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3453 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3454 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3455 void f2fs_drop_inmem_pages(struct inode *inode);
3456 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3457 int f2fs_commit_inmem_pages(struct inode *inode);
3458 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3459 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3460 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3461 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3462 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3463 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3464 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3465 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3466 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
3467 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3468 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3469 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3470 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3471 					struct cp_control *cpc);
3472 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3473 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3474 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3475 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3476 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3477 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3478 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3479 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3480 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3481 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3482 			unsigned int *newseg, bool new_sec, int dir);
3483 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3484 					unsigned int start, unsigned int end);
3485 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3486 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3487 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3488 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3489 					struct cp_control *cpc);
3490 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3491 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3492 					block_t blk_addr);
3493 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3494 						enum iostat_type io_type);
3495 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3496 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3497 			struct f2fs_io_info *fio);
3498 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3499 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3500 			block_t old_blkaddr, block_t new_blkaddr,
3501 			bool recover_curseg, bool recover_newaddr,
3502 			bool from_gc);
3503 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3504 			block_t old_addr, block_t new_addr,
3505 			unsigned char version, bool recover_curseg,
3506 			bool recover_newaddr);
3507 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3508 			block_t old_blkaddr, block_t *new_blkaddr,
3509 			struct f2fs_summary *sum, int type,
3510 			struct f2fs_io_info *fio);
3511 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3512 					block_t blkaddr, unsigned int blkcnt);
3513 void f2fs_wait_on_page_writeback(struct page *page,
3514 			enum page_type type, bool ordered, bool locked);
3515 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3516 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3517 								block_t len);
3518 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3519 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3520 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3521 			unsigned int val, int alloc);
3522 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3523 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3524 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3525 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3526 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3527 int __init f2fs_create_segment_manager_caches(void);
3528 void f2fs_destroy_segment_manager_caches(void);
3529 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3530 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3531 			enum page_type type, enum temp_type temp);
3532 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3533 			unsigned int segno);
3534 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3535 			unsigned int segno);
3536 
3537 #define DEF_FRAGMENT_SIZE	4
3538 #define MIN_FRAGMENT_SIZE	1
3539 #define MAX_FRAGMENT_SIZE	512
3540 
3541 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
3542 {
3543 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
3544 		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
3545 }
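
/*
 * Illustrative sketch: when this returns true, segment allocation picks a
 * pseudo-random starting point, conceptually along the lines of
 *
 *	if (f2fs_need_rand_seg(sbi))
 *		segno = prandom_u32() % total_segs;
 *
 * ("total_segs" is a placeholder; the real selection logic lives in
 * segment.c, with fragment sizes bounded by MIN/MAX_FRAGMENT_SIZE above.)
 */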
3546 
3547 /*
3548  * checkpoint.c
3549  */
3550 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
3551 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3552 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3553 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3554 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3555 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3556 					block_t blkaddr, int type);
3557 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3558 			int type, bool sync);
3559 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
3560 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3561 			long nr_to_write, enum iostat_type io_type);
3562 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3563 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3564 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3565 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3566 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3567 					unsigned int devidx, int type);
3568 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3569 					unsigned int devidx, int type);
3570 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3571 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3572 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3573 void f2fs_add_orphan_inode(struct inode *inode);
3574 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3575 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3576 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3577 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3578 void f2fs_remove_dirty_inode(struct inode *inode);
3579 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3580 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3581 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3582 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3583 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3584 int __init f2fs_create_checkpoint_caches(void);
3585 void f2fs_destroy_checkpoint_caches(void);
3586 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3587 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3588 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3589 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3590 
3591 /*
3592  * data.c
3593  */
3594 int __init f2fs_init_bioset(void);
3595 void f2fs_destroy_bioset(void);
3596 int f2fs_init_bio_entry_cache(void);
3597 void f2fs_destroy_bio_entry_cache(void);
3598 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3599 				struct bio *bio, enum page_type type);
3600 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3601 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3602 				struct inode *inode, struct page *page,
3603 				nid_t ino, enum page_type type);
3604 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3605 					struct bio **bio, struct page *page);
3606 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3607 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3608 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3609 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3610 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3611 			block_t blk_addr, struct bio *bio);
3612 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3613 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3614 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3615 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3616 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3617 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3618 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3619 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3620 			int op_flags, bool for_write);
3621 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3622 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3623 			bool for_write);
3624 struct page *f2fs_get_new_data_page(struct inode *inode,
3625 			struct page *ipage, pgoff_t index, bool new_i_size);
3626 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3627 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3628 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3629 			int create, int flag);
3630 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3631 			u64 start, u64 len);
3632 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3633 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3634 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3635 int f2fs_write_single_data_page(struct page *page, int *submitted,
3636 				struct bio **bio, sector_t *last_block,
3637 				struct writeback_control *wbc,
3638 				enum iostat_type io_type,
3639 				int compr_blocks, bool allow_balance);
3640 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3641 			unsigned int length);
3642 int f2fs_release_page(struct page *page, gfp_t wait);
3643 #ifdef CONFIG_MIGRATION
3644 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3645 			struct page *page, enum migrate_mode mode);
3646 #endif
3647 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3648 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3649 int f2fs_init_post_read_processing(void);
3650 void f2fs_destroy_post_read_processing(void);
3651 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3652 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3653 
3654 /*
3655  * gc.c
3656  */
3657 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3658 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3659 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3660 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
3661 			unsigned int segno);
3662 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3663 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3664 int __init f2fs_create_garbage_collection_cache(void);
3665 void f2fs_destroy_garbage_collection_cache(void);
3666 
3667 /*
3668  * recovery.c
3669  */
3670 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3671 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3672 int __init f2fs_create_recovery_cache(void);
3673 void f2fs_destroy_recovery_cache(void);
3674 
3675 /*
3676  * debug.c
3677  */
3678 #ifdef CONFIG_F2FS_STAT_FS
3679 struct f2fs_stat_info {
3680 	struct list_head stat_list;
3681 	struct f2fs_sb_info *sbi;
3682 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3683 	int main_area_segs, main_area_sections, main_area_zones;
3684 	unsigned long long hit_largest, hit_cached, hit_rbtree;
3685 	unsigned long long hit_total, total_ext;
3686 	int ext_tree, zombie_tree, ext_node;
3687 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3688 	int ndirty_data, ndirty_qdata;
3689 	int inmem_pages;
3690 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3691 	int nats, dirty_nats, sits, dirty_sits;
3692 	int free_nids, avail_nids, alloc_nids;
3693 	int total_count, utilization;
3694 	int bg_gc, nr_wb_cp_data, nr_wb_data;
3695 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3696 	int nr_dio_read, nr_dio_write;
3697 	unsigned int io_skip_bggc, other_skip_bggc;
3698 	int nr_flushing, nr_flushed, flush_list_empty;
3699 	int nr_discarding, nr_discarded;
3700 	int nr_discard_cmd;
3701 	unsigned int undiscard_blks;
3702 	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3703 	unsigned int cur_ckpt_time, peak_ckpt_time;
3704 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3705 	int compr_inode;
3706 	unsigned long long compr_blocks;
3707 	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3708 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3709 	unsigned int bimodal, avg_vblocks;
3710 	int util_free, util_valid, util_invalid;
3711 	int rsvd_segs, overp_segs;
3712 	int dirty_count, node_pages, meta_pages, compress_pages;
3713 	int compress_page_hit;
3714 	int prefree_count, call_count, cp_count, bg_cp_count;
3715 	int tot_segs, node_segs, data_segs, free_segs, free_secs;
3716 	int bg_node_segs, bg_data_segs;
3717 	int tot_blks, data_blks, node_blks;
3718 	int bg_data_blks, bg_node_blks;
3719 	unsigned long long skipped_atomic_files[2];
3720 	int curseg[NR_CURSEG_TYPE];
3721 	int cursec[NR_CURSEG_TYPE];
3722 	int curzone[NR_CURSEG_TYPE];
3723 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3724 	unsigned int full_seg[NR_CURSEG_TYPE];
3725 	unsigned int valid_blks[NR_CURSEG_TYPE];
3726 
3727 	unsigned int meta_count[META_MAX];
3728 	unsigned int segment_count[2];
3729 	unsigned int block_count[2];
3730 	unsigned int inplace_count;
3731 	unsigned long long base_mem, cache_mem, page_mem;
3732 };
3733 
3734 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3735 {
3736 	return (struct f2fs_stat_info *)sbi->stat_info;
3737 }
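
/*
 * Usage sketch (illustrative): stat sites pair F2FS_STAT() with the helper
 * macros below, e.g. a checkpoint path might do
 *
 *	struct f2fs_stat_info *si = F2FS_STAT(sbi);
 *
 *	stat_inc_cp_count(si);
 *
 * All of these compile away to no-ops when CONFIG_F2FS_STAT_FS is off.
 */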
3738 
3739 #define stat_inc_cp_count(si)		((si)->cp_count++)
3740 #define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
3741 #define stat_inc_call_count(si)		((si)->call_count++)
3742 #define stat_inc_bggc_count(si)		((si)->bg_gc++)
3743 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
3744 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
3745 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
3746 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
3747 #define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
3748 #define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
3749 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
3750 #define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
3751 #define stat_inc_inline_xattr(inode)					\
3752 	do {								\
3753 		if (f2fs_has_inline_xattr(inode))			\
3754 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
3755 	} while (0)
3756 #define stat_dec_inline_xattr(inode)					\
3757 	do {								\
3758 		if (f2fs_has_inline_xattr(inode))			\
3759 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
3760 	} while (0)
3761 #define stat_inc_inline_inode(inode)					\
3762 	do {								\
3763 		if (f2fs_has_inline_data(inode))			\
3764 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
3765 	} while (0)
3766 #define stat_dec_inline_inode(inode)					\
3767 	do {								\
3768 		if (f2fs_has_inline_data(inode))			\
3769 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
3770 	} while (0)
3771 #define stat_inc_inline_dir(inode)					\
3772 	do {								\
3773 		if (f2fs_has_inline_dentry(inode))			\
3774 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
3775 	} while (0)
3776 #define stat_dec_inline_dir(inode)					\
3777 	do {								\
3778 		if (f2fs_has_inline_dentry(inode))			\
3779 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
3780 	} while (0)
3781 #define stat_inc_compr_inode(inode)					\
3782 	do {								\
3783 		if (f2fs_compressed_file(inode))			\
3784 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
3785 	} while (0)
3786 #define stat_dec_compr_inode(inode)					\
3787 	do {								\
3788 		if (f2fs_compressed_file(inode))			\
3789 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
3790 	} while (0)
3791 #define stat_add_compr_blocks(inode, blocks)				\
3792 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3793 #define stat_sub_compr_blocks(inode, blocks)				\
3794 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3795 #define stat_inc_meta_count(sbi, blkaddr)				\
3796 	do {								\
3797 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
3798 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
3799 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
3800 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
3801 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
3802 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
3803 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
3804 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
3805 	} while (0)
3806 #define stat_inc_seg_type(sbi, curseg)					\
3807 		((sbi)->segment_count[(curseg)->alloc_type]++)
3808 #define stat_inc_block_count(sbi, curseg)				\
3809 		((sbi)->block_count[(curseg)->alloc_type]++)
3810 #define stat_inc_inplace_blocks(sbi)					\
3811 		(atomic_inc(&(sbi)->inplace_count))
3812 #define stat_update_max_atomic_write(inode)				\
3813 	do {								\
3814 		int cur = F2FS_I_SB(inode)->atomic_files;	\
3815 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
3816 		if (cur > max)						\
3817 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
3818 	} while (0)
3819 #define stat_inc_volatile_write(inode)					\
3820 		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3821 #define stat_dec_volatile_write(inode)					\
3822 		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
3823 #define stat_update_max_volatile_write(inode)				\
3824 	do {								\
3825 		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
3826 		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
3827 		if (cur > max)						\
3828 			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
3829 	} while (0)
3830 #define stat_inc_seg_count(sbi, type, gc_type)				\
3831 	do {								\
3832 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3833 		si->tot_segs++;						\
3834 		if ((type) == SUM_TYPE_DATA) {				\
3835 			si->data_segs++;				\
3836 			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
3837 		} else {						\
3838 			si->node_segs++;				\
3839 			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
3840 		}							\
3841 	} while (0)
3842 
3843 #define stat_inc_tot_blk_count(si, blks)				\
3844 	((si)->tot_blks += (blks))
3845 
3846 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
3847 	do {								\
3848 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3849 		stat_inc_tot_blk_count(si, blks);			\
3850 		si->data_blks += (blks);				\
3851 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3852 	} while (0)
3853 
3854 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
3855 	do {								\
3856 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3857 		stat_inc_tot_blk_count(si, blks);			\
3858 		si->node_blks += (blks);				\
3859 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3860 	} while (0)
3861 
3862 int f2fs_build_stats(struct f2fs_sb_info *sbi);
3863 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
3864 void __init f2fs_create_root_stats(void);
3865 void f2fs_destroy_root_stats(void);
3866 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
3867 #else
3868 #define stat_inc_cp_count(si)				do { } while (0)
3869 #define stat_inc_bg_cp_count(si)			do { } while (0)
3870 #define stat_inc_call_count(si)				do { } while (0)
3871 #define stat_inc_bggc_count(si)				do { } while (0)
3872 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
3873 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
3874 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
3875 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
3876 #define stat_inc_total_hit(sbi)				do { } while (0)
3877 #define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
3878 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
3879 #define stat_inc_cached_node_hit(sbi)			do { } while (0)
3880 #define stat_inc_inline_xattr(inode)			do { } while (0)
3881 #define stat_dec_inline_xattr(inode)			do { } while (0)
3882 #define stat_inc_inline_inode(inode)			do { } while (0)
3883 #define stat_dec_inline_inode(inode)			do { } while (0)
3884 #define stat_inc_inline_dir(inode)			do { } while (0)
3885 #define stat_dec_inline_dir(inode)			do { } while (0)
3886 #define stat_inc_compr_inode(inode)			do { } while (0)
3887 #define stat_dec_compr_inode(inode)			do { } while (0)
3888 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
3889 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
3890 #define stat_update_max_atomic_write(inode)		do { } while (0)
3891 #define stat_inc_volatile_write(inode)			do { } while (0)
3892 #define stat_dec_volatile_write(inode)			do { } while (0)
3893 #define stat_update_max_volatile_write(inode)		do { } while (0)
3894 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
3895 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
3896 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
3897 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
3898 #define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
3899 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
3900 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
3901 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
3902 
3903 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
3904 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
3905 static inline void __init f2fs_create_root_stats(void) { }
3906 static inline void f2fs_destroy_root_stats(void) { }
3907 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
3908 #endif
3909 
3910 extern const struct file_operations f2fs_dir_operations;
3911 extern const struct file_operations f2fs_file_operations;
3912 extern const struct inode_operations f2fs_file_inode_operations;
3913 extern const struct address_space_operations f2fs_dblock_aops;
3914 extern const struct address_space_operations f2fs_node_aops;
3915 extern const struct address_space_operations f2fs_meta_aops;
3916 extern const struct inode_operations f2fs_dir_inode_operations;
3917 extern const struct inode_operations f2fs_symlink_inode_operations;
3918 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
3919 extern const struct inode_operations f2fs_special_inode_operations;
3920 extern struct kmem_cache *f2fs_inode_entry_slab;
3921 
3922 /*
3923  * inline.c
3924  */
3925 bool f2fs_may_inline_data(struct inode *inode);
3926 bool f2fs_may_inline_dentry(struct inode *inode);
3927 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
3928 void f2fs_truncate_inline_inode(struct inode *inode,
3929 						struct page *ipage, u64 from);
3930 int f2fs_read_inline_data(struct inode *inode, struct page *page);
3931 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
3932 int f2fs_convert_inline_inode(struct inode *inode);
3933 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
3934 int f2fs_write_inline_data(struct inode *inode, struct page *page);
3935 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
3936 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
3937 					const struct f2fs_filename *fname,
3938 					struct page **res_page);
3939 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
3940 			struct page *ipage);
3941 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
3942 			struct inode *inode, nid_t ino, umode_t mode);
3943 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
3944 				struct page *page, struct inode *dir,
3945 				struct inode *inode);
3946 bool f2fs_empty_inline_dir(struct inode *dir);
3947 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
3948 			struct fscrypt_str *fstr);
3949 int f2fs_inline_data_fiemap(struct inode *inode,
3950 			struct fiemap_extent_info *fieinfo,
3951 			__u64 start, __u64 len);
3952 
3953 /*
3954  * shrinker.c
3955  */
3956 unsigned long f2fs_shrink_count(struct shrinker *shrink,
3957 			struct shrink_control *sc);
3958 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
3959 			struct shrink_control *sc);
3960 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
3961 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
3962 
3963 /*
3964  * extent_cache.c
3965  */
3966 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
3967 				struct rb_entry *cached_re, unsigned int ofs);
3968 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
3969 				struct rb_root_cached *root,
3970 				struct rb_node **parent,
3971 				unsigned long long key, bool *left_most);
3972 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
3973 				struct rb_root_cached *root,
3974 				struct rb_node **parent,
3975 				unsigned int ofs, bool *leftmost);
3976 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
3977 		struct rb_entry *cached_re, unsigned int ofs,
3978 		struct rb_entry **prev_entry, struct rb_entry **next_entry,
3979 		struct rb_node ***insert_p, struct rb_node **insert_parent,
3980 		bool force, bool *leftmost);
3981 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
3982 				struct rb_root_cached *root, bool check_key);
3983 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
3984 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
3985 void f2fs_drop_extent_tree(struct inode *inode);
3986 unsigned int f2fs_destroy_extent_node(struct inode *inode);
3987 void f2fs_destroy_extent_tree(struct inode *inode);
3988 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
3989 			struct extent_info *ei);
3990 void f2fs_update_extent_cache(struct dnode_of_data *dn);
3991 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
3992 			pgoff_t fofs, block_t blkaddr, unsigned int len);
3993 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
3994 int __init f2fs_create_extent_cache(void);
3995 void f2fs_destroy_extent_cache(void);
3996 
3997 /*
3998  * sysfs.c
3999  */
4000 #define MIN_RA_MUL	2
4001 #define MAX_RA_MUL	256
4002 
4003 int __init f2fs_init_sysfs(void);
4004 void f2fs_exit_sysfs(void);
4005 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4006 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4007 
4008 /* verity.c */
4009 extern const struct fsverity_operations f2fs_verityops;
4010 
4011 /*
4012  * crypto support
4013  */
4014 static inline bool f2fs_encrypted_file(struct inode *inode)
4015 {
4016 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4017 }
4018 
4019 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4020 {
4021 #ifdef CONFIG_FS_ENCRYPTION
4022 	file_set_encrypt(inode);
4023 	f2fs_set_inode_flags(inode);
4024 #endif
4025 }
4026 
4027 /*
4028  * Returns true if the reads of the inode's data need to undergo some
4029  * postprocessing step, like decryption or authenticity verification.
4030  */
4031 static inline bool f2fs_post_read_required(struct inode *inode)
4032 {
4033 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4034 		f2fs_compressed_file(inode);
4035 }
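
/*
 * Example (a sketch): read-side code uses this to decide whether a bio can
 * complete directly or must go through the post-read workqueue:
 *
 *	if (f2fs_post_read_required(inode))
 *		... defer completion to the machinery set up by
 *		    f2fs_init_post_read_processing() ...
 */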
4036 
4037 /*
4038  * compress.c
4039  */
4040 #ifdef CONFIG_F2FS_FS_COMPRESSION
4041 bool f2fs_is_compressed_page(struct page *page);
4042 struct page *f2fs_compress_control_page(struct page *page);
4043 int f2fs_prepare_compress_overwrite(struct inode *inode,
4044 			struct page **pagep, pgoff_t index, void **fsdata);
4045 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4046 					pgoff_t index, unsigned copied);
4047 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4048 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
4049 bool f2fs_is_compress_backend_ready(struct inode *inode);
4050 int f2fs_init_compress_mempool(void);
4051 void f2fs_destroy_compress_mempool(void);
4052 void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
4053 void f2fs_end_read_compressed_page(struct page *page, bool failed,
4054 							block_t blkaddr);
4055 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4056 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4057 bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
4058 				int index, int nr_pages);
4059 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
4060 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
4061 int f2fs_write_multi_pages(struct compress_ctx *cc,
4062 						int *submitted,
4063 						struct writeback_control *wbc,
4064 						enum iostat_type io_type);
4065 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4066 void f2fs_update_extent_tree_range_compressed(struct inode *inode,
4067 				pgoff_t fofs, block_t blkaddr, unsigned int llen,
4068 				unsigned int c_len);
4069 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4070 				unsigned nr_pages, sector_t *last_block_in_bio,
4071 				bool is_readahead, bool for_write);
4072 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4073 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
4074 void f2fs_put_page_dic(struct page *page);
4075 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
4076 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4077 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4078 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4079 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4080 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4081 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4082 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4083 int __init f2fs_init_compress_cache(void);
4084 void f2fs_destroy_compress_cache(void);
4085 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4086 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
4087 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4088 						nid_t ino, block_t blkaddr);
4089 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4090 								block_t blkaddr);
4091 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4092 #define inc_compr_inode_stat(inode)					\
4093 	do {								\
4094 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4095 		sbi->compr_new_inode++;					\
4096 	} while (0)
4097 #define add_compr_block_stat(inode, blocks)				\
4098 	do {								\
4099 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4100 		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
4101 		sbi->compr_written_block += blocks;			\
4102 		sbi->compr_saved_block += diff;				\
4103 	} while (0)
4104 #else
4105 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
4106 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4107 {
4108 	if (!f2fs_compressed_file(inode))
4109 		return true;
4110 	/* compression is not supported */
4111 	return false;
4112 }
4113 static inline struct page *f2fs_compress_control_page(struct page *page)
4114 {
4115 	WARN_ON_ONCE(1);
4116 	return ERR_PTR(-EINVAL);
4117 }
4118 static inline int f2fs_init_compress_mempool(void) { return 0; }
4119 static inline void f2fs_destroy_compress_mempool(void) { }
4120 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
4121 static inline void f2fs_end_read_compressed_page(struct page *page,
4122 						bool failed, block_t blkaddr)
4123 {
4124 	WARN_ON_ONCE(1);
4125 }
4126 static inline void f2fs_put_page_dic(struct page *page)
4127 {
4128 	WARN_ON_ONCE(1);
4129 }
4130 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
4131 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
4132 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4133 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4134 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4135 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4136 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4137 static inline void f2fs_destroy_compress_cache(void) { }
4138 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4139 				block_t blkaddr) { }
4140 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4141 				struct page *page, nid_t ino, block_t blkaddr) { }
4142 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4143 				struct page *page, block_t blkaddr) { return false; }
4144 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4145 							nid_t ino) { }
4146 #define inc_compr_inode_stat(inode)		do { } while (0)
4147 static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
4148 				pgoff_t fofs, block_t blkaddr, unsigned int llen,
4149 				unsigned int c_len) { }
4150 #endif
4151 
4152 static inline void set_compress_context(struct inode *inode)
4153 {
4154 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4155 
4156 	F2FS_I(inode)->i_compress_algorithm =
4157 			F2FS_OPTION(sbi).compress_algorithm;
4158 	F2FS_I(inode)->i_log_cluster_size =
4159 			F2FS_OPTION(sbi).compress_log_size;
4160 	F2FS_I(inode)->i_compress_flag =
4161 			F2FS_OPTION(sbi).compress_chksum ?
4162 				1 << COMPRESS_CHKSUM : 0;
4163 	F2FS_I(inode)->i_cluster_size =
4164 			1 << F2FS_I(inode)->i_log_cluster_size;
4165 	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
4166 		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
4167 			F2FS_OPTION(sbi).compress_level)
4168 		F2FS_I(inode)->i_compress_flag |=
4169 				F2FS_OPTION(sbi).compress_level <<
4170 				COMPRESS_LEVEL_OFFSET;
4171 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
4172 	set_inode_flag(inode, FI_COMPRESSED_FILE);
4173 	stat_inc_compr_inode(inode);
4174 	inc_compr_inode_stat(inode);
4175 	f2fs_mark_inode_dirty_sync(inode, true);
4176 }
4177 
4178 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4179 {
4180 	struct f2fs_inode_info *fi = F2FS_I(inode);
4181 
4182 	if (!f2fs_compressed_file(inode))
4183 		return true;
4184 	if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
4185 		return false;
4186 
4187 	fi->i_flags &= ~F2FS_COMPR_FL;
4188 	stat_dec_compr_inode(inode);
4189 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
4190 	f2fs_mark_inode_dirty_sync(inode, true);
4191 	return true;
4192 }
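
/*
 * Sketch (illustrative): ioctl paths use this before clearing the
 * compression attribute, e.g.:
 *
 *	if (!f2fs_disable_compressed_file(inode))
 *		return -EINVAL;
 *
 * It refuses once a regular file already has blocks on disk, since those
 * clusters may hold compressed data.
 */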
4193 
4194 #define F2FS_FEATURE_FUNCS(name, flagname) \
4195 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4196 { \
4197 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4198 }
4199 
4200 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4201 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4202 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4203 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4204 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4205 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4206 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4207 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4208 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4209 F2FS_FEATURE_FUNCS(verity, VERITY);
4210 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4211 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4212 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4213 F2FS_FEATURE_FUNCS(readonly, RO);
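
/*
 * Illustrative expansion: F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) generates
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * so feature tests read as f2fs_sb_has_encrypt(sbi) at call sites.
 */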
4214 
4215 static inline bool f2fs_may_extent_tree(struct inode *inode)
4216 {
4217 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4218 
4219 	if (!test_opt(sbi, EXTENT_CACHE) ||
4220 			is_inode_flag_set(inode, FI_NO_EXTENT) ||
4221 			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
4222 			 !f2fs_sb_has_readonly(sbi)))
4223 		return false;
4224 
4225 	/*
4226 	 * don't create extents for files recovered during mount
4227 	 * if the shrinker is not registered.
4228 	 */
4229 	if (list_empty(&sbi->s_list))
4230 		return false;
4231 
4232 	return S_ISREG(inode->i_mode);
4233 }
4234 
4235 #ifdef CONFIG_BLK_DEV_ZONED
4236 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4237 				    block_t blkaddr)
4238 {
4239 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
4240 
4241 	return test_bit(zno, FDEV(devi).blkz_seq);
4242 }
4243 #endif
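
/*
 * Worked example (illustrative): with 2 MiB zones and 4 KiB blocks,
 * log_blocks_per_blkz == 9, so block address 4660 falls in zone
 * 4660 >> 9 == 9; f2fs_blkz_is_seq() then simply tests that zone's bit in
 * the per-device blkz_seq bitmap.
 */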
4244 
4245 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4246 {
4247 	return f2fs_sb_has_blkzoned(sbi);
4248 }
4249 
4250 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4251 {
4252 	return blk_queue_discard(bdev_get_queue(bdev)) ||
4253 	       bdev_is_zoned(bdev);
4254 }
4255 
4256 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4257 {
4258 	int i;
4259 
4260 	if (!f2fs_is_multi_device(sbi))
4261 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4262 
4263 	for (i = 0; i < sbi->s_ndevs; i++)
4264 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4265 			return true;
4266 	return false;
4267 }
4268 
4269 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4270 {
4271 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4272 					f2fs_hw_should_discard(sbi);
4273 }
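
/*
 * Example (a sketch): mount-time code can gate the discard thread on this
 * helper:
 *
 *	if (f2fs_realtime_discard_enable(sbi))
 *		err = f2fs_start_discard_thread(sbi);
 *
 * Zoned devices always "should" discard (zone resets), even without the
 * DISCARD mount option.
 */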
4274 
4275 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4276 {
4277 	int i;
4278 
4279 	if (!f2fs_is_multi_device(sbi))
4280 		return bdev_read_only(sbi->sb->s_bdev);
4281 
4282 	for (i = 0; i < sbi->s_ndevs; i++)
4283 		if (bdev_read_only(FDEV(i).bdev))
4284 			return true;
4285 	return false;
4286 }
4287 
4288 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4289 {
4290 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4291 }
4292 
4293 static inline bool f2fs_may_compress(struct inode *inode)
4294 {
4295 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4296 				f2fs_is_atomic_file(inode) ||
4297 				f2fs_is_volatile_file(inode))
4298 		return false;
4299 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4300 }
4301 
4302 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4303 						u64 blocks, bool add)
4304 {
4305 	int diff = F2FS_I(inode)->i_cluster_size - blocks;
4306 	struct f2fs_inode_info *fi = F2FS_I(inode);
4307 
4308 	/* don't update i_compr_blocks if saved blocks were released */
4309 	if (!add && !atomic_read(&fi->i_compr_blocks))
4310 		return;
4311 
4312 	if (add) {
4313 		atomic_add(diff, &fi->i_compr_blocks);
4314 		stat_add_compr_blocks(inode, diff);
4315 	} else {
4316 		atomic_sub(diff, &fi->i_compr_blocks);
4317 		stat_sub_compr_blocks(inode, diff);
4318 	}
4319 	f2fs_mark_inode_dirty_sync(inode, true);
4320 }
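
/*
 * Worked example (illustrative): with a 16-block cluster that compressed
 * down to 5 blocks, diff == 11, i.e. i_compr_blocks accounts the blocks
 * saved by compression rather than the blocks actually written.
 */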
4321 
4322 static inline int block_unaligned_IO(struct inode *inode,
4323 				struct kiocb *iocb, struct iov_iter *iter)
4324 {
4325 	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
4326 	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
4327 	loff_t offset = iocb->ki_pos;
4328 	unsigned long align = offset | iov_iter_alignment(iter);
4329 
4330 	return align & blocksize_mask;
4331 }
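
/*
 * Worked example (illustrative): with 4 KiB blocks, blocksize_mask is
 * 0xfff, so a write at ki_pos 0x1200, or an iovec aligned to only 512
 * bytes, leaves low bits set and counts as unaligned; in LFS mode such
 * direct IO falls back to buffered IO (see f2fs_force_buffered_io()
 * below).
 */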
4332 
4333 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
4334 								int flag)
4335 {
4336 	if (!f2fs_is_multi_device(sbi))
4337 		return false;
4338 	if (flag != F2FS_GET_BLOCK_DIO)
4339 		return false;
4340 	return sbi->aligned_blksize;
4341 }
4342 
4343 static inline bool f2fs_force_buffered_io(struct inode *inode,
4344 				struct kiocb *iocb, struct iov_iter *iter)
4345 {
4346 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4347 	int rw = iov_iter_rw(iter);
4348 
4349 	if (f2fs_post_read_required(inode))
4350 		return true;
4351 
4352 	/* disallow direct IO if any of the devices has an unaligned blksize */
4353 	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
4354 		return true;
4355 	/*
4356 	 * for zoned block devices, fall back from direct IO to buffered IO so
4357 	 * that all IOs can be serialized by the log-structured write path.
4358 	 */
4359 	if (f2fs_sb_has_blkzoned(sbi))
4360 		return true;
4361 	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
4362 		if (block_unaligned_IO(inode, iocb, iter))
4363 			return true;
4364 		if (F2FS_IO_ALIGNED(sbi))
4365 			return true;
4366 	}
4367 	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
4368 		return true;
4369 
4370 	return false;
4371 }
4372 
4373 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4374 {
4375 	return fsverity_active(inode) &&
4376 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4377 }
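
/*
 * Example (a sketch): per-page read code uses this so pages wholly beyond
 * i_size (rounded up to a page) skip fs-verity verification:
 *
 *	if (f2fs_need_verity(inode, page->index))
 *		... enqueue the verity work ...
 */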
4378 
4379 #ifdef CONFIG_F2FS_FAULT_INJECTION
4380 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4381 							unsigned int type);
4382 #else
4383 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
4384 #endif
4385 
4386 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4387 {
4388 #ifdef CONFIG_QUOTA
4389 	if (f2fs_sb_has_quota_ino(sbi))
4390 		return true;
4391 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4392 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4393 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4394 		return true;
4395 #endif
4396 	return false;
4397 }
4398 
4399 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
4400 {
4401 	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
4402 }
4403 
4404 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4405 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4406 
4407 #endif /* _LINUX_F2FS_H */
4408