xref: /openbmc/linux/fs/f2fs/f2fs.h (revision 0be3ff0c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * fs/f2fs/f2fs.h
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #ifndef _LINUX_F2FS_H
9 #define _LINUX_F2FS_H
10 
11 #include <linux/uio.h>
12 #include <linux/types.h>
13 #include <linux/page-flags.h>
14 #include <linux/buffer_head.h>
15 #include <linux/slab.h>
16 #include <linux/crc32.h>
17 #include <linux/magic.h>
18 #include <linux/kobject.h>
19 #include <linux/sched.h>
20 #include <linux/cred.h>
21 #include <linux/vmalloc.h>
22 #include <linux/bio.h>
23 #include <linux/blkdev.h>
24 #include <linux/quotaops.h>
25 #include <linux/part_stat.h>
26 #include <crypto/hash.h>
27 
28 #include <linux/fscrypt.h>
29 #include <linux/fsverity.h>
30 
31 struct pagevec;
32 
33 #ifdef CONFIG_F2FS_CHECK_FS
34 #define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
35 #else
36 #define f2fs_bug_on(sbi, condition)					\
37 	do {								\
38 		if (WARN_ON(condition))					\
39 			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
40 	} while (0)
41 #endif
42 
43 enum {
44 	FAULT_KMALLOC,
45 	FAULT_KVMALLOC,
46 	FAULT_PAGE_ALLOC,
47 	FAULT_PAGE_GET,
48 	FAULT_ALLOC_BIO,	/* obsolete, since bio_alloc() never fails */
49 	FAULT_ALLOC_NID,
50 	FAULT_ORPHAN,
51 	FAULT_BLOCK,
52 	FAULT_DIR_DEPTH,
53 	FAULT_EVICT_INODE,
54 	FAULT_TRUNCATE,
55 	FAULT_READ_IO,
56 	FAULT_CHECKPOINT,
57 	FAULT_DISCARD,
58 	FAULT_WRITE_IO,
59 	FAULT_SLAB_ALLOC,
60 	FAULT_DQUOT_INIT,
61 	FAULT_LOCK_OP,
62 	FAULT_MAX,
63 };
64 
65 #ifdef CONFIG_F2FS_FAULT_INJECTION
66 #define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)
67 
68 struct f2fs_fault_info {
69 	atomic_t inject_ops;
70 	unsigned int inject_rate;
71 	unsigned int inject_type;
72 };
73 
74 extern const char *f2fs_fault_name[FAULT_MAX];
75 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
76 #endif
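/*
 * Illustrative usage (a sketch, assuming the fault-injection helpers such as
 * time_to_inject() that are defined later in this header): a fault site
 * checks its type bit and rate before simulating a failure, e.g.
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC))
 *		return NULL;
 *
 * time_to_inject() consults IS_FAULT_SET() and inject_rate above.
 */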
77 
78 /*
79  * For mount options
80  */
81 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
82 #define F2FS_MOUNT_DISCARD		0x00000004
83 #define F2FS_MOUNT_NOHEAP		0x00000008
84 #define F2FS_MOUNT_XATTR_USER		0x00000010
85 #define F2FS_MOUNT_POSIX_ACL		0x00000020
86 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
87 #define F2FS_MOUNT_INLINE_XATTR		0x00000080
88 #define F2FS_MOUNT_INLINE_DATA		0x00000100
89 #define F2FS_MOUNT_INLINE_DENTRY	0x00000200
90 #define F2FS_MOUNT_FLUSH_MERGE		0x00000400
91 #define F2FS_MOUNT_NOBARRIER		0x00000800
92 #define F2FS_MOUNT_FASTBOOT		0x00001000
93 #define F2FS_MOUNT_EXTENT_CACHE		0x00002000
94 #define F2FS_MOUNT_DATA_FLUSH		0x00008000
95 #define F2FS_MOUNT_FAULT_INJECTION	0x00010000
96 #define F2FS_MOUNT_USRQUOTA		0x00080000
97 #define F2FS_MOUNT_GRPQUOTA		0x00100000
98 #define F2FS_MOUNT_PRJQUOTA		0x00200000
99 #define F2FS_MOUNT_QUOTA		0x00400000
100 #define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
101 #define F2FS_MOUNT_RESERVE_ROOT		0x01000000
102 #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
103 #define F2FS_MOUNT_NORECOVERY		0x04000000
104 #define F2FS_MOUNT_ATGC			0x08000000
105 #define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
106 #define	F2FS_MOUNT_GC_MERGE		0x20000000
107 #define F2FS_MOUNT_COMPRESS_CACHE	0x40000000
108 
109 #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
110 #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
111 #define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
112 #define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
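/*
 * Usage sketch (illustrative): mount option bits are manipulated through the
 * helpers above; the option name is token-pasted onto the F2FS_MOUNT_ prefix,
 * e.g.
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		clear_opt(sbi, DISCARD);
 */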
113 
114 #define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
115 		typecheck(unsigned long long, b) &&			\
116 		((long long)((a) - (b)) > 0))
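/*
 * Illustrative example: ver_after() compares two 64-bit checkpoint versions
 * in a wrap-safe way via signed subtraction, e.g. a caller can test
 *
 *	ver_after(cur_cp_ver, prev_cp_ver)
 *
 * to ask whether cur_cp_ver is the newer version (the variable names here are
 * only for illustration).
 */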
117 
118 typedef u32 block_t;	/*
119 			 * should not change u32, since it is the on-disk block
120 			 * address format, __le32.
121 			 */
122 typedef u32 nid_t;
123 
124 #define COMPRESS_EXT_NUM		16
125 
126 /*
127  * An implementation of an rwsem that is explicitly unfair to readers. This
128  * prevents priority inversion when a low-priority reader acquires the read lock
129  * while sleeping on the write lock but the write lock is needed by
130  * higher-priority clients.
131  */
132 
133 struct f2fs_rwsem {
134 	struct rw_semaphore internal_rwsem;
135 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
136 	wait_queue_head_t read_waiters;
137 #endif
138 };
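/*
 * A minimal sketch of the intended behaviour (the real lock helpers are
 * defined later in this header): readers take the lock only opportunistically
 * so that a pending writer is not starved, roughly
 *
 *	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
 *
 * while writers use plain down_write() and wake read_waiters when they unlock.
 */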
139 
140 struct f2fs_mount_info {
141 	unsigned int opt;
142 	int write_io_size_bits;		/* Write IO size bits */
143 	block_t root_reserved_blocks;	/* root reserved blocks */
144 	kuid_t s_resuid;		/* reserved blocks for uid */
145 	kgid_t s_resgid;		/* reserved blocks for gid */
146 	int active_logs;		/* # of active logs */
147 	int inline_xattr_size;		/* inline xattr size */
148 #ifdef CONFIG_F2FS_FAULT_INJECTION
149 	struct f2fs_fault_info fault_info;	/* For fault injection */
150 #endif
151 #ifdef CONFIG_QUOTA
152 	/* Names of quota files with journalled quota */
153 	char *s_qf_names[MAXQUOTAS];
154 	int s_jquota_fmt;			/* Format of quota to use */
155 #endif
156 	/* For which write hints are passed down to block layer */
157 	int alloc_mode;			/* segment allocation policy */
158 	int fsync_mode;			/* fsync policy */
159 	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
160 	int bggc_mode;			/* bggc mode: off, on or sync */
161 	int discard_unit;		/*
162 					 * discard command's offset/size should
163 					 * be aligned to this unit: block,
164 					 * segment or section
165 					 */
166 	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
167 	block_t unusable_cap_perc;	/* percentage for cap */
168 	block_t unusable_cap;		/* Amount of space allowed to be
169 					 * unusable when disabling checkpoint
170 					 */
171 
172 	/* For compression */
173 	unsigned char compress_algorithm;	/* algorithm type */
174 	unsigned char compress_log_size;	/* cluster log size */
175 	unsigned char compress_level;		/* compress level */
176 	bool compress_chksum;			/* compressed data chksum */
177 	unsigned char compress_ext_cnt;		/* extension count */
178 	unsigned char nocompress_ext_cnt;		/* nocompress extension count */
179 	int compress_mode;			/* compression mode */
180 	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
181 	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* don't-compress extensions */
182 };
183 
184 #define F2FS_FEATURE_ENCRYPT		0x0001
185 #define F2FS_FEATURE_BLKZONED		0x0002
186 #define F2FS_FEATURE_ATOMIC_WRITE	0x0004
187 #define F2FS_FEATURE_EXTRA_ATTR		0x0008
188 #define F2FS_FEATURE_PRJQUOTA		0x0010
189 #define F2FS_FEATURE_INODE_CHKSUM	0x0020
190 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
191 #define F2FS_FEATURE_QUOTA_INO		0x0080
192 #define F2FS_FEATURE_INODE_CRTIME	0x0100
193 #define F2FS_FEATURE_LOST_FOUND		0x0200
194 #define F2FS_FEATURE_VERITY		0x0400
195 #define F2FS_FEATURE_SB_CHKSUM		0x0800
196 #define F2FS_FEATURE_CASEFOLD		0x1000
197 #define F2FS_FEATURE_COMPRESSION	0x2000
198 #define F2FS_FEATURE_RO			0x4000
199 
200 #define __F2FS_HAS_FEATURE(raw_super, mask)				\
201 	((raw_super->feature & cpu_to_le32(mask)) != 0)
202 #define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
203 #define F2FS_SET_FEATURE(sbi, mask)					\
204 	(sbi->raw_super->feature |= cpu_to_le32(mask))
205 #define F2FS_CLEAR_FEATURE(sbi, mask)					\
206 	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
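/*
 * Usage sketch (illustrative): feature flags live in the raw superblock as a
 * little-endian mask, so a check such as
 *
 *	F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT)
 *
 * tells whether the encryption feature is enabled on this image.
 */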
207 
208 /*
209  * Default values for user and/or group using reserved blocks
210  */
211 #define	F2FS_DEF_RESUID		0
212 #define	F2FS_DEF_RESGID		0
213 
214 /*
215  * For checkpoint manager
216  */
217 enum {
218 	NAT_BITMAP,
219 	SIT_BITMAP
220 };
221 
222 #define	CP_UMOUNT	0x00000001
223 #define	CP_FASTBOOT	0x00000002
224 #define	CP_SYNC		0x00000004
225 #define	CP_RECOVERY	0x00000008
226 #define	CP_DISCARD	0x00000010
227 #define CP_TRIMMED	0x00000020
228 #define CP_PAUSE	0x00000040
229 #define CP_RESIZE	0x00000080
230 
231 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
232 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
233 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
234 #define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
235 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
236 #define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
237 #define DEF_CP_INTERVAL			60	/* 60 secs */
238 #define DEF_IDLE_INTERVAL		5	/* 5 secs */
239 #define DEF_DISABLE_INTERVAL		5	/* 5 secs */
240 #define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
241 #define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */
242 
243 struct cp_control {
244 	int reason;
245 	__u64 trim_start;
246 	__u64 trim_end;
247 	__u64 trim_minlen;
248 };
249 
250 /*
251  * indicate meta/data type
252  */
253 enum {
254 	META_CP,
255 	META_NAT,
256 	META_SIT,
257 	META_SSA,
258 	META_MAX,
259 	META_POR,
260 	DATA_GENERIC,		/* check range only */
261 	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
262 	DATA_GENERIC_ENHANCE_READ,	/*
263 					 * strong check on range and segment
264 					 * bitmap but no warning due to race
265 					 * condition of read on truncated area
266 					 * by extent_cache
267 					 */
268 	META_GENERIC,
269 };
270 
271 /* for the list of ino */
272 enum {
273 	ORPHAN_INO,		/* for orphan ino list */
274 	APPEND_INO,		/* for append ino list */
275 	UPDATE_INO,		/* for update ino list */
276 	TRANS_DIR_INO,		/* for transactions dir ino list */
277 	FLUSH_INO,		/* for multiple device flushing */
278 	MAX_INO_ENTRY,		/* max. list */
279 };
280 
281 struct ino_entry {
282 	struct list_head list;		/* list head */
283 	nid_t ino;			/* inode number */
284 	unsigned int dirty_device;	/* dirty device bitmap */
285 };
286 
287 /* for the list of inodes to be GCed */
288 struct inode_entry {
289 	struct list_head list;	/* list head */
290 	struct inode *inode;	/* vfs inode pointer */
291 };
292 
293 struct fsync_node_entry {
294 	struct list_head list;	/* list head */
295 	struct page *page;	/* warm node page pointer */
296 	unsigned int seq_id;	/* sequence id */
297 };
298 
299 struct ckpt_req {
300 	struct completion wait;		/* completion for checkpoint done */
301 	struct llist_node llnode;	/* llist_node to be linked in wait queue */
302 	int ret;			/* return code of checkpoint */
303 	ktime_t queue_time;		/* request queued time */
304 };
305 
306 struct ckpt_req_control {
307 	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
308 	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
309 	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
310 	atomic_t issued_ckpt;		/* # of actually issued ckpts */
311 	atomic_t total_ckpt;		/* # of total ckpts */
312 	atomic_t queued_ckpt;		/* # of queued ckpts */
313 	struct llist_head issue_list;	/* list for command issue */
314 	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
315 	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
316 	unsigned int peak_time;		/* peak wait time in msec until now */
317 };
318 
319 /* for the bitmap indicate blocks to be discarded */
320 struct discard_entry {
321 	struct list_head list;	/* list head */
322 	block_t start_blkaddr;	/* start blockaddr of current segment */
323 	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
324 };
325 
326 /* default discard granularity of inner discard thread, unit: block count */
327 #define DEFAULT_DISCARD_GRANULARITY		16
328 
329 /* max discard pend list number */
330 #define MAX_PLIST_NUM		512
331 #define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
332 					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
333 
334 enum {
335 	D_PREP,			/* initial */
336 	D_PARTIAL,		/* partially submitted */
337 	D_SUBMIT,		/* all submitted */
338 	D_DONE,			/* finished */
339 };
340 
341 struct discard_info {
342 	block_t lstart;			/* logical start address */
343 	block_t len;			/* length */
344 	block_t start;			/* actual start address in dev */
345 };
346 
347 struct discard_cmd {
348 	struct rb_node rb_node;		/* rb node located in rb-tree */
349 	union {
350 		struct {
351 			block_t lstart;	/* logical start address */
352 			block_t len;	/* length */
353 			block_t start;	/* actual start address in dev */
354 		};
355 		struct discard_info di;	/* discard info */
356 
357 	};
358 	struct list_head list;		/* command list */
359 	struct completion wait;		/* completion */
360 	struct block_device *bdev;	/* bdev */
361 	unsigned short ref;		/* reference count */
362 	unsigned char state;		/* state */
363 	unsigned char queued;		/* queued discard */
364 	int error;			/* bio error */
365 	spinlock_t lock;		/* for state/bio_ref updating */
366 	unsigned short bio_ref;		/* bio reference count */
367 };
368 
369 enum {
370 	DPOLICY_BG,
371 	DPOLICY_FORCE,
372 	DPOLICY_FSTRIM,
373 	DPOLICY_UMOUNT,
374 	MAX_DPOLICY,
375 };
376 
377 struct discard_policy {
378 	int type;			/* type of discard */
379 	unsigned int min_interval;	/* used for candidates exist */
380 	unsigned int mid_interval;	/* used for device busy */
381 	unsigned int max_interval;	/* used for candidates not exist */
382 	unsigned int max_requests;	/* # of discards issued per round */
383 	unsigned int io_aware_gran;	/* minimum granularity at which discards ignore I/O awareness */
384 	bool io_aware;			/* issue discard in idle time */
385 	bool sync;			/* submit discard with REQ_SYNC flag */
386 	bool ordered;			/* issue discard by lba order */
387 	bool timeout;			/* discard timeout for put_super */
388 	unsigned int granularity;	/* discard granularity */
389 };
390 
391 struct discard_cmd_control {
392 	struct task_struct *f2fs_issue_discard;	/* discard thread */
393 	struct list_head entry_list;		/* 4KB discard entry list */
394 	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
395 	struct list_head wait_list;		/* store on-flushing entries */
396 	struct list_head fstrim_list;		/* in-flight discard from fstrim */
397 	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
398 	unsigned int discard_wake;		/* to wake up discard thread */
399 	struct mutex cmd_lock;
400 	unsigned int nr_discards;		/* # of discards in the list */
401 	unsigned int max_discards;		/* max. discards to be issued */
402 	unsigned int max_discard_request;	/* max. discard request per round */
403 	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
404 	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
405 	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
406 	unsigned int discard_granularity;	/* discard granularity */
407 	unsigned int undiscard_blks;		/* # of undiscard blocks */
408 	unsigned int next_pos;			/* next discard position */
409 	atomic_t issued_discard;		/* # of issued discard */
410 	atomic_t queued_discard;		/* # of queued discard */
411 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
412 	struct rb_root_cached root;		/* root of discard rb-tree */
413 	bool rbtree_check;			/* config for consistency check */
414 };
415 
416 /* for the list of fsync inodes, used only during recovery */
417 struct fsync_inode_entry {
418 	struct list_head list;	/* list head */
419 	struct inode *inode;	/* vfs inode pointer */
420 	block_t blkaddr;	/* block address locating the last fsync */
421 	block_t last_dentry;	/* block address locating the last dentry */
422 };
423 
424 #define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
425 #define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))
426 
427 #define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
428 #define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
429 #define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
430 #define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)
431 
432 #define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
433 #define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
434 
435 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
436 {
437 	int before = nats_in_cursum(journal);
438 
439 	journal->n_nats = cpu_to_le16(before + i);
440 	return before;
441 }
442 
443 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
444 {
445 	int before = sits_in_cursum(journal);
446 
447 	journal->n_sits = cpu_to_le16(before + i);
448 	return before;
449 }
450 
451 static inline bool __has_cursum_space(struct f2fs_journal *journal,
452 							int size, int type)
453 {
454 	if (type == NAT_JOURNAL)
455 		return size <= MAX_NAT_JENTRIES(journal);
456 	return size <= MAX_SIT_JENTRIES(journal);
457 }
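/*
 * Usage sketch (illustrative): before journalling a NAT/SIT entry in the
 * current summary, callers first check for free slots, e.g.
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		i = update_nats_in_cursum(journal, 1);
 *
 * and then fill nat_in_journal(journal, i) / nid_in_journal(journal, i).
 */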
458 
459 /* for inline stuff */
460 #define DEF_INLINE_RESERVED_SIZE	1
461 static inline int get_extra_isize(struct inode *inode);
462 static inline int get_inline_xattr_addrs(struct inode *inode);
463 #define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
464 				(CUR_ADDRS_PER_INODE(inode) -		\
465 				get_inline_xattr_addrs(inode) -	\
466 				DEF_INLINE_RESERVED_SIZE))
467 
468 /* for inline dir */
469 #define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
470 				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
471 				BITS_PER_BYTE + 1))
472 #define INLINE_DENTRY_BITMAP_SIZE(inode) \
473 	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
474 #define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
475 				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
476 				NR_INLINE_DENTRY(inode) + \
477 				INLINE_DENTRY_BITMAP_SIZE(inode)))
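/*
 * Worked example (illustrative, assuming the default layout: no extra
 * attribute space, 923 block addresses per inode and 50 inline xattr
 * addresses): MAX_INLINE_DATA = (923 - 50 - 1) * 4 = 3488 bytes, which gives
 * NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 inline dentries.
 */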
478 
479 /*
480  * For INODE and NODE manager
481  */
482 /* for directory operations */
483 
484 struct f2fs_filename {
485 	/*
486 	 * The filename the user specified.  This is NULL for some
487 	 * filesystem-internal operations, e.g. converting an inline directory
488 	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
489 	 */
490 	const struct qstr *usr_fname;
491 
492 	/*
493 	 * The on-disk filename.  For encrypted directories, this is encrypted.
494 	 * This may be NULL for lookups in an encrypted dir without the key.
495 	 */
496 	struct fscrypt_str disk_name;
497 
498 	/* The dirhash of this filename */
499 	f2fs_hash_t hash;
500 
501 #ifdef CONFIG_FS_ENCRYPTION
502 	/*
503 	 * For lookups in encrypted directories: either the buffer backing
504 	 * disk_name, or a buffer that holds the decoded no-key name.
505 	 */
506 	struct fscrypt_str crypto_buf;
507 #endif
508 #if IS_ENABLED(CONFIG_UNICODE)
509 	/*
510 	 * For casefolded directories: the casefolded name, but it's left NULL
511 	 * if the original name is not valid Unicode, if the directory is both
512 	 * casefolded and encrypted and its encryption key is unavailable, or if
513 	 * the filesystem is doing an internal operation where usr_fname is also
514 	 * NULL.  In all these cases we fall back to treating the name as an
515 	 * opaque byte sequence.
516 	 */
517 	struct fscrypt_str cf_name;
518 #endif
519 };
520 
521 struct f2fs_dentry_ptr {
522 	struct inode *inode;
523 	void *bitmap;
524 	struct f2fs_dir_entry *dentry;
525 	__u8 (*filename)[F2FS_SLOT_LEN];
526 	int max;
527 	int nr_bitmap;
528 };
529 
530 static inline void make_dentry_ptr_block(struct inode *inode,
531 		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
532 {
533 	d->inode = inode;
534 	d->max = NR_DENTRY_IN_BLOCK;
535 	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
536 	d->bitmap = t->dentry_bitmap;
537 	d->dentry = t->dentry;
538 	d->filename = t->filename;
539 }
540 
541 static inline void make_dentry_ptr_inline(struct inode *inode,
542 					struct f2fs_dentry_ptr *d, void *t)
543 {
544 	int entry_cnt = NR_INLINE_DENTRY(inode);
545 	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
546 	int reserved_size = INLINE_RESERVED_SIZE(inode);
547 
548 	d->inode = inode;
549 	d->max = entry_cnt;
550 	d->nr_bitmap = bitmap_size;
551 	d->bitmap = t;
552 	d->dentry = t + bitmap_size + reserved_size;
553 	d->filename = t + bitmap_size + reserved_size +
554 					SIZE_OF_DIR_ENTRY * entry_cnt;
555 }
556 
557 /*
558  * XATTR_NODE_OFFSET stores xattrs in one node block per file, using -1
559  * as the node offset to distinguish it from index node blocks.
560  * Since some high bits are used to mark the node block, they are masked off.
561  */
562 #define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
563 				>> OFFSET_BIT_SHIFT)
564 enum {
565 	ALLOC_NODE,			/* allocate a new node page if needed */
566 	LOOKUP_NODE,			/* look up a node without readahead */
567 	LOOKUP_NODE_RA,			/*
568 					 * look up a node with readahead called
569 					 * by get_data_block.
570 					 */
571 };
572 
573 #define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */
574 
575 /* congestion wait timeout value, default: 20ms */
576 #define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))
577 
578 /* maximum retry quota flush count */
579 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8
580 
581 /* maximum retry of EIO'ed meta page */
582 #define MAX_RETRY_META_PAGE_EIO			100
583 
584 #define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
585 
586 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
587 
588 /* dirty segments threshold for triggering CP */
589 #define DEFAULT_DIRTY_THRESHOLD		4
590 
591 /* for in-memory extent cache entry */
592 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
593 
594 /* number of extent info in extent cache we try to shrink */
595 #define EXTENT_CACHE_SHRINK_NUMBER	128
596 
597 #define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
598 #define RECOVERY_MIN_RA_BLOCKS		1
599 
600 struct rb_entry {
601 	struct rb_node rb_node;		/* rb node located in rb-tree */
602 	union {
603 		struct {
604 			unsigned int ofs;	/* start offset of the entry */
605 			unsigned int len;	/* length of the entry */
606 		};
607 		unsigned long long key;		/* 64-bits key */
608 	} __packed;
609 };
610 
611 struct extent_info {
612 	unsigned int fofs;		/* start offset in a file */
613 	unsigned int len;		/* length of the extent */
614 	u32 blk;			/* start block address of the extent */
615 #ifdef CONFIG_F2FS_FS_COMPRESSION
616 	unsigned int c_len;		/* physical extent length of compressed blocks */
617 #endif
618 };
619 
620 struct extent_node {
621 	struct rb_node rb_node;		/* rb node located in rb-tree */
622 	struct extent_info ei;		/* extent info */
623 	struct list_head list;		/* node in global extent list of sbi */
624 	struct extent_tree *et;		/* extent tree pointer */
625 };
626 
627 struct extent_tree {
628 	nid_t ino;			/* inode number */
629 	struct rb_root_cached root;	/* root of extent info rb-tree */
630 	struct extent_node *cached_en;	/* recently accessed extent node */
631 	struct extent_info largest;	/* largest extent info */
632 	struct list_head list;		/* to be used by sbi->zombie_list */
633 	rwlock_t lock;			/* protect extent info rb-tree */
634 	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
635 	bool largest_updated;		/* largest extent updated */
636 };
637 
638 /*
639  * This structure is taken from ext4_map_blocks.
640  *
641  * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
642  */
643 #define F2FS_MAP_NEW		(1 << BH_New)
644 #define F2FS_MAP_MAPPED		(1 << BH_Mapped)
645 #define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
646 #define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
647 				F2FS_MAP_UNWRITTEN)
648 
649 struct f2fs_map_blocks {
650 	struct block_device *m_bdev;	/* for multi-device dio */
651 	block_t m_pblk;
652 	block_t m_lblk;
653 	unsigned int m_len;
654 	unsigned int m_flags;
655 	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
656 	pgoff_t *m_next_extent;		/* point to next possible extent */
657 	int m_seg_type;
658 	bool m_may_create;		/* indicate it is from write path */
659 	bool m_multidev_dio;		/* indicate it allows multi-device dio */
660 };
661 
662 /* for flag in get_data_block */
663 enum {
664 	F2FS_GET_BLOCK_DEFAULT,
665 	F2FS_GET_BLOCK_FIEMAP,
666 	F2FS_GET_BLOCK_BMAP,
667 	F2FS_GET_BLOCK_DIO,
668 	F2FS_GET_BLOCK_PRE_DIO,
669 	F2FS_GET_BLOCK_PRE_AIO,
670 	F2FS_GET_BLOCK_PRECACHE,
671 };
672 
673 /*
674  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
675  */
676 #define FADVISE_COLD_BIT	0x01
677 #define FADVISE_LOST_PINO_BIT	0x02
678 #define FADVISE_ENCRYPT_BIT	0x04
679 #define FADVISE_ENC_NAME_BIT	0x08
680 #define FADVISE_KEEP_SIZE_BIT	0x10
681 #define FADVISE_HOT_BIT		0x20
682 #define FADVISE_VERITY_BIT	0x40
683 #define FADVISE_TRUNC_BIT	0x80
684 
685 #define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)
686 
687 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
688 #define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
689 #define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
690 
691 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
692 #define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
693 #define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
694 
695 #define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
696 #define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
697 
698 #define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
699 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
700 
701 #define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
702 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
703 
704 #define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
705 #define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
706 #define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
707 
708 #define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
709 #define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
710 
711 #define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
712 #define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
713 #define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)
714 
715 #define DEF_DIR_LEVEL		0
716 
717 enum {
718 	GC_FAILURE_PIN,
719 	GC_FAILURE_ATOMIC,
720 	MAX_GC_FAILURE
721 };
722 
723 /* used for f2fs_inode_info->flags */
724 enum {
725 	FI_NEW_INODE,		/* indicate newly allocated inode */
726 	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
727 	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
728 	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
729 	FI_INC_LINK,		/* need to increment i_nlink */
730 	FI_ACL_MODE,		/* indicate acl mode */
731 	FI_NO_ALLOC,		/* should not allocate any blocks */
732 	FI_FREE_NID,		/* free allocated nid */
733 	FI_NO_EXTENT,		/* not to use the extent cache */
734 	FI_INLINE_XATTR,	/* used for inline xattr */
735 	FI_INLINE_DATA,		/* used for inline data */
736 	FI_INLINE_DENTRY,	/* used for inline dentry */
737 	FI_APPEND_WRITE,	/* inode has appended data */
738 	FI_UPDATE_WRITE,	/* inode has in-place-update data */
739 	FI_NEED_IPU,		/* used for ipu per file */
740 	FI_ATOMIC_FILE,		/* indicate atomic file */
741 	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
742 	FI_VOLATILE_FILE,	/* indicate volatile file */
743 	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
744 	FI_DROP_CACHE,		/* drop dirty page cache */
745 	FI_DATA_EXIST,		/* indicate data exists */
746 	FI_INLINE_DOTS,		/* indicate inline dot dentries */
747 	FI_SKIP_WRITES,		/* should skip data page writeback */
748 	FI_OPU_WRITE,		/* used for opu per file */
749 	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
750 	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
751 	FI_HOT_DATA,		/* indicate file is hot */
752 	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
753 	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
754 	FI_PIN_FILE,		/* indicate file should not be gced */
755 	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
756 	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
757 	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
758 	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
759 	FI_MMAP_FILE,		/* indicate file was mmapped */
760 	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
761 	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
762 	FI_ALIGNED_WRITE,	/* enable aligned write */
763 	FI_MAX,			/* max flag, never be used */
764 };
765 
766 struct f2fs_inode_info {
767 	struct inode vfs_inode;		/* serve a vfs inode */
768 	unsigned long i_flags;		/* keep an inode flags for ioctl */
769 	unsigned char i_advise;		/* used to give file attribute hints */
770 	unsigned char i_dir_level;	/* used for dentry level for large dir */
771 	unsigned int i_current_depth;	/* only for directory depth */
772 	/* for gc failure statistic */
773 	unsigned int i_gc_failures[MAX_GC_FAILURE];
774 	unsigned int i_pino;		/* parent inode number */
775 	umode_t i_acl_mode;		/* keep file acl mode temporarily */
776 
777 	/* Fields below are used internally by f2fs */
778 	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* used to pass per-file flags */
779 	struct f2fs_rwsem i_sem;	/* protect fi info */
780 	atomic_t dirty_pages;		/* # of dirty pages */
781 	f2fs_hash_t chash;		/* hash value of given file name */
782 	unsigned int clevel;		/* maximum level of given file name */
783 	struct task_struct *task;	/* lookup and create consistency */
784 	struct task_struct *cp_task;	/* separate cp/wb IO stats */
785 	nid_t i_xattr_nid;		/* node id that contains xattrs */
786 	loff_t	last_disk_size;		/* lastly written file size */
787 	spinlock_t i_size_lock;		/* protect last_disk_size */
788 
789 #ifdef CONFIG_QUOTA
790 	struct dquot *i_dquot[MAXQUOTAS];
791 
792 	/* quota space reservation, managed internally by quota code */
793 	qsize_t i_reserved_quota;
794 #endif
795 	struct list_head dirty_list;	/* dirty list for dirs and files */
796 	struct list_head gdirty_list;	/* linked in global dirty list */
797 	struct list_head inmem_ilist;	/* list for inmem inodes */
798 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
799 	struct task_struct *inmem_task;	/* store inmemory task */
800 	struct mutex inmem_lock;	/* lock for inmemory pages */
801 	struct extent_tree *extent_tree;	/* cached extent_tree entry */
802 
803 	/* avoid racing between foreground op and gc */
804 	struct f2fs_rwsem i_gc_rwsem[2];
805 	struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */
806 
807 	int i_extra_isize;		/* size of extra space located in i_addr */
808 	kprojid_t i_projid;		/* id for project quota */
809 	int i_inline_xattr_size;	/* inline xattr size */
810 	struct timespec64 i_crtime;	/* inode creation time */
811 	struct timespec64 i_disk_time[4];/* inode disk times */
812 
813 	/* for file compress */
814 	atomic_t i_compr_blocks;		/* # of compressed blocks */
815 	unsigned char i_compress_algorithm;	/* algorithm type */
816 	unsigned char i_log_cluster_size;	/* log of cluster size */
817 	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
818 	unsigned short i_compress_flag;		/* compress flag */
819 	unsigned int i_cluster_size;		/* cluster size */
820 };
821 
822 static inline void get_extent_info(struct extent_info *ext,
823 					struct f2fs_extent *i_ext)
824 {
825 	ext->fofs = le32_to_cpu(i_ext->fofs);
826 	ext->blk = le32_to_cpu(i_ext->blk);
827 	ext->len = le32_to_cpu(i_ext->len);
828 }
829 
830 static inline void set_raw_extent(struct extent_info *ext,
831 					struct f2fs_extent *i_ext)
832 {
833 	i_ext->fofs = cpu_to_le32(ext->fofs);
834 	i_ext->blk = cpu_to_le32(ext->blk);
835 	i_ext->len = cpu_to_le32(ext->len);
836 }
837 
838 static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
839 						u32 blk, unsigned int len)
840 {
841 	ei->fofs = fofs;
842 	ei->blk = blk;
843 	ei->len = len;
844 #ifdef CONFIG_F2FS_FS_COMPRESSION
845 	ei->c_len = 0;
846 #endif
847 }
848 
849 static inline bool __is_discard_mergeable(struct discard_info *back,
850 			struct discard_info *front, unsigned int max_len)
851 {
852 	return (back->lstart + back->len == front->lstart) &&
853 		(back->len + front->len <= max_len);
854 }
855 
856 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
857 			struct discard_info *back, unsigned int max_len)
858 {
859 	return __is_discard_mergeable(back, cur, max_len);
860 }
861 
862 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
863 			struct discard_info *front, unsigned int max_len)
864 {
865 	return __is_discard_mergeable(cur, front, max_len);
866 }
867 
868 static inline bool __is_extent_mergeable(struct extent_info *back,
869 						struct extent_info *front)
870 {
871 #ifdef CONFIG_F2FS_FS_COMPRESSION
872 	if (back->c_len && back->len != back->c_len)
873 		return false;
874 	if (front->c_len && front->len != front->c_len)
875 		return false;
876 #endif
877 	return (back->fofs + back->len == front->fofs &&
878 			back->blk + back->len == front->blk);
879 }
880 
881 static inline bool __is_back_mergeable(struct extent_info *cur,
882 						struct extent_info *back)
883 {
884 	return __is_extent_mergeable(back, cur);
885 }
886 
887 static inline bool __is_front_mergeable(struct extent_info *cur,
888 						struct extent_info *front)
889 {
890 	return __is_extent_mergeable(cur, front);
891 }
892 
893 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
894 static inline void __try_update_largest_extent(struct extent_tree *et,
895 						struct extent_node *en)
896 {
897 	if (en->ei.len > et->largest.len) {
898 		et->largest = en->ei;
899 		et->largest_updated = true;
900 	}
901 }
902 
903 /*
904  * For free nid management
905  */
906 enum nid_state {
907 	FREE_NID,		/* newly added to free nid list */
908 	PREALLOC_NID,		/* it is preallocated */
909 	MAX_NID_STATE,
910 };
911 
912 enum nat_state {
913 	TOTAL_NAT,
914 	DIRTY_NAT,
915 	RECLAIMABLE_NAT,
916 	MAX_NAT_STATE,
917 };
918 
919 struct f2fs_nm_info {
920 	block_t nat_blkaddr;		/* base disk address of NAT */
921 	nid_t max_nid;			/* maximum possible node ids */
922 	nid_t available_nids;		/* # of available node ids */
923 	nid_t next_scan_nid;		/* the next nid to be scanned */
924 	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
925 	unsigned int ram_thresh;	/* control the memory footprint */
926 	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
927 	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */
928 
929 	/* NAT cache management */
930 	struct radix_tree_root nat_root;/* root of the nat entry cache */
931 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
932 	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
933 	struct list_head nat_entries;	/* cached nat entry list (clean) */
934 	spinlock_t nat_list_lock;	/* protect clean nat entry list */
935 	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
936 	unsigned int nat_blocks;	/* # of nat blocks */
937 
938 	/* free node ids management */
939 	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
940 	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
941 	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
942 	spinlock_t nid_list_lock;	/* protect nid lists ops */
943 	struct mutex build_lock;	/* lock for build free nids */
944 	unsigned char **free_nid_bitmap;
945 	unsigned char *nat_block_bitmap;
946 	unsigned short *free_nid_count;	/* free nid count of NAT block */
947 
948 	/* for checkpoint */
949 	char *nat_bitmap;		/* NAT bitmap pointer */
950 
951 	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
952 	unsigned char *nat_bits;	/* NAT bits blocks */
953 	unsigned char *full_nat_bits;	/* full NAT pages */
954 	unsigned char *empty_nat_bits;	/* empty NAT pages */
955 #ifdef CONFIG_F2FS_CHECK_FS
956 	char *nat_bitmap_mir;		/* NAT bitmap mirror */
957 #endif
958 	int bitmap_size;		/* bitmap size */
959 };
960 
961 /*
962  * This structure is used as one of the function parameters.
963  * All the information is dedicated to a given direct node block determined
964  * by the data offset in a file.
965  */
966 struct dnode_of_data {
967 	struct inode *inode;		/* vfs inode pointer */
968 	struct page *inode_page;	/* its inode page, NULL is possible */
969 	struct page *node_page;		/* cached direct node page */
970 	nid_t nid;			/* node id of the direct node block */
971 	unsigned int ofs_in_node;	/* data offset in the node page */
972 	bool inode_page_locked;		/* inode page is locked or not */
973 	bool node_changed;		/* is node block changed */
974 	char cur_level;			/* level of hole node page */
975 	char max_level;			/* level of current page located */
976 	block_t	data_blkaddr;		/* block address of the node block */
977 };
978 
979 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
980 		struct page *ipage, struct page *npage, nid_t nid)
981 {
982 	memset(dn, 0, sizeof(*dn));
983 	dn->inode = inode;
984 	dn->inode_page = ipage;
985 	dn->node_page = npage;
986 	dn->nid = nid;
987 }
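/*
 * Typical call pattern (illustrative): callers reset the descriptor before
 * resolving a file offset, e.g.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *
 * after which dn.node_page and dn.ofs_in_node describe the block, and the
 * references are released with f2fs_put_dnode(&dn).
 */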
988 
989 /*
990  * For SIT manager
991  *
992  * By default, there are 6 active log areas across the whole main area.
993  * When considering hot and cold data separation to reduce cleaning overhead,
994  * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
995  * respectively.
996  * In the current design, you should not change the numbers intentionally.
997  * Instead, the active_logs=x mount option can select 2, 4, or 6 logs
998  * according to the underlying devices. (default: 6)
999  * Just in case, the on-disk layout covers a maximum of 16 logs, consisting
1000  * of 8 for data and 8 for node logs.
1001  */
1002 #define	NR_CURSEG_DATA_TYPE	(3)
1003 #define NR_CURSEG_NODE_TYPE	(3)
1004 #define NR_CURSEG_INMEM_TYPE	(2)
1005 #define NR_CURSEG_RO_TYPE	(2)
1006 #define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
1007 #define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
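/*
 * With the values above this works out to 6 persistent logs (3 data + 3 node)
 * plus 2 in-memory logs, i.e. NR_CURSEG_TYPE == 8.
 */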
1008 
1009 enum {
1010 	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
1011 	CURSEG_WARM_DATA,	/* data blocks */
1012 	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
1013 	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
1014 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
1015 	CURSEG_COLD_NODE,	/* indirect node blocks */
1016 	NR_PERSISTENT_LOG,	/* number of persistent log */
1017 	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
1018 				/* pinned file that needs consecutive block address */
1019 	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
1020 	NO_CHECK_TYPE,		/* number of persistent & inmem log */
1021 };
1022 
1023 struct flush_cmd {
1024 	struct completion wait;
1025 	struct llist_node llnode;
1026 	nid_t ino;
1027 	int ret;
1028 };
1029 
1030 struct flush_cmd_control {
1031 	struct task_struct *f2fs_issue_flush;	/* flush thread */
1032 	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
1033 	atomic_t issued_flush;			/* # of issued flushes */
1034 	atomic_t queued_flush;			/* # of queued flushes */
1035 	struct llist_head issue_list;		/* list for command issue */
1036 	struct llist_node *dispatch_list;	/* list for command dispatch */
1037 };
1038 
1039 struct f2fs_sm_info {
1040 	struct sit_info *sit_info;		/* whole segment information */
1041 	struct free_segmap_info *free_info;	/* free segment information */
1042 	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
1043 	struct curseg_info *curseg_array;	/* active segment information */
1044 
1045 	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */
1046 
1047 	block_t seg0_blkaddr;		/* block address of 0'th segment */
1048 	block_t main_blkaddr;		/* start block address of main area */
1049 	block_t ssa_blkaddr;		/* start block address of SSA area */
1050 
1051 	unsigned int segment_count;	/* total # of segments */
1052 	unsigned int main_segments;	/* # of segments in main area */
1053 	unsigned int reserved_segments;	/* # of reserved segments */
1054 	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
1055 	unsigned int ovp_segments;	/* # of overprovision segments */
1056 
1057 	/* a threshold to reclaim prefree segments */
1058 	unsigned int rec_prefree_segments;
1059 
1060 	/* for batched trimming */
1061 	unsigned int trim_sections;		/* # of sections to trim */
1062 
1063 	struct list_head sit_entry_set;	/* sit entry set list */
1064 
1065 	unsigned int ipu_policy;	/* in-place-update policy */
1066 	unsigned int min_ipu_util;	/* in-place-update threshold */
1067 	unsigned int min_fsync_blocks;	/* threshold for fsync */
1068 	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
1069 	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
1070 	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */
1071 
1072 	/* for flush command control */
1073 	struct flush_cmd_control *fcc_info;
1074 
1075 	/* for discard command control */
1076 	struct discard_cmd_control *dcc_info;
1077 };
1078 
1079 /*
1080  * For superblock
1081  */
1082 /*
1083  * COUNT_TYPE for monitoring
1084  *
1085  * f2fs monitors the number of several block types such as on-writeback,
1086  * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
1087  */
1088 #define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
1089 enum count_type {
1090 	F2FS_DIRTY_DENTS,
1091 	F2FS_DIRTY_DATA,
1092 	F2FS_DIRTY_QDATA,
1093 	F2FS_DIRTY_NODES,
1094 	F2FS_DIRTY_META,
1095 	F2FS_INMEM_PAGES,
1096 	F2FS_DIRTY_IMETA,
1097 	F2FS_WB_CP_DATA,
1098 	F2FS_WB_DATA,
1099 	F2FS_RD_DATA,
1100 	F2FS_RD_NODE,
1101 	F2FS_RD_META,
1102 	F2FS_DIO_WRITE,
1103 	F2FS_DIO_READ,
1104 	NR_COUNT_TYPE,
1105 };
1106 
1107 /*
1108  * The below are the page types of bios used in submit_bio().
1109  * The available types are:
1110  * DATA			User data pages. Operates in async mode.
1111  * NODE			Node pages. Operates in async mode.
1112  * META			FS metadata pages such as SIT, NAT, CP.
1113  * NR_PAGE_TYPE		The number of page types.
1114  * META_FLUSH		Make sure the previous pages are written
1115  *			while waiting for the bio's completion.
1116  * ...			Can only be used with META.
1117  */
1118 #define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
1119 enum page_type {
1120 	DATA,
1121 	NODE,
1122 	META,
1123 	NR_PAGE_TYPE,
1124 	META_FLUSH,
1125 	INMEM,		/* the below types are used by tracepoints only. */
1126 	INMEM_DROP,
1127 	INMEM_INVALIDATE,
1128 	INMEM_REVOKE,
1129 	IPU,
1130 	OPU,
1131 };
1132 
1133 enum temp_type {
1134 	HOT = 0,	/* must be zero for meta bio */
1135 	WARM,
1136 	COLD,
1137 	NR_TEMP_TYPE,
1138 };
1139 
1140 enum need_lock_type {
1141 	LOCK_REQ = 0,
1142 	LOCK_DONE,
1143 	LOCK_RETRY,
1144 };
1145 
1146 enum cp_reason_type {
1147 	CP_NO_NEEDED,
1148 	CP_NON_REGULAR,
1149 	CP_COMPRESSED,
1150 	CP_HARDLINK,
1151 	CP_SB_NEED_CP,
1152 	CP_WRONG_PINO,
1153 	CP_NO_SPC_ROLL,
1154 	CP_NODE_NEED_CP,
1155 	CP_FASTBOOT_MODE,
1156 	CP_SPEC_LOG_NUM,
1157 	CP_RECOVER_DIR,
1158 };
1159 
1160 enum iostat_type {
1161 	/* WRITE IO */
1162 	APP_DIRECT_IO,			/* app direct write IOs */
1163 	APP_BUFFERED_IO,		/* app buffered write IOs */
1164 	APP_WRITE_IO,			/* app write IOs */
1165 	APP_MAPPED_IO,			/* app mapped IOs */
1166 	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
1167 	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
1168 	FS_META_IO,			/* meta IOs from kworker/reclaimer */
1169 	FS_GC_DATA_IO,			/* data IOs from foreground gc */
1170 	FS_GC_NODE_IO,			/* node IOs from foreground gc */
1171 	FS_CP_DATA_IO,			/* data IOs from checkpoint */
1172 	FS_CP_NODE_IO,			/* node IOs from checkpoint */
1173 	FS_CP_META_IO,			/* meta IOs from checkpoint */
1174 
1175 	/* READ IO */
1176 	APP_DIRECT_READ_IO,		/* app direct read IOs */
1177 	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
1178 	APP_READ_IO,			/* app read IOs */
1179 	APP_MAPPED_READ_IO,		/* app mapped read IOs */
1180 	FS_DATA_READ_IO,		/* data read IOs */
1181 	FS_GDATA_READ_IO,		/* data read IOs from background gc */
1182 	FS_CDATA_READ_IO,		/* compressed data read IOs */
1183 	FS_NODE_READ_IO,		/* node read IOs */
1184 	FS_META_READ_IO,		/* meta read IOs */
1185 
1186 	/* other */
1187 	FS_DISCARD,			/* discard */
1188 	NR_IO_TYPE,
1189 };
1190 
1191 struct f2fs_io_info {
1192 	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
1193 	nid_t ino;		/* inode number */
1194 	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
1195 	enum temp_type temp;	/* contains HOT/WARM/COLD */
1196 	int op;			/* contains REQ_OP_ */
1197 	int op_flags;		/* req_flag_bits */
1198 	block_t new_blkaddr;	/* new block address to be written */
1199 	block_t old_blkaddr;	/* old block address before COW */
1200 	struct page *page;	/* page to be written */
1201 	struct page *encrypted_page;	/* encrypted page */
1202 	struct page *compressed_page;	/* compressed page */
1203 	struct list_head list;		/* serialize IOs */
1204 	bool submitted;		/* indicate IO submission */
1205 	int need_lock;		/* indicate we need to lock cp_rwsem */
1206 	bool in_list;		/* indicate fio is in io_list */
1207 	bool is_por;		/* indicate IO is from recovery or not */
1208 	bool retry;		/* need to reallocate block address */
1209 	int compr_blocks;	/* # of compressed block addresses */
1210 	bool encrypted;		/* indicate file is encrypted */
1211 	enum iostat_type io_type;	/* io type */
1212 	struct writeback_control *io_wbc; /* writeback control */
1213 	struct bio **bio;		/* bio for ipu */
1214 	sector_t *last_block;		/* last block number in bio */
1215 	unsigned char version;		/* version of the node */
1216 };
1217 
1218 struct bio_entry {
1219 	struct bio *bio;
1220 	struct list_head list;
1221 };
1222 
1223 #define is_read_io(rw) ((rw) == READ)
1224 struct f2fs_bio_info {
1225 	struct f2fs_sb_info *sbi;	/* f2fs superblock */
1226 	struct bio *bio;		/* bios to merge */
1227 	sector_t last_block_in_bio;	/* last block number */
1228 	struct f2fs_io_info fio;	/* store buffered io info. */
1229 	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
1230 	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
1231 	struct list_head io_list;	/* track fios */
1232 	struct list_head bio_list;	/* bio entry list head */
1233 	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
1234 };
1235 
1236 #define FDEV(i)				(sbi->devs[i])
1237 #define RDEV(i)				(raw_super->devs[i])
1238 struct f2fs_dev_info {
1239 	struct block_device *bdev;
1240 	char path[MAX_PATH_LEN];
1241 	unsigned int total_segments;
1242 	block_t start_blk;
1243 	block_t end_blk;
1244 #ifdef CONFIG_BLK_DEV_ZONED
1245 	unsigned int nr_blkz;		/* Total number of zones */
1246 	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
1247 	block_t *zone_capacity_blocks;  /* Array of zone capacity in blks */
1248 #endif
1249 };
1250 
1251 enum inode_type {
1252 	DIR_INODE,			/* for dirty dir inode */
1253 	FILE_INODE,			/* for dirty regular/symlink inode */
1254 	DIRTY_META,			/* for all dirtied inode metadata */
1255 	ATOMIC_FILE,			/* for all atomic files */
1256 	NR_INODE_TYPE,
1257 };
1258 
1259 /* for inner inode cache management */
1260 struct inode_management {
1261 	struct radix_tree_root ino_root;	/* ino entry array */
1262 	spinlock_t ino_lock;			/* for ino entry lock */
1263 	struct list_head ino_list;		/* inode list head */
1264 	unsigned long ino_num;			/* number of entries */
1265 };
1266 
1267 /* for GC_AT */
1268 struct atgc_management {
1269 	bool atgc_enabled;			/* ATGC is enabled or not */
1270 	struct rb_root_cached root;		/* root of victim rb-tree */
1271 	struct list_head victim_list;		/* linked with all victim entries */
1272 	unsigned int victim_count;		/* victim count in rb-tree */
1273 	unsigned int candidate_ratio;		/* candidate ratio */
1274 	unsigned int max_candidate_count;	/* max candidate count */
1275 	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
1276 	unsigned long long age_threshold;	/* age threshold */
1277 };
1278 
1279 /* For s_flag in struct f2fs_sb_info */
1280 enum {
1281 	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
1282 	SBI_IS_CLOSE,				/* specify unmounting */
1283 	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
1284 	SBI_POR_DOING,				/* recovery is doing or not */
1285 	SBI_NEED_SB_WRITE,			/* need to recover superblock */
1286 	SBI_NEED_CP,				/* need to checkpoint */
1287 	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
1288 	SBI_IS_RECOVERED,			/* recovered orphan/data */
1289 	SBI_CP_DISABLED,			/* CP was disabled last mount */
1290 	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
1291 	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
1292 	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
1293 	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
1294 	SBI_IS_RESIZEFS,			/* resizefs is in process */
1295 	SBI_IS_FREEZING,			/* freezefs is in process */
1296 };
1297 
1298 enum {
1299 	CP_TIME,
1300 	REQ_TIME,
1301 	DISCARD_TIME,
1302 	GC_TIME,
1303 	DISABLE_TIME,
1304 	UMOUNT_DISCARD_TIMEOUT,
1305 	MAX_TIME,
1306 };
1307 
1308 enum {
1309 	GC_NORMAL,
1310 	GC_IDLE_CB,
1311 	GC_IDLE_GREEDY,
1312 	GC_IDLE_AT,
1313 	GC_URGENT_HIGH,
1314 	GC_URGENT_LOW,
1315 	GC_URGENT_MID,
1316 	MAX_GC_MODE,
1317 };
1318 
1319 enum {
1320 	BGGC_MODE_ON,		/* background gc is on */
1321 	BGGC_MODE_OFF,		/* background gc is off */
1322 	BGGC_MODE_SYNC,		/*
1323 				 * background gc is on, migrating blocks
1324 				 * like foreground gc
1325 				 */
1326 };
1327 
1328 enum {
1329 	FS_MODE_ADAPTIVE,		/* use both lfs/ssr allocation */
1330 	FS_MODE_LFS,			/* use lfs allocation only */
1331 	FS_MODE_FRAGMENT_SEG,		/* segment fragmentation mode */
1332 	FS_MODE_FRAGMENT_BLK,		/* block fragmentation mode */
1333 };
1334 
1335 enum {
1336 	ALLOC_MODE_DEFAULT,	/* stay default */
1337 	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
1338 };
1339 
1340 enum fsync_mode {
1341 	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
1342 	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
1343 	FSYNC_MODE_NOBARRIER,	/* fsync follows posix semantics but skips the barrier */
1344 };
1345 
1346 enum {
1347 	COMPR_MODE_FS,		/*
1348 				 * automatically compress
1349 				 * compression-enabled files
1350 				 */
1351 	COMPR_MODE_USER,	/*
1352 				 * automatic compression is disabled;
1353 				 * the user controls file compression
1354 				 * using ioctls
1355 				 */
1356 };
1357 
1358 enum {
1359 	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
1360 	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
1361 	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
1362 };
1363 
1364 static inline int f2fs_test_bit(unsigned int nr, char *addr);
1365 static inline void f2fs_set_bit(unsigned int nr, char *addr);
1366 static inline void f2fs_clear_bit(unsigned int nr, char *addr);
1367 
1368 /*
1369  * Layout of f2fs page.private:
1370  *
1371  * Layout A: lowest bit should be 1
1372  * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
1373  * bit 0	PAGE_PRIVATE_NOT_POINTER
1374  * bit 1	PAGE_PRIVATE_ATOMIC_WRITE
1375  * bit 2	PAGE_PRIVATE_DUMMY_WRITE
1376  * bit 3	PAGE_PRIVATE_ONGOING_MIGRATION
1377  * bit 4	PAGE_PRIVATE_INLINE_INODE
1378  * bit 5	PAGE_PRIVATE_REF_RESOURCE
1379  * bit 6-	f2fs private data
1380  *
1381  * Layout B: lowest bit should be 0
1382  * page.private is a wrapped pointer.
1383  */
1384 enum {
1385 	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
1386 	PAGE_PRIVATE_ATOMIC_WRITE,		/* data page from atomic write path */
1387 	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
1388 	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
1389 	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
1390 	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
1391 	PAGE_PRIVATE_MAX
1392 };
1393 
1394 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
1395 static inline bool page_private_##name(struct page *page) \
1396 { \
1397 	return PagePrivate(page) && \
1398 		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
1399 		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
1400 }
1401 
1402 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
1403 static inline void set_page_private_##name(struct page *page) \
1404 { \
1405 	if (!PagePrivate(page)) { \
1406 		get_page(page); \
1407 		SetPagePrivate(page); \
1408 		set_page_private(page, 0); \
1409 	} \
1410 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
1411 	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
1412 }
1413 
1414 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
1415 static inline void clear_page_private_##name(struct page *page) \
1416 { \
1417 	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
1418 	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) { \
1419 		set_page_private(page, 0); \
1420 		if (PagePrivate(page)) { \
1421 			ClearPagePrivate(page); \
1422 			put_page(page); \
1423 		}\
1424 	} \
1425 }
1426 
1427 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
1428 PAGE_PRIVATE_GET_FUNC(reference, REF_RESOURCE);
1429 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
1430 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
1431 PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
1432 PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
1433 
1434 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
1435 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
1436 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
1437 PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
1438 PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
1439 
1440 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
1441 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
1442 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
1443 PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
1444 PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
1445 
1446 static inline unsigned long get_page_private_data(struct page *page)
1447 {
1448 	unsigned long data = page_private(page);
1449 
1450 	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
1451 		return 0;
1452 	return data >> PAGE_PRIVATE_MAX;
1453 }
1454 
1455 static inline void set_page_private_data(struct page *page, unsigned long data)
1456 {
1457 	if (!PagePrivate(page)) {
1458 		get_page(page);
1459 		SetPagePrivate(page);
1460 		set_page_private(page, 0);
1461 	}
1462 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
1463 	page_private(page) |= data << PAGE_PRIVATE_MAX;
1464 }
1465 
1466 static inline void clear_page_private_data(struct page *page)
1467 {
1468 	page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
1469 	if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
1470 		set_page_private(page, 0);
1471 		if (PagePrivate(page)) {
1472 			ClearPagePrivate(page);
1473 			put_page(page);
1474 		}
1475 	}
1476 }
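/*
 * Usage sketch (illustrative): a page on the GC migration path is tagged and
 * later untagged with the helpers generated above, e.g.
 *
 *	set_page_private_gcing(page);
 *	...
 *	if (page_private_gcing(page))
 *		clear_page_private_gcing(page);
 *
 * while get/set_page_private_data() pack extra data above PAGE_PRIVATE_MAX.
 */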
1477 
1478 /* For compression */
1479 enum compress_algorithm_type {
1480 	COMPRESS_LZO,
1481 	COMPRESS_LZ4,
1482 	COMPRESS_ZSTD,
1483 	COMPRESS_LZORLE,
1484 	COMPRESS_MAX,
1485 };
1486 
1487 enum compress_flag {
1488 	COMPRESS_CHKSUM,
1489 	COMPRESS_MAX_FLAG,
1490 };
1491 
1492 #define	COMPRESS_WATERMARK			20
1493 #define	COMPRESS_PERCENT			20
1494 
1495 #define COMPRESS_DATA_RESERVED_SIZE		4
1496 struct compress_data {
1497 	__le32 clen;			/* compressed data size */
1498 	__le32 chksum;			/* compressed data chksum */
1499 	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
1500 	u8 cdata[];			/* compressed data */
1501 };
1502 
1503 #define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
1504 
1505 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
1506 
1507 #define	COMPRESS_LEVEL_OFFSET	8
1508 
1509 /* compress context */
1510 struct compress_ctx {
1511 	struct inode *inode;		/* inode the context belongs to */
1512 	pgoff_t cluster_idx;		/* cluster index number */
1513 	unsigned int cluster_size;	/* page count in cluster */
1514 	unsigned int log_cluster_size;	/* log of cluster size */
1515 	struct page **rpages;		/* pages store raw data in cluster */
1516 	unsigned int nr_rpages;		/* total page number in rpages */
1517 	struct page **cpages;		/* pages store compressed data in cluster */
1518 	unsigned int nr_cpages;		/* total page number in cpages */
1519 	unsigned int valid_nr_cpages;	/* valid page number in cpages */
1520 	void *rbuf;			/* virtual mapped address on rpages */
1521 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1522 	size_t rlen;			/* valid data length in rbuf */
1523 	size_t clen;			/* valid data length in cbuf */
1524 	void *private;			/* payload buffer for specified compression algorithm */
1525 	void *private2;			/* extra payload buffer */
1526 };
1527 
1528 /* compress context for write IO path */
1529 struct compress_io_ctx {
1530 	u32 magic;			/* magic number to indicate page is compressed */
1531 	struct inode *inode;		/* inode the context belongs to */
1532 	struct page **rpages;		/* pages store raw data in cluster */
1533 	unsigned int nr_rpages;		/* total page number in rpages */
1534 	atomic_t pending_pages;		/* in-flight compressed page count */
1535 };
1536 
1537 /* Context for decompressing one cluster on the read IO path */
1538 struct decompress_io_ctx {
1539 	u32 magic;			/* magic number to indicate page is compressed */
1540 	struct inode *inode;		/* inode the context belongs to */
1541 	pgoff_t cluster_idx;		/* cluster index number */
1542 	unsigned int cluster_size;	/* page count in cluster */
1543 	unsigned int log_cluster_size;	/* log of cluster size */
1544 	struct page **rpages;		/* pages store raw data in cluster */
1545 	unsigned int nr_rpages;		/* total page number in rpages */
1546 	struct page **cpages;		/* pages store compressed data in cluster */
1547 	unsigned int nr_cpages;		/* total page number in cpages */
1548 	struct page **tpages;		/* temp pages to pad holes in cluster */
1549 	void *rbuf;			/* virtually mapped address of rpages */
1550 	struct compress_data *cbuf;	/* virtually mapped address of cpages */
1551 	size_t rlen;			/* valid data length in rbuf */
1552 	size_t clen;			/* valid data length in cbuf */
1553 
1554 	/*
1555 	 * The number of compressed pages remaining to be read in this cluster.
1556 	 * This is initially nr_cpages.  It is decremented by 1 each time a page
1557 	 * has been read (or failed to be read).  When it reaches 0, the cluster
1558 	 * is decompressed (or an error is reported).
1559 	 *
1560 	 * If an error occurs before all the pages have been submitted for I/O,
1561 	 * then this will never reach 0.  In this case the I/O submitter is
1562 	 * responsible for calling f2fs_decompress_end_io() instead.
1563 	 */
1564 	atomic_t remaining_pages;
1565 
1566 	/*
1567 	 * Number of references to this decompress_io_ctx.
1568 	 *
1569 	 * One reference is held for I/O completion.  This reference is dropped
1570 	 * after the pagecache pages are updated and unlocked -- either after
1571 	 * decompression (and verity if enabled), or after an error.
1572 	 *
1573 	 * In addition, each compressed page holds a reference while it is in a
1574 	 * bio.  These references are necessary prevent compressed pages from
1575 	 * bio.  These references are necessary to prevent compressed pages from
1576 	 */
1577 	refcount_t refcnt;
1578 
1579 	bool failed;			/* IO error occurred before decompression? */
1580 	bool need_verity;		/* need fs-verity verification after decompression? */
1581 	void *private;			/* payload buffer for specified decompression algorithm */
1582 	void *private2;			/* extra payload buffer */
1583 	struct work_struct verity_work;	/* work to verify the decompressed pages */
1584 };
1585 
1586 #define NULL_CLUSTER			((unsigned int)(~0))
1587 #define MIN_COMPRESS_LOG_SIZE		2
1588 #define MAX_COMPRESS_LOG_SIZE		8
1589 #define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
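
/*
 * With 4KB pages this bounds a cluster between 2^2 = 4 pages (a 16KB
 * window) and 2^8 = 256 pages (a 1MB window), since
 * MAX_COMPRESS_WINDOW_SIZE(log_size) == PAGE_SIZE << log_size.
 */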
1590 
1591 struct f2fs_sb_info {
1592 	struct super_block *sb;			/* pointer to VFS super block */
1593 	struct proc_dir_entry *s_proc;		/* proc entry */
1594 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1595 	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
1596 	int valid_super_block;			/* valid super block no */
1597 	unsigned long s_flag;			/* flags for sbi */
1598 	struct mutex writepages;		/* mutex for writepages() */
1599 
1600 #ifdef CONFIG_BLK_DEV_ZONED
1601 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1602 	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
1603 #endif
1604 
1605 	/* for node-related operations */
1606 	struct f2fs_nm_info *nm_info;		/* node manager */
1607 	struct inode *node_inode;		/* cache node blocks */
1608 
1609 	/* for segment-related operations */
1610 	struct f2fs_sm_info *sm_info;		/* segment manager */
1611 
1612 	/* for bio operations */
1613 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1614 	/* keep migration IO order for LFS mode */
1615 	struct f2fs_rwsem io_order_lock;
1616 	mempool_t *write_io_dummy;		/* Dummy pages */
1617 	pgoff_t metapage_eio_ofs;		/* EIO page offset */
1618 	int metapage_eio_cnt;			/* EIO count */
1619 
1620 	/* for checkpoint */
1621 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1622 	int cur_cp_pack;			/* current cp pack (1 or 2) */
1623 	spinlock_t cp_lock;			/* for flag in ckpt */
1624 	struct inode *meta_inode;		/* cache meta blocks */
1625 	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
1626 	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
1627 	struct f2fs_rwsem node_write;		/* locking node writes */
1628 	struct f2fs_rwsem node_change;	/* locking node change */
1629 	wait_queue_head_t cp_wait;
1630 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1631 	long interval_time[MAX_TIME];		/* to store thresholds */
1632 	struct ckpt_req_control cprc_info;	/* for checkpoint request control */
1633 
1634 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1635 
1636 	spinlock_t fsync_node_lock;		/* for node entry lock */
1637 	struct list_head fsync_node_list;	/* node list head */
1638 	unsigned int fsync_seg_id;		/* sequence id */
1639 	unsigned int fsync_node_num;		/* number of node entries */
1640 
1641 	/* for orphan inodes, use the 0'th array of im[] */
1642 	unsigned int max_orphans;		/* max orphan inodes */
1643 
1644 	/* for inode management */
1645 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1646 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1647 	struct mutex flush_lock;		/* for flush exclusion */
1648 
1649 	/* for extent tree cache */
1650 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
1651 	struct mutex extent_tree_lock;	/* locking extent radix tree */
1652 	struct list_head extent_list;		/* lru list for shrinker */
1653 	spinlock_t extent_lock;			/* locking extent lru list */
1654 	atomic_t total_ext_tree;		/* extent tree count */
1655 	struct list_head zombie_list;		/* extent zombie tree list */
1656 	atomic_t total_zombie_tree;		/* extent zombie tree count */
1657 	atomic_t total_ext_node;		/* extent info count */
1658 
1659 	/* basic filesystem units */
1660 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1661 	unsigned int log_blocksize;		/* log2 block size */
1662 	unsigned int blocksize;			/* block size */
1663 	unsigned int root_ino_num;		/* root inode number */
1664 	unsigned int node_ino_num;		/* node inode number */
1665 	unsigned int meta_ino_num;		/* meta inode number */
1666 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1667 	unsigned int blocks_per_seg;		/* blocks per segment */
1668 	unsigned int segs_per_sec;		/* segments per section */
1669 	unsigned int secs_per_zone;		/* sections per zone */
1670 	unsigned int total_sections;		/* total section count */
1671 	unsigned int total_node_count;		/* total node block count */
1672 	unsigned int total_valid_node_count;	/* valid node block count */
1673 	int dir_level;				/* directory level */
1674 	int readdir_ra;				/* readahead inode in readdir */
1675 	u64 max_io_bytes;			/* max io bytes to merge IOs */
1676 
1677 	block_t user_block_count;		/* # of user blocks */
1678 	block_t total_valid_block_count;	/* # of valid blocks */
1679 	block_t discard_blks;			/* discard command candidates */
1680 	block_t last_valid_block_count;		/* for recovery */
1681 	block_t reserved_blocks;		/* configurable reserved blocks */
1682 	block_t current_reserved_blocks;	/* current reserved blocks */
1683 
1684 	/* Additional tracking for no checkpoint mode */
1685 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1686 
1687 	unsigned int nquota_files;		/* # of quota sysfiles */
1688 	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */
1689 
1690 	/* # of pages, see count_type */
1691 	atomic_t nr_pages[NR_COUNT_TYPE];
1692 	/* # of allocated blocks */
1693 	struct percpu_counter alloc_valid_block_count;
1694 	/* # of node block writes for roll-forward recovery */
1695 	struct percpu_counter rf_node_block_count;
1696 
1697 	/* writeback control */
1698 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1699 
1700 	/* valid inode count */
1701 	struct percpu_counter total_valid_inode_count;
1702 
1703 	struct f2fs_mount_info mount_opt;	/* mount options */
1704 
1705 	/* for cleaning operations */
1706 	struct f2fs_rwsem gc_lock;		/*
1707 						 * semaphore for GC; avoids races
1708 						 * between GCs, or GC and CP
1709 						 */
1710 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1711 	struct atgc_management am;		/* atgc management */
1712 	unsigned int cur_victim_sec;		/* current victim section num */
1713 	unsigned int gc_mode;			/* current GC state */
1714 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1715 	spinlock_t gc_urgent_high_lock;
1716 	bool gc_urgent_high_limited;		/* indicates a limited trial count */
1717 	unsigned int gc_urgent_high_remaining;	/* remaining trial count for GC_URGENT_HIGH */
1718 
1719 	/* for skip statistics */
1720 	unsigned int atomic_files;		/* # of opened atomic file */
1721 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
1722 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1723 
1724 	/* threshold for gc trials on pinned files */
1725 	u64 gc_pin_file_threshold;
1726 	struct f2fs_rwsem pin_sem;
1727 
1728 	/* maximum # of trials to find a victim segment for SSR and GC */
1729 	unsigned int max_victim_search;
1730 	/* migration granularity of garbage collection, unit: segment */
1731 	unsigned int migration_granularity;
1732 
1733 	/*
1734 	 * for stat information: in each two-element array below, one entry
1735 	 * is for the LFS mode and the other is for the SSR mode.
1736 	 */
1737 #ifdef CONFIG_F2FS_STAT_FS
1738 	struct f2fs_stat_info *stat_info;	/* FS status information */
1739 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1740 	unsigned int segment_count[2];		/* # of allocated segments */
1741 	unsigned int block_count[2];		/* # of allocated blocks */
1742 	atomic_t inplace_count;		/* # of inplace update */
1743 	atomic64_t total_hit_ext;		/* # of lookup extent cache */
1744 	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
1745 	atomic64_t read_hit_largest;		/* # of hit largest extent node */
1746 	atomic64_t read_hit_cached;		/* # of hit cached extent node */
1747 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1748 	atomic_t inline_inode;			/* # of inline_data inodes */
1749 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1750 	atomic_t compr_inode;			/* # of compressed inodes */
1751 	atomic64_t compr_blocks;		/* # of compressed blocks */
1752 	atomic_t vw_cnt;			/* # of volatile writes */
1753 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1754 	atomic_t max_vw_cnt;			/* max # of volatile writes */
1755 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1756 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1757 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1758 #endif
1759 	spinlock_t stat_lock;			/* lock for stat operations */
1760 
1761 	/* to attach REQ_META|REQ_FUA flags */
1762 	unsigned int data_io_flag;
1763 	unsigned int node_io_flag;
1764 
1765 	/* For sysfs support */
1766 	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
1767 	struct completion s_kobj_unregister;
1768 
1769 	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
1770 	struct completion s_stat_kobj_unregister;
1771 
1772 	struct kobject s_feature_list_kobj;		/* /sys/fs/f2fs/<devname>/feature_list */
1773 	struct completion s_feature_list_kobj_unregister;
1774 
1775 	/* For shrinker support */
1776 	struct list_head s_list;
1777 	struct mutex umount_mutex;
1778 	unsigned int shrinker_run_no;
1779 
1780 	/* For multi devices */
1781 	int s_ndevs;				/* number of devices */
1782 	struct f2fs_dev_info *devs;		/* for device list */
1783 	unsigned int dirty_device;		/* for checkpoint data flush */
1784 	spinlock_t dev_lock;			/* protect dirty_device */
1785 	bool aligned_blksize;			/* all devices have the same logical blksize */
1786 
1787 	/* For write statistics */
1788 	u64 sectors_written_start;
1789 	u64 kbytes_written;
1790 
1791 	/* Reference to checksum algorithm driver via cryptoapi */
1792 	struct crypto_shash *s_chksum_driver;
1793 
1794 	/* Precomputed FS UUID checksum for seeding other checksums */
1795 	__u32 s_chksum_seed;
1796 
1797 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1798 
1799 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1800 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1801 
1802 	/* For reclaimed segs statistics per GC mode */
1803 	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
1804 	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */
1805 
1806 	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */
1807 
1808 	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
1809 	int max_fragment_hole;			/* max hole size for block fragmentation mode */
1810 
1811 #ifdef CONFIG_F2FS_FS_COMPRESSION
1812 	struct kmem_cache *page_array_slab;	/* page array entry */
1813 	unsigned int page_array_slab_size;	/* default page array slab size */
1814 
1815 	/* For runtime compression statistics */
1816 	u64 compr_written_block;
1817 	u64 compr_saved_block;
1818 	u32 compr_new_inode;
1819 
1820 	/* For compressed block cache */
1821 	struct inode *compress_inode;		/* cache compressed blocks */
1822 	unsigned int compress_percent;		/* cache page percentage */
1823 	unsigned int compress_watermark;	/* cache page watermark */
1824 	atomic_t compress_page_hit;		/* cache hit count */
1825 #endif
1826 
1827 #ifdef CONFIG_F2FS_IOSTAT
1828 	/* For app/fs IO statistics */
1829 	spinlock_t iostat_lock;
1830 	unsigned long long rw_iostat[NR_IO_TYPE];
1831 	unsigned long long prev_rw_iostat[NR_IO_TYPE];
1832 	bool iostat_enable;
1833 	unsigned long iostat_next_period;
1834 	unsigned int iostat_period_ms;
1835 
1836 	/* For io latency related statistics info in one iostat period */
1837 	spinlock_t iostat_lat_lock;
1838 	struct iostat_lat_info *iostat_io_lat;
1839 #endif
1840 };
1841 
1842 #ifdef CONFIG_F2FS_FAULT_INJECTION
1843 #define f2fs_show_injection_info(sbi, type)					\
1844 	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
1845 		KERN_INFO, sbi->sb->s_id,				\
1846 		f2fs_fault_name[type],					\
1847 		__func__, __builtin_return_address(0))
1848 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1849 {
1850 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1851 
1852 	if (!ffi->inject_rate)
1853 		return false;
1854 
1855 	if (!IS_FAULT_SET(ffi, type))
1856 		return false;
1857 
1858 	atomic_inc(&ffi->inject_ops);
1859 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1860 		atomic_set(&ffi->inject_ops, 0);
1861 		return true;
1862 	}
1863 	return false;
1864 }
1865 #else
1866 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1867 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1868 {
1869 	return false;
1870 }
1871 #endif
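
/*
 * Typical use of the pair above, as seen throughout this file: probe the
 * injection point first, log it, then behave exactly as if the real
 * operation had failed, e.g.:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */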
1872 
1873 /*
1874  * Test if the mounted volume is a multi-device volume.
1875  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1876  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1877  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1878  */
1879 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1880 {
1881 	return sbi->s_ndevs > 1;
1882 }
1883 
1884 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1885 {
1886 	unsigned long now = jiffies;
1887 
1888 	sbi->last_time[type] = now;
1889 
1890 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1891 	if (type == REQ_TIME) {
1892 		sbi->last_time[DISCARD_TIME] = now;
1893 		sbi->last_time[GC_TIME] = now;
1894 	}
1895 }
1896 
1897 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1898 {
1899 	unsigned long interval = sbi->interval_time[type] * HZ;
1900 
1901 	return time_after(jiffies, sbi->last_time[type] + interval);
1902 }
1903 
1904 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1905 						int type)
1906 {
1907 	unsigned long interval = sbi->interval_time[type] * HZ;
1908 	unsigned int wait_ms = 0;
1909 	long delta;
1910 
1911 	delta = (sbi->last_time[type] + interval) - jiffies;
1912 	if (delta > 0)
1913 		wait_ms = jiffies_to_msecs(delta);
1914 
1915 	return wait_ms;
1916 }
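
/*
 * interval_time[] is kept in seconds and scaled by HZ here; e.g. with
 * interval_time[GC_TIME] == 60 and the last GC 45 seconds ago,
 * f2fs_time_to_wait(sbi, GC_TIME) returns roughly 15000 ms.
 */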
1917 
1918 /*
1919  * Inline functions
1920  */
1921 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1922 			      const void *address, unsigned int length)
1923 {
1924 	struct {
1925 		struct shash_desc shash;
1926 		char ctx[4];
1927 	} desc;
1928 	int err;
1929 
1930 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1931 
1932 	desc.shash.tfm = sbi->s_chksum_driver;
1933 	*(u32 *)desc.ctx = crc;
1934 
1935 	err = crypto_shash_update(&desc.shash, address, length);
1936 	BUG_ON(err);
1937 
1938 	return *(u32 *)desc.ctx;
1939 }
1940 
1941 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1942 			   unsigned int length)
1943 {
1944 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1945 }
1946 
1947 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1948 				  void *buf, size_t buf_size)
1949 {
1950 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1951 }
1952 
1953 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1954 			      const void *address, unsigned int length)
1955 {
1956 	return __f2fs_crc32(sbi, crc, address, length);
1957 }
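
/*
 * Illustrative verification using the helpers above, assuming a buffer
 * whose last four bytes hold its stored (little-endian) checksum:
 *
 *	size_t payload = len - sizeof(__le32);
 *	__u32 stored = le32_to_cpu(*(__le32 *)((char *)buf + payload));
 *
 *	if (!f2fs_crc_valid(sbi, stored, buf, payload))
 *		return -EFSBADCRC;
 *
 * f2fs_crc32() seeds the CRC with F2FS_SUPER_MAGIC, while f2fs_chksum()
 * lets callers chain an arbitrary seed (e.g. s_chksum_seed).
 */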
1958 
1959 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1960 {
1961 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1962 }
1963 
1964 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1965 {
1966 	return sb->s_fs_info;
1967 }
1968 
1969 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1970 {
1971 	return F2FS_SB(inode->i_sb);
1972 }
1973 
1974 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1975 {
1976 	return F2FS_I_SB(mapping->host);
1977 }
1978 
1979 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1980 {
1981 	return F2FS_M_SB(page_file_mapping(page));
1982 }
1983 
1984 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1985 {
1986 	return (struct f2fs_super_block *)(sbi->raw_super);
1987 }
1988 
1989 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1990 {
1991 	return (struct f2fs_checkpoint *)(sbi->ckpt);
1992 }
1993 
1994 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1995 {
1996 	return (struct f2fs_node *)page_address(page);
1997 }
1998 
1999 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
2000 {
2001 	return &((struct f2fs_node *)page_address(page))->i;
2002 }
2003 
2004 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
2005 {
2006 	return (struct f2fs_nm_info *)(sbi->nm_info);
2007 }
2008 
2009 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
2010 {
2011 	return (struct f2fs_sm_info *)(sbi->sm_info);
2012 }
2013 
2014 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
2015 {
2016 	return (struct sit_info *)(SM_I(sbi)->sit_info);
2017 }
2018 
2019 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
2020 {
2021 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
2022 }
2023 
2024 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
2025 {
2026 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
2027 }
2028 
2029 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
2030 {
2031 	return sbi->meta_inode->i_mapping;
2032 }
2033 
2034 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2035 {
2036 	return sbi->node_inode->i_mapping;
2037 }
2038 
2039 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2040 {
2041 	return test_bit(type, &sbi->s_flag);
2042 }
2043 
2044 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2045 {
2046 	set_bit(type, &sbi->s_flag);
2047 }
2048 
2049 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2050 {
2051 	clear_bit(type, &sbi->s_flag);
2052 }
2053 
2054 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2055 {
2056 	return le64_to_cpu(cp->checkpoint_ver);
2057 }
2058 
2059 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2060 {
2061 	if (type < F2FS_MAX_QUOTAS)
2062 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2063 	return 0;
2064 }
2065 
2066 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2067 {
2068 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2069 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2070 }
2071 
2072 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2073 {
2074 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2075 
2076 	return ckpt_flags & f;
2077 }
2078 
2079 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2080 {
2081 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2082 }
2083 
2084 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2085 {
2086 	unsigned int ckpt_flags;
2087 
2088 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2089 	ckpt_flags |= f;
2090 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2091 }
2092 
2093 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2094 {
2095 	unsigned long flags;
2096 
2097 	spin_lock_irqsave(&sbi->cp_lock, flags);
2098 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
2099 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2100 }
2101 
2102 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2103 {
2104 	unsigned int ckpt_flags;
2105 
2106 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2107 	ckpt_flags &= (~f);
2108 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2109 }
2110 
2111 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2112 {
2113 	unsigned long flags;
2114 
2115 	spin_lock_irqsave(&sbi->cp_lock, flags);
2116 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
2117 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2118 }
2119 
2120 #define init_f2fs_rwsem(sem)					\
2121 do {								\
2122 	static struct lock_class_key __key;			\
2123 								\
2124 	__init_f2fs_rwsem((sem), #sem, &__key);			\
2125 } while (0)
2126 
2127 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2128 		const char *sem_name, struct lock_class_key *key)
2129 {
2130 	__init_rwsem(&sem->internal_rwsem, sem_name, key);
2131 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2132 	init_waitqueue_head(&sem->read_waiters);
2133 #endif
2134 }
2135 
2136 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2137 {
2138 	return rwsem_is_locked(&sem->internal_rwsem);
2139 }
2140 
2141 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2142 {
2143 	return rwsem_is_contended(&sem->internal_rwsem);
2144 }
2145 
2146 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2147 {
2148 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2149 	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2150 #else
2151 	down_read(&sem->internal_rwsem);
2152 #endif
2153 }
2154 
2155 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2156 {
2157 	return down_read_trylock(&sem->internal_rwsem);
2158 }
2159 
2160 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2161 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2162 {
2163 	down_read_nested(&sem->internal_rwsem, subclass);
2164 }
2165 #else
2166 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2167 #endif
2168 
2169 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2170 {
2171 	up_read(&sem->internal_rwsem);
2172 }
2173 
2174 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2175 {
2176 	down_write(&sem->internal_rwsem);
2177 }
2178 
2179 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2180 {
2181 	return down_write_trylock(&sem->internal_rwsem);
2182 }
2183 
2184 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2185 {
2186 	up_write(&sem->internal_rwsem);
2187 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2188 	wake_up_all(&sem->read_waiters);
2189 #endif
2190 }
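
/*
 * Note on the wrappers above: with CONFIG_F2FS_UNFAIR_RWSEM, readers do
 * not queue in the rwsem itself; they spin on down_read_trylock() from a
 * private waitqueue that f2fs_up_write() wakes.  Readers can therefore
 * overtake queued writers, which effectively trades writer fairness for
 * throughput on read-mostly locks such as cp_rwsem.
 */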
2191 
2192 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2193 {
2194 	f2fs_down_read(&sbi->cp_rwsem);
2195 }
2196 
2197 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2198 {
2199 	if (time_to_inject(sbi, FAULT_LOCK_OP)) {
2200 		f2fs_show_injection_info(sbi, FAULT_LOCK_OP);
2201 		return 0;
2202 	}
2203 	return f2fs_down_read_trylock(&sbi->cp_rwsem);
2204 }
2205 
2206 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2207 {
2208 	f2fs_up_read(&sbi->cp_rwsem);
2209 }
2210 
2211 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2212 {
2213 	f2fs_down_write(&sbi->cp_rwsem);
2214 }
2215 
2216 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2217 {
2218 	f2fs_up_write(&sbi->cp_rwsem);
2219 }
2220 
2221 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2222 {
2223 	int reason = CP_SYNC;
2224 
2225 	if (test_opt(sbi, FASTBOOT))
2226 		reason = CP_FASTBOOT;
2227 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2228 		reason = CP_UMOUNT;
2229 	return reason;
2230 }
2231 
2232 static inline bool __remain_node_summaries(int reason)
2233 {
2234 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
2235 }
2236 
2237 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2238 {
2239 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2240 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2241 }
2242 
2243 /*
2244  * Check whether the inode has blocks or not
2245  */
2246 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2247 {
2248 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2249 
2250 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2251 }
2252 
2253 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2254 {
2255 	return ofs == XATTR_NODE_OFFSET;
2256 }
2257 
2258 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2259 					struct inode *inode, bool cap)
2260 {
2261 	if (!inode)
2262 		return true;
2263 	if (!test_opt(sbi, RESERVE_ROOT))
2264 		return false;
2265 	if (IS_NOQUOTA(inode))
2266 		return true;
2267 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2268 		return true;
2269 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2270 					in_group_p(F2FS_OPTION(sbi).s_resgid))
2271 		return true;
2272 	if (cap && capable(CAP_SYS_RESOURCE))
2273 		return true;
2274 	return false;
2275 }
2276 
2277 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2278 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2279 				 struct inode *inode, blkcnt_t *count)
2280 {
2281 	blkcnt_t diff = 0, release = 0;
2282 	block_t avail_user_block_count;
2283 	int ret;
2284 
2285 	ret = dquot_reserve_block(inode, *count);
2286 	if (ret)
2287 		return ret;
2288 
2289 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2290 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2291 		release = *count;
2292 		goto release_quota;
2293 	}
2294 
2295 	/*
2296 	 * let's increase this prior to the actual block count change so that
2297 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2298 	 */
2299 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2300 
2301 	spin_lock(&sbi->stat_lock);
2302 	sbi->total_valid_block_count += (block_t)(*count);
2303 	avail_user_block_count = sbi->user_block_count -
2304 					sbi->current_reserved_blocks;
2305 
2306 	if (!__allow_reserved_blocks(sbi, inode, true))
2307 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2308 
2309 	if (F2FS_IO_ALIGNED(sbi))
2310 		avail_user_block_count -= sbi->blocks_per_seg *
2311 				SM_I(sbi)->additional_reserved_segments;
2312 
2313 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2314 		if (avail_user_block_count > sbi->unusable_block_count)
2315 			avail_user_block_count -= sbi->unusable_block_count;
2316 		else
2317 			avail_user_block_count = 0;
2318 	}
2319 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2320 		diff = sbi->total_valid_block_count - avail_user_block_count;
2321 		if (diff > *count)
2322 			diff = *count;
2323 		*count -= diff;
2324 		release = diff;
2325 		sbi->total_valid_block_count -= diff;
2326 		if (!*count) {
2327 			spin_unlock(&sbi->stat_lock);
2328 			goto enospc;
2329 		}
2330 	}
2331 	spin_unlock(&sbi->stat_lock);
2332 
2333 	if (unlikely(release)) {
2334 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2335 		dquot_release_reservation_block(inode, release);
2336 	}
2337 	f2fs_i_blocks_write(inode, *count, true, true);
2338 	return 0;
2339 
2340 enospc:
2341 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2342 release_quota:
2343 	dquot_release_reservation_block(inode, release);
2344 	return -ENOSPC;
2345 }
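
/*
 * Illustrative caller of inc_valid_block_count(): *count is in/out and
 * may be trimmed when free space runs short, so callers should use the
 * value written back, e.g.:
 *
 *	blkcnt_t count = requested;
 *
 *	if (inc_valid_block_count(sbi, inode, &count))
 *		return -ENOSPC;
 *
 * On success, "count" (<= requested) blocks were actually reserved and
 * charged against the inode's quota.
 */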
2346 
2347 __printf(2, 3)
2348 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2349 
2350 #define f2fs_err(sbi, fmt, ...)						\
2351 	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2352 #define f2fs_warn(sbi, fmt, ...)					\
2353 	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2354 #define f2fs_notice(sbi, fmt, ...)					\
2355 	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2356 #define f2fs_info(sbi, fmt, ...)					\
2357 	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2358 #define f2fs_debug(sbi, fmt, ...)					\
2359 	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2360 
2361 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2362 						struct inode *inode,
2363 						block_t count)
2364 {
2365 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2366 
2367 	spin_lock(&sbi->stat_lock);
2368 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2369 	sbi->total_valid_block_count -= (block_t)count;
2370 	if (sbi->reserved_blocks &&
2371 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2372 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2373 					sbi->current_reserved_blocks + count);
2374 	spin_unlock(&sbi->stat_lock);
2375 	if (unlikely(inode->i_blocks < sectors)) {
2376 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2377 			  inode->i_ino,
2378 			  (unsigned long long)inode->i_blocks,
2379 			  (unsigned long long)sectors);
2380 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2381 		return;
2382 	}
2383 	f2fs_i_blocks_write(inode, count, false, true);
2384 }
2385 
2386 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2387 {
2388 	atomic_inc(&sbi->nr_pages[count_type]);
2389 
2390 	if (count_type == F2FS_DIRTY_DENTS ||
2391 			count_type == F2FS_DIRTY_NODES ||
2392 			count_type == F2FS_DIRTY_META ||
2393 			count_type == F2FS_DIRTY_QDATA ||
2394 			count_type == F2FS_DIRTY_IMETA)
2395 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2396 }
2397 
2398 static inline void inode_inc_dirty_pages(struct inode *inode)
2399 {
2400 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2401 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2402 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2403 	if (IS_NOQUOTA(inode))
2404 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2405 }
2406 
2407 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2408 {
2409 	atomic_dec(&sbi->nr_pages[count_type]);
2410 }
2411 
2412 static inline void inode_dec_dirty_pages(struct inode *inode)
2413 {
2414 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2415 			!S_ISLNK(inode->i_mode))
2416 		return;
2417 
2418 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2419 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2420 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2421 	if (IS_NOQUOTA(inode))
2422 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2423 }
2424 
2425 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2426 {
2427 	return atomic_read(&sbi->nr_pages[count_type]);
2428 }
2429 
2430 static inline int get_dirty_pages(struct inode *inode)
2431 {
2432 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2433 }
2434 
2435 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2436 {
2437 	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2438 	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2439 						sbi->log_blocks_per_seg;
2440 
2441 	return segs / sbi->segs_per_sec;
2442 }
2443 
2444 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2445 {
2446 	return sbi->total_valid_block_count;
2447 }
2448 
2449 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2450 {
2451 	return sbi->discard_blks;
2452 }
2453 
2454 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2455 {
2456 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2457 
2458 	/* return NAT or SIT bitmap size */
2459 	if (flag == NAT_BITMAP)
2460 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2461 	else if (flag == SIT_BITMAP)
2462 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2463 
2464 	return 0;
2465 }
2466 
2467 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2468 {
2469 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2470 }
2471 
2472 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2473 {
2474 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2475 	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2476 	int offset;
2477 
2478 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2479 		offset = (flag == SIT_BITMAP) ?
2480 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2481 		/*
2482 		 * if the large_nat_bitmap feature is enabled, skip the leading
2483 		 * checksum word that protects all nat/sit bitmaps.
2484 		 */
2485 		return tmp_ptr + offset + sizeof(__le32);
2486 	}
2487 
2488 	if (__cp_payload(sbi) > 0) {
2489 		if (flag == NAT_BITMAP)
2490 			return &ckpt->sit_nat_version_bitmap;
2491 		else
2492 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2493 	} else {
2494 		offset = (flag == NAT_BITMAP) ?
2495 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2496 		return tmp_ptr + offset;
2497 	}
2498 }
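
/*
 * Reading of __bitmap_ptr() above: with large_nat_bitmap, both bitmaps
 * live inside sit_nat_version_bitmap behind a leading __le32 checksum,
 * NAT first.  With a non-zero cp_payload, the NAT bitmap starts at
 * sit_nat_version_bitmap while the SIT bitmap begins one full block past
 * the checkpoint header.  Otherwise the two bitmaps are packed back to
 * back with the SIT bitmap first.
 */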
2499 
2500 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2501 {
2502 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2503 
2504 	if (sbi->cur_cp_pack == 2)
2505 		start_addr += sbi->blocks_per_seg;
2506 	return start_addr;
2507 }
2508 
2509 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2510 {
2511 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2512 
2513 	if (sbi->cur_cp_pack == 1)
2514 		start_addr += sbi->blocks_per_seg;
2515 	return start_addr;
2516 }
2517 
2518 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2519 {
2520 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2521 }
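
/*
 * The three helpers above implement the checkpoint pack ping-pong: pack #1
 * starts at cp_blkaddr and pack #2 one segment later, so with
 * cur_cp_pack == 1 the next checkpoint is written at
 * cp_blkaddr + blocks_per_seg, and __set_cp_next_pack() then flips
 * cur_cp_pack between 1 and 2.
 */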
2522 
2523 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2524 {
2525 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2526 }
2527 
2528 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2529 					struct inode *inode, bool is_inode)
2530 {
2531 	block_t	valid_block_count;
2532 	unsigned int valid_node_count, user_block_count;
2533 	int err;
2534 
2535 	if (is_inode) {
2536 		if (inode) {
2537 			err = dquot_alloc_inode(inode);
2538 			if (err)
2539 				return err;
2540 		}
2541 	} else {
2542 		err = dquot_reserve_block(inode, 1);
2543 		if (err)
2544 			return err;
2545 	}
2546 
2547 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2548 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2549 		goto enospc;
2550 	}
2551 
2552 	spin_lock(&sbi->stat_lock);
2553 
2554 	valid_block_count = sbi->total_valid_block_count +
2555 					sbi->current_reserved_blocks + 1;
2556 
2557 	if (!__allow_reserved_blocks(sbi, inode, false))
2558 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2559 
2560 	if (F2FS_IO_ALIGNED(sbi))
2561 		valid_block_count += sbi->blocks_per_seg *
2562 				SM_I(sbi)->additional_reserved_segments;
2563 
2564 	user_block_count = sbi->user_block_count;
2565 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2566 		user_block_count -= sbi->unusable_block_count;
2567 
2568 	if (unlikely(valid_block_count > user_block_count)) {
2569 		spin_unlock(&sbi->stat_lock);
2570 		goto enospc;
2571 	}
2572 
2573 	valid_node_count = sbi->total_valid_node_count + 1;
2574 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2575 		spin_unlock(&sbi->stat_lock);
2576 		goto enospc;
2577 	}
2578 
2579 	sbi->total_valid_node_count++;
2580 	sbi->total_valid_block_count++;
2581 	spin_unlock(&sbi->stat_lock);
2582 
2583 	if (inode) {
2584 		if (is_inode)
2585 			f2fs_mark_inode_dirty_sync(inode, true);
2586 		else
2587 			f2fs_i_blocks_write(inode, 1, true, true);
2588 	}
2589 
2590 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2591 	return 0;
2592 
2593 enospc:
2594 	if (is_inode) {
2595 		if (inode)
2596 			dquot_free_inode(inode);
2597 	} else {
2598 		dquot_release_reservation_block(inode, 1);
2599 	}
2600 	return -ENOSPC;
2601 }
2602 
2603 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2604 					struct inode *inode, bool is_inode)
2605 {
2606 	spin_lock(&sbi->stat_lock);
2607 
2608 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2609 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2610 
2611 	sbi->total_valid_node_count--;
2612 	sbi->total_valid_block_count--;
2613 	if (sbi->reserved_blocks &&
2614 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2615 		sbi->current_reserved_blocks++;
2616 
2617 	spin_unlock(&sbi->stat_lock);
2618 
2619 	if (is_inode) {
2620 		dquot_free_inode(inode);
2621 	} else {
2622 		if (unlikely(inode->i_blocks == 0)) {
2623 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2624 				  inode->i_ino,
2625 				  (unsigned long long)inode->i_blocks);
2626 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2627 			return;
2628 		}
2629 		f2fs_i_blocks_write(inode, 1, false, true);
2630 	}
2631 }
2632 
2633 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2634 {
2635 	return sbi->total_valid_node_count;
2636 }
2637 
2638 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2639 {
2640 	percpu_counter_inc(&sbi->total_valid_inode_count);
2641 }
2642 
2643 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2644 {
2645 	percpu_counter_dec(&sbi->total_valid_inode_count);
2646 }
2647 
2648 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2649 {
2650 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2651 }
2652 
2653 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2654 						pgoff_t index, bool for_write)
2655 {
2656 	struct page *page;
2657 
2658 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2659 		if (!for_write)
2660 			page = find_get_page_flags(mapping, index,
2661 							FGP_LOCK | FGP_ACCESSED);
2662 		else
2663 			page = find_lock_page(mapping, index);
2664 		if (page)
2665 			return page;
2666 
2667 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2668 			f2fs_show_injection_info(F2FS_M_SB(mapping),
2669 							FAULT_PAGE_ALLOC);
2670 			return NULL;
2671 		}
2672 	}
2673 
2674 	if (!for_write)
2675 		return grab_cache_page(mapping, index);
2676 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2677 }
2678 
2679 static inline struct page *f2fs_pagecache_get_page(
2680 				struct address_space *mapping, pgoff_t index,
2681 				int fgp_flags, gfp_t gfp_mask)
2682 {
2683 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2684 		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2685 		return NULL;
2686 	}
2687 
2688 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2689 }
2690 
2691 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2692 {
2693 	char *src_kaddr = kmap(src);
2694 	char *dst_kaddr = kmap(dst);
2695 
2696 	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2697 	kunmap(dst);
2698 	kunmap(src);
2699 }
2700 
2701 static inline void f2fs_put_page(struct page *page, int unlock)
2702 {
2703 	if (!page)
2704 		return;
2705 
2706 	if (unlock) {
2707 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2708 		unlock_page(page);
2709 	}
2710 	put_page(page);
2711 }
2712 
2713 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2714 {
2715 	if (dn->node_page)
2716 		f2fs_put_page(dn->node_page, 1);
2717 	if (dn->inode_page && dn->node_page != dn->inode_page)
2718 		f2fs_put_page(dn->inode_page, 0);
2719 	dn->node_page = NULL;
2720 	dn->inode_page = NULL;
2721 }
2722 
2723 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2724 					size_t size)
2725 {
2726 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2727 }
2728 
2729 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2730 						gfp_t flags)
2731 {
2732 	void *entry;
2733 
2734 	entry = kmem_cache_alloc(cachep, flags);
2735 	if (!entry)
2736 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2737 	return entry;
2738 }
2739 
2740 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2741 			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2742 {
2743 	if (nofail)
2744 		return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2745 
2746 	if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) {
2747 		f2fs_show_injection_info(sbi, FAULT_SLAB_ALLOC);
2748 		return NULL;
2749 	}
2750 
2751 	return kmem_cache_alloc(cachep, flags);
2752 }
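
/*
 * Illustrative use of the allocator wrappers above: the nofail flavour
 * retries with __GFP_NOFAIL and never returns NULL, while the plain
 * flavour may fail either genuinely or via FAULT_SLAB_ALLOC injection:
 *
 *	entry = f2fs_kmem_cache_alloc(cachep, GFP_NOFS, false, sbi);
 *	if (!entry)
 *		return -ENOMEM;
 */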
2753 
2754 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2755 {
2756 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2757 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2758 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2759 		get_pages(sbi, F2FS_DIO_READ) ||
2760 		get_pages(sbi, F2FS_DIO_WRITE))
2761 		return true;
2762 
2763 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2764 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2765 		return true;
2766 
2767 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2768 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2769 		return true;
2770 	return false;
2771 }
2772 
2773 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2774 {
2775 	if (sbi->gc_mode == GC_URGENT_HIGH)
2776 		return true;
2777 
2778 	if (is_inflight_io(sbi, type))
2779 		return false;
2780 
2781 	if (sbi->gc_mode == GC_URGENT_MID)
2782 		return true;
2783 
2784 	if (sbi->gc_mode == GC_URGENT_LOW &&
2785 			(type == DISCARD_TIME || type == GC_TIME))
2786 		return true;
2787 
2788 	return f2fs_time_over(sbi, type);
2789 }
2790 
2791 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2792 				unsigned long index, void *item)
2793 {
2794 	while (radix_tree_insert(root, index, item))
2795 		cond_resched();
2796 }
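
/*
 * Note: the loop above assumes the only transient failure is -ENOMEM; it
 * would spin forever on -EEXIST, so callers must guarantee that @index is
 * not already present in @root.
 */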
2797 
2798 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2799 
2800 static inline bool IS_INODE(struct page *page)
2801 {
2802 	struct f2fs_node *p = F2FS_NODE(page);
2803 
2804 	return RAW_IS_INODE(p);
2805 }
2806 
2807 static inline int offset_in_addr(struct f2fs_inode *i)
2808 {
2809 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2810 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2811 }
2812 
2813 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2814 {
2815 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2816 }
2817 
2818 static inline int f2fs_has_extra_attr(struct inode *inode);
2819 static inline block_t data_blkaddr(struct inode *inode,
2820 			struct page *node_page, unsigned int offset)
2821 {
2822 	struct f2fs_node *raw_node;
2823 	__le32 *addr_array;
2824 	int base = 0;
2825 	bool is_inode = IS_INODE(node_page);
2826 
2827 	raw_node = F2FS_NODE(node_page);
2828 
2829 	if (is_inode) {
2830 		if (!inode)
2831 			/* from GC path only */
2832 			base = offset_in_addr(&raw_node->i);
2833 		else if (f2fs_has_extra_attr(inode))
2834 			base = get_extra_isize(inode);
2835 	}
2836 
2837 	addr_array = blkaddr_in_node(raw_node);
2838 	return le32_to_cpu(addr_array[base + offset]);
2839 }
2840 
2841 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2842 {
2843 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2844 }
2845 
2846 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2847 {
2848 	int mask;
2849 
2850 	addr += (nr >> 3);
2851 	mask = 1 << (7 - (nr & 0x07));
2852 	return mask & *addr;
2853 }
2854 
2855 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2856 {
2857 	int mask;
2858 
2859 	addr += (nr >> 3);
2860 	mask = 1 << (7 - (nr & 0x07));
2861 	*addr |= mask;
2862 }
2863 
2864 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2865 {
2866 	int mask;
2867 
2868 	addr += (nr >> 3);
2869 	mask = 1 << (7 - (nr & 0x07));
2870 	*addr &= ~mask;
2871 }
2872 
2873 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2874 {
2875 	int mask;
2876 	int ret;
2877 
2878 	addr += (nr >> 3);
2879 	mask = 1 << (7 - (nr & 0x07));
2880 	ret = mask & *addr;
2881 	*addr |= mask;
2882 	return ret;
2883 }
2884 
2885 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2886 {
2887 	int mask;
2888 	int ret;
2889 
2890 	addr += (nr >> 3);
2891 	mask = 1 << (7 - (nr & 0x07));
2892 	ret = mask & *addr;
2893 	*addr &= ~mask;
2894 	return ret;
2895 }
2896 
2897 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2898 {
2899 	int mask;
2900 
2901 	addr += (nr >> 3);
2902 	mask = 1 << (7 - (nr & 0x07));
2903 	*addr ^= mask;
2904 }
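
/*
 * Worked example for the byte-wise bit helpers above, which address bits
 * MSB-first within each byte (bit 0 is mask 0x80 of byte 0) to match the
 * on-disk bitmap layout: for nr == 10, addr advances by 10 >> 3 == 1 byte
 * and mask == 1 << (7 - (10 & 7)) == 0x20, so f2fs_set_bit(10, addr) sets
 * bit 5 of byte 1.
 */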
2905 
2906 /*
2907  * On-disk inode flags (f2fs_inode::i_flags)
2908  */
2909 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
2910 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
2911 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
2912 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
2913 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
2914 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
2915 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
2916 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
2917 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
2918 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
2919 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
2920 
2921 /* Flags that should be inherited by new inodes from their parent. */
2922 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2923 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2924 			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2925 
2926 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2927 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2928 				F2FS_CASEFOLD_FL))
2929 
2930 /* Flags appropriate for inodes that are neither directories nor regular files. */
2931 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2932 
2933 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2934 {
2935 	if (S_ISDIR(mode))
2936 		return flags;
2937 	else if (S_ISREG(mode))
2938 		return flags & F2FS_REG_FLMASK;
2939 	else
2940 		return flags & F2FS_OTHER_FLMASK;
2941 }
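
/*
 * For example, a symlink is neither a directory nor a regular file, so
 * only the F2FS_OTHER_FLMASK bits survive:
 *
 *	f2fs_mask_flags(S_IFLNK, F2FS_NOATIME_FL | F2FS_COMPR_FL)
 *		== F2FS_NOATIME_FL
 */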
2942 
2943 static inline void __mark_inode_dirty_flag(struct inode *inode,
2944 						int flag, bool set)
2945 {
2946 	switch (flag) {
2947 	case FI_INLINE_XATTR:
2948 	case FI_INLINE_DATA:
2949 	case FI_INLINE_DENTRY:
2950 	case FI_NEW_INODE:
2951 		if (set)
2952 			return;
2953 		fallthrough;
2954 	case FI_DATA_EXIST:
2955 	case FI_INLINE_DOTS:
2956 	case FI_PIN_FILE:
2957 	case FI_COMPRESS_RELEASED:
2958 		f2fs_mark_inode_dirty_sync(inode, true);
2959 	}
2960 }
2961 
2962 static inline void set_inode_flag(struct inode *inode, int flag)
2963 {
2964 	set_bit(flag, F2FS_I(inode)->flags);
2965 	__mark_inode_dirty_flag(inode, flag, true);
2966 }
2967 
2968 static inline int is_inode_flag_set(struct inode *inode, int flag)
2969 {
2970 	return test_bit(flag, F2FS_I(inode)->flags);
2971 }
2972 
2973 static inline void clear_inode_flag(struct inode *inode, int flag)
2974 {
2975 	clear_bit(flag, F2FS_I(inode)->flags);
2976 	__mark_inode_dirty_flag(inode, flag, false);
2977 }
2978 
2979 static inline bool f2fs_verity_in_progress(struct inode *inode)
2980 {
2981 	return IS_ENABLED(CONFIG_FS_VERITY) &&
2982 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2983 }
2984 
2985 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2986 {
2987 	F2FS_I(inode)->i_acl_mode = mode;
2988 	set_inode_flag(inode, FI_ACL_MODE);
2989 	f2fs_mark_inode_dirty_sync(inode, false);
2990 }
2991 
2992 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2993 {
2994 	if (inc)
2995 		inc_nlink(inode);
2996 	else
2997 		drop_nlink(inode);
2998 	f2fs_mark_inode_dirty_sync(inode, true);
2999 }
3000 
3001 static inline void f2fs_i_blocks_write(struct inode *inode,
3002 					block_t diff, bool add, bool claim)
3003 {
3004 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3005 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3006 
3007 	/* add && claim must be paired with a prior dquot_reserve_block() */
3008 	if (add) {
3009 		if (claim)
3010 			dquot_claim_block(inode, diff);
3011 		else
3012 			dquot_alloc_block_nofail(inode, diff);
3013 	} else {
3014 		dquot_free_block(inode, diff);
3015 	}
3016 
3017 	f2fs_mark_inode_dirty_sync(inode, true);
3018 	if (clean || recover)
3019 		set_inode_flag(inode, FI_AUTO_RECOVER);
3020 }
3021 
3022 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
3023 {
3024 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3025 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3026 
3027 	if (i_size_read(inode) == i_size)
3028 		return;
3029 
3030 	i_size_write(inode, i_size);
3031 	f2fs_mark_inode_dirty_sync(inode, true);
3032 	if (clean || recover)
3033 		set_inode_flag(inode, FI_AUTO_RECOVER);
3034 }
3035 
3036 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
3037 {
3038 	F2FS_I(inode)->i_current_depth = depth;
3039 	f2fs_mark_inode_dirty_sync(inode, true);
3040 }
3041 
3042 static inline void f2fs_i_gc_failures_write(struct inode *inode,
3043 					unsigned int count)
3044 {
3045 	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
3046 	f2fs_mark_inode_dirty_sync(inode, true);
3047 }
3048 
3049 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
3050 {
3051 	F2FS_I(inode)->i_xattr_nid = xnid;
3052 	f2fs_mark_inode_dirty_sync(inode, true);
3053 }
3054 
3055 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
3056 {
3057 	F2FS_I(inode)->i_pino = pino;
3058 	f2fs_mark_inode_dirty_sync(inode, true);
3059 }
3060 
3061 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
3062 {
3063 	struct f2fs_inode_info *fi = F2FS_I(inode);
3064 
3065 	if (ri->i_inline & F2FS_INLINE_XATTR)
3066 		set_bit(FI_INLINE_XATTR, fi->flags);
3067 	if (ri->i_inline & F2FS_INLINE_DATA)
3068 		set_bit(FI_INLINE_DATA, fi->flags);
3069 	if (ri->i_inline & F2FS_INLINE_DENTRY)
3070 		set_bit(FI_INLINE_DENTRY, fi->flags);
3071 	if (ri->i_inline & F2FS_DATA_EXIST)
3072 		set_bit(FI_DATA_EXIST, fi->flags);
3073 	if (ri->i_inline & F2FS_INLINE_DOTS)
3074 		set_bit(FI_INLINE_DOTS, fi->flags);
3075 	if (ri->i_inline & F2FS_EXTRA_ATTR)
3076 		set_bit(FI_EXTRA_ATTR, fi->flags);
3077 	if (ri->i_inline & F2FS_PIN_FILE)
3078 		set_bit(FI_PIN_FILE, fi->flags);
3079 	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3080 		set_bit(FI_COMPRESS_RELEASED, fi->flags);
3081 }
3082 
3083 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
3084 {
3085 	ri->i_inline = 0;
3086 
3087 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3088 		ri->i_inline |= F2FS_INLINE_XATTR;
3089 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
3090 		ri->i_inline |= F2FS_INLINE_DATA;
3091 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3092 		ri->i_inline |= F2FS_INLINE_DENTRY;
3093 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
3094 		ri->i_inline |= F2FS_DATA_EXIST;
3095 	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
3096 		ri->i_inline |= F2FS_INLINE_DOTS;
3097 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3098 		ri->i_inline |= F2FS_EXTRA_ATTR;
3099 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3100 		ri->i_inline |= F2FS_PIN_FILE;
3101 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3102 		ri->i_inline |= F2FS_COMPRESS_RELEASED;
3103 }
3104 
3105 static inline int f2fs_has_extra_attr(struct inode *inode)
3106 {
3107 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3108 }
3109 
3110 static inline int f2fs_has_inline_xattr(struct inode *inode)
3111 {
3112 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
3113 }
3114 
3115 static inline int f2fs_compressed_file(struct inode *inode)
3116 {
3117 	return S_ISREG(inode->i_mode) &&
3118 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3119 }
3120 
3121 static inline bool f2fs_need_compress_data(struct inode *inode)
3122 {
3123 	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3124 
3125 	if (!f2fs_compressed_file(inode))
3126 		return false;
3127 
3128 	if (compress_mode == COMPR_MODE_FS)
3129 		return true;
3130 	else if (compress_mode == COMPR_MODE_USER &&
3131 			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3132 		return true;
3133 
3134 	return false;
3135 }
3136 
3137 static inline unsigned int addrs_per_inode(struct inode *inode)
3138 {
3139 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
3140 				get_inline_xattr_addrs(inode);
3141 
3142 	if (!f2fs_compressed_file(inode))
3143 		return addrs;
3144 	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3145 }
3146 
3147 static inline unsigned int addrs_per_block(struct inode *inode)
3148 {
3149 	if (!f2fs_compressed_file(inode))
3150 		return DEF_ADDRS_PER_BLOCK;
3151 	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
3152 }
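
/*
 * Worked example (assuming 4KB blocks, where DEF_ADDRS_PER_BLOCK is 1018):
 * for a compressed file with a 4-page cluster, addrs_per_block() returns
 * ALIGN_DOWN(1018, 4) == 1016, so the addresses in one direct node block
 * always cover whole clusters.
 */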
3153 
3154 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
3155 {
3156 	struct f2fs_inode *ri = F2FS_INODE(page);
3157 
3158 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3159 					get_inline_xattr_addrs(inode)]);
3160 }
3161 
3162 static inline int inline_xattr_size(struct inode *inode)
3163 {
3164 	if (f2fs_has_inline_xattr(inode))
3165 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
3166 	return 0;
3167 }
3168 
3169 static inline int f2fs_has_inline_data(struct inode *inode)
3170 {
3171 	return is_inode_flag_set(inode, FI_INLINE_DATA);
3172 }
3173 
3174 static inline int f2fs_exist_data(struct inode *inode)
3175 {
3176 	return is_inode_flag_set(inode, FI_DATA_EXIST);
3177 }
3178 
3179 static inline int f2fs_has_inline_dots(struct inode *inode)
3180 {
3181 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
3182 }
3183 
3184 static inline int f2fs_is_mmap_file(struct inode *inode)
3185 {
3186 	return is_inode_flag_set(inode, FI_MMAP_FILE);
3187 }
3188 
3189 static inline bool f2fs_is_pinned_file(struct inode *inode)
3190 {
3191 	return is_inode_flag_set(inode, FI_PIN_FILE);
3192 }
3193 
3194 static inline bool f2fs_is_atomic_file(struct inode *inode)
3195 {
3196 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3197 }
3198 
3199 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
3200 {
3201 	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
3202 }
3203 
3204 static inline bool f2fs_is_volatile_file(struct inode *inode)
3205 {
3206 	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
3207 }
3208 
3209 static inline bool f2fs_is_first_block_written(struct inode *inode)
3210 {
3211 	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
3212 }
3213 
3214 static inline bool f2fs_is_drop_cache(struct inode *inode)
3215 {
3216 	return is_inode_flag_set(inode, FI_DROP_CACHE);
3217 }
3218 
3219 static inline void *inline_data_addr(struct inode *inode, struct page *page)
3220 {
3221 	struct f2fs_inode *ri = F2FS_INODE(page);
3222 	int extra_size = get_extra_isize(inode);
3223 
3224 	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
3225 }
3226 
3227 static inline int f2fs_has_inline_dentry(struct inode *inode)
3228 {
3229 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3230 }
3231 
3232 static inline int is_file(struct inode *inode, int type)
3233 {
3234 	return F2FS_I(inode)->i_advise & type;
3235 }
3236 
3237 static inline void set_file(struct inode *inode, int type)
3238 {
3239 	if (is_file(inode, type))
3240 		return;
3241 	F2FS_I(inode)->i_advise |= type;
3242 	f2fs_mark_inode_dirty_sync(inode, true);
3243 }
3244 
3245 static inline void clear_file(struct inode *inode, int type)
3246 {
3247 	if (!is_file(inode, type))
3248 		return;
3249 	F2FS_I(inode)->i_advise &= ~type;
3250 	f2fs_mark_inode_dirty_sync(inode, true);
3251 }
3252 
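/*
 * is_file()/set_file()/clear_file() manipulate the on-disk i_advise hint
 * bits and dirty the inode only when a bit actually changes.  A minimal
 * usage sketch (the FADVISE_* bit names are defined earlier in this
 * header; treat the exact wrapper names as an example, not a prescription):
 *
 *	set_file(inode, FADVISE_COLD_BIT);		// mark file as cold
 *	if (is_file(inode, FADVISE_COLD_BIT))
 *		;					// hint is set
 *	clear_file(inode, FADVISE_COLD_BIT);		// drop the hint again
 */
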
3253 static inline bool f2fs_is_time_consistent(struct inode *inode)
3254 {
3255 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
3256 		return false;
3257 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
3258 		return false;
3259 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
3260 		return false;
3261 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
3262 						&F2FS_I(inode)->i_crtime))
3263 		return false;
3264 	return true;
3265 }
3266 
3267 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3268 {
3269 	bool ret;
3270 
3271 	if (dsync) {
3272 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3273 
3274 		spin_lock(&sbi->inode_lock[DIRTY_META]);
3275 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
3276 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
3277 		return ret;
3278 	}
3279 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3280 			file_keep_isize(inode) ||
3281 			i_size_read(inode) & ~PAGE_MASK)
3282 		return false;
3283 
3284 	if (!f2fs_is_time_consistent(inode))
3285 		return false;
3286 
3287 	spin_lock(&F2FS_I(inode)->i_size_lock);
3288 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3289 	spin_unlock(&F2FS_I(inode)->i_size_lock);
3290 
3291 	return ret;
3292 }
3293 
3294 static inline bool f2fs_readonly(struct super_block *sb)
3295 {
3296 	return sb_rdonly(sb);
3297 }
3298 
3299 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3300 {
3301 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3302 }
3303 
3304 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3305 {
3306 	if (len == 1 && name[0] == '.')
3307 		return true;
3308 
3309 	if (len == 2 && name[0] == '.' && name[1] == '.')
3310 		return true;
3311 
3312 	return false;
3313 }
3314 
3315 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3316 					size_t size, gfp_t flags)
3317 {
3318 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
3319 		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
3320 		return NULL;
3321 	}
3322 
3323 	return kmalloc(size, flags);
3324 }
3325 
3326 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3327 					size_t size, gfp_t flags)
3328 {
3329 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3330 }
3331 
3332 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3333 					size_t size, gfp_t flags)
3334 {
3335 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3336 		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3337 		return NULL;
3338 	}
3339 
3340 	return kvmalloc(size, flags);
3341 }
3342 
3343 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3344 					size_t size, gfp_t flags)
3345 {
3346 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3347 }
3348 
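/*
 * Minimal usage sketch for the allocation wrappers above: they behave like
 * kmalloc()/kvmalloc() but honour FAULT_KMALLOC/FAULT_KVMALLOC injection,
 * so callers must handle NULL exactly as they would for the plain
 * allocators:
 *
 *	void *buf = f2fs_kmalloc(sbi, size, GFP_NOFS);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */
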
3349 static inline int get_extra_isize(struct inode *inode)
3350 {
3351 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3352 }
3353 
3354 static inline int get_inline_xattr_addrs(struct inode *inode)
3355 {
3356 	return F2FS_I(inode)->i_inline_xattr_size;
3357 }
3358 
3359 #define f2fs_get_inode_mode(i) \
3360 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3361 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3362 
3363 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3364 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3365 	offsetof(struct f2fs_inode, i_extra_isize))	\
3366 
3367 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3368 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3369 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3370 		sizeof((f2fs_inode)->field))			\
3371 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
3372 
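/*
 * Illustrative use of F2FS_FITS_IN_INODE() (the call site shown is an
 * example of the pattern used by the inode code, not a verbatim quote):
 * check that the on-disk inode's extra attribute area is large enough to
 * hold a field before touching it, e.g.
 *
 *	if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 */
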
3373 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
3374 
3375 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3376 
3377 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3378 					block_t blkaddr, int type);
3379 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3380 					block_t blkaddr, int type)
3381 {
3382 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3383 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3384 			 blkaddr, type);
3385 		f2fs_bug_on(sbi, 1);
3386 	}
3387 }
3388 
3389 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3390 {
3391 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3392 			blkaddr == COMPRESS_ADDR)
3393 		return false;
3394 	return true;
3395 }
3396 
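/*
 * Note on the sentinel addresses tested above (values defined elsewhere in
 * the f2fs headers): NULL_ADDR marks an unallocated slot, NEW_ADDR a block
 * that is reserved but not yet written, and COMPRESS_ADDR the first slot of
 * a compressed cluster.  A typical validation sequence might therefore
 * look like:
 *
 *	if (__is_valid_data_blkaddr(blkaddr))
 *		verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
 */
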
3397 /*
3398  * file.c
3399  */
3400 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3401 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3402 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3403 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3404 int f2fs_truncate(struct inode *inode);
3405 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
3406 		 struct kstat *stat, u32 request_mask, unsigned int flags);
3407 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
3408 		 struct iattr *attr);
3409 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3410 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3411 int f2fs_precache_extents(struct inode *inode);
3412 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
3413 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3414 		      struct dentry *dentry, struct fileattr *fa);
3415 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3416 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3417 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3418 int f2fs_pin_file_control(struct inode *inode, bool inc);
3419 
3420 /*
3421  * inode.c
3422  */
3423 void f2fs_set_inode_flags(struct inode *inode);
3424 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3425 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3426 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3427 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3428 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3429 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3430 void f2fs_update_inode_page(struct inode *inode);
3431 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3432 void f2fs_evict_inode(struct inode *inode);
3433 void f2fs_handle_failed_inode(struct inode *inode);
3434 
3435 /*
3436  * namei.c
3437  */
3438 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3439 							bool hot, bool set);
3440 struct dentry *f2fs_get_parent(struct dentry *child);
3441 
3442 /*
3443  * dir.c
3444  */
3445 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3446 int f2fs_init_casefolded_name(const struct inode *dir,
3447 			      struct f2fs_filename *fname);
3448 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3449 			int lookup, struct f2fs_filename *fname);
3450 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3451 			struct f2fs_filename *fname);
3452 void f2fs_free_filename(struct f2fs_filename *fname);
3453 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3454 			const struct f2fs_filename *fname, int *max_slots);
3455 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3456 			unsigned int start_pos, struct fscrypt_str *fstr);
3457 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3458 			struct f2fs_dentry_ptr *d);
3459 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3460 			const struct f2fs_filename *fname, struct page *dpage);
3461 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3462 			unsigned int current_depth);
3463 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3464 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3465 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3466 					 const struct f2fs_filename *fname,
3467 					 struct page **res_page);
3468 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3469 			const struct qstr *child, struct page **res_page);
3470 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3471 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3472 			struct page **page);
3473 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3474 			struct page *page, struct inode *inode);
3475 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3476 			  const struct f2fs_filename *fname);
3477 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3478 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3479 			unsigned int bit_pos);
3480 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3481 			struct inode *inode, nid_t ino, umode_t mode);
3482 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3483 			struct inode *inode, nid_t ino, umode_t mode);
3484 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3485 			struct inode *inode, nid_t ino, umode_t mode);
3486 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3487 			struct inode *dir, struct inode *inode);
3488 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3489 bool f2fs_empty_dir(struct inode *dir);
3490 
3491 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3492 {
3493 	if (fscrypt_is_nokey_name(dentry))
3494 		return -ENOKEY;
3495 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3496 				inode, inode->i_ino, inode->i_mode);
3497 }
3498 
3499 /*
3500  * super.c
3501  */
3502 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3503 void f2fs_inode_synced(struct inode *inode);
3504 int f2fs_dquot_initialize(struct inode *inode);
3505 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3506 int f2fs_quota_sync(struct super_block *sb, int type);
3507 loff_t max_file_blocks(struct inode *inode);
3508 void f2fs_quota_off_umount(struct super_block *sb);
3509 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3510 int f2fs_sync_fs(struct super_block *sb, int sync);
3511 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3512 
3513 /*
3514  * hash.c
3515  */
3516 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3517 
3518 /*
3519  * node.c
3520  */
3521 struct node_info;
3522 
3523 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3524 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3525 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3526 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3527 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3528 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3529 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3530 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3531 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3532 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3533 				struct node_info *ni, bool checkpoint_context);
3534 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3535 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3536 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3537 int f2fs_truncate_xattr_node(struct inode *inode);
3538 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3539 					unsigned int seq_id);
3540 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
3541 int f2fs_remove_inode_page(struct inode *inode);
3542 struct page *f2fs_new_inode_page(struct inode *inode);
3543 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3544 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3545 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3546 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3547 int f2fs_move_node_page(struct page *node_page, int gc_type);
3548 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3549 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3550 			struct writeback_control *wbc, bool atomic,
3551 			unsigned int *seq_id);
3552 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3553 			struct writeback_control *wbc,
3554 			bool do_balance, enum iostat_type io_type);
3555 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3556 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3557 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3558 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3559 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3560 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3561 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3562 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3563 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3564 			unsigned int segno, struct f2fs_summary_block *sum);
3565 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
3566 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3567 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3568 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3569 int __init f2fs_create_node_manager_caches(void);
3570 void f2fs_destroy_node_manager_caches(void);
3571 
3572 /*
3573  * segment.c
3574  */
3575 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3576 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3577 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3578 void f2fs_drop_inmem_pages(struct inode *inode);
3579 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3580 int f2fs_commit_inmem_pages(struct inode *inode);
3581 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3582 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3583 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3584 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3585 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3586 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3587 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3588 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3589 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
3590 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3591 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3592 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3593 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3594 					struct cp_control *cpc);
3595 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3596 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3597 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3598 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3599 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3600 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3601 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3602 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3603 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3604 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3605 			unsigned int *newseg, bool new_sec, int dir);
3606 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3607 					unsigned int start, unsigned int end);
3608 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3609 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3610 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3611 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3612 					struct cp_control *cpc);
3613 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3614 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3615 					block_t blk_addr);
3616 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3617 						enum iostat_type io_type);
3618 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3619 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3620 			struct f2fs_io_info *fio);
3621 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3622 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3623 			block_t old_blkaddr, block_t new_blkaddr,
3624 			bool recover_curseg, bool recover_newaddr,
3625 			bool from_gc);
3626 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3627 			block_t old_addr, block_t new_addr,
3628 			unsigned char version, bool recover_curseg,
3629 			bool recover_newaddr);
3630 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3631 			block_t old_blkaddr, block_t *new_blkaddr,
3632 			struct f2fs_summary *sum, int type,
3633 			struct f2fs_io_info *fio);
3634 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3635 					block_t blkaddr, unsigned int blkcnt);
3636 void f2fs_wait_on_page_writeback(struct page *page,
3637 			enum page_type type, bool ordered, bool locked);
3638 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3639 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3640 								block_t len);
3641 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3642 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3643 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3644 			unsigned int val, int alloc);
3645 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3646 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3647 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3648 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3649 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3650 int __init f2fs_create_segment_manager_caches(void);
3651 void f2fs_destroy_segment_manager_caches(void);
3652 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3653 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3654 			unsigned int segno);
3655 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3656 			unsigned int segno);
3657 
3658 #define DEF_FRAGMENT_SIZE	4
3659 #define MIN_FRAGMENT_SIZE	1
3660 #define MAX_FRAGMENT_SIZE	512
3661 
3662 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
3663 {
3664 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
3665 		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
3666 }
3667 
3668 /*
3669  * checkpoint.c
3670  */
3671 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
3672 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3673 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3674 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3675 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3676 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3677 					block_t blkaddr, int type);
3678 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3679 			int type, bool sync);
3680 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
3681 							unsigned int ra_blocks);
3682 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3683 			long nr_to_write, enum iostat_type io_type);
3684 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3685 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3686 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3687 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3688 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3689 					unsigned int devidx, int type);
3690 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3691 					unsigned int devidx, int type);
3692 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3693 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3694 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3695 void f2fs_add_orphan_inode(struct inode *inode);
3696 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3697 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3698 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3699 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
3700 void f2fs_remove_dirty_inode(struct inode *inode);
3701 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3702 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3703 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3704 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3705 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3706 int __init f2fs_create_checkpoint_caches(void);
3707 void f2fs_destroy_checkpoint_caches(void);
3708 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3709 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3710 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3711 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3712 
3713 /*
3714  * data.c
3715  */
3716 int __init f2fs_init_bioset(void);
3717 void f2fs_destroy_bioset(void);
3718 int f2fs_init_bio_entry_cache(void);
3719 void f2fs_destroy_bio_entry_cache(void);
3720 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3721 				struct bio *bio, enum page_type type);
3722 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3723 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3724 				struct inode *inode, struct page *page,
3725 				nid_t ino, enum page_type type);
3726 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3727 					struct bio **bio, struct page *page);
3728 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3729 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3730 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3731 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3732 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3733 		block_t blk_addr, sector_t *sector);
3734 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3735 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3736 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3737 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3738 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3739 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3740 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3741 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3742 			int op_flags, bool for_write);
3743 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3744 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3745 			bool for_write);
3746 struct page *f2fs_get_new_data_page(struct inode *inode,
3747 			struct page *ipage, pgoff_t index, bool new_i_size);
3748 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3749 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3750 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3751 			int create, int flag);
3752 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3753 			u64 start, u64 len);
3754 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3755 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3756 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3757 int f2fs_write_single_data_page(struct page *page, int *submitted,
3758 				struct bio **bio, sector_t *last_block,
3759 				struct writeback_control *wbc,
3760 				enum iostat_type io_type,
3761 				int compr_blocks, bool allow_balance);
3762 void f2fs_write_failed(struct inode *inode, loff_t to);
3763 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
3764 int f2fs_release_page(struct page *page, gfp_t wait);
3765 #ifdef CONFIG_MIGRATION
3766 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3767 			struct page *page, enum migrate_mode mode);
3768 #endif
3769 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3770 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3771 int f2fs_init_post_read_processing(void);
3772 void f2fs_destroy_post_read_processing(void);
3773 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3774 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3775 extern const struct iomap_ops f2fs_iomap_ops;
3776 
3777 /*
3778  * gc.c
3779  */
3780 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3781 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3782 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3783 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
3784 			unsigned int segno);
3785 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3786 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3787 int __init f2fs_create_garbage_collection_cache(void);
3788 void f2fs_destroy_garbage_collection_cache(void);
3789 
3790 /*
3791  * recovery.c
3792  */
3793 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3794 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3795 int __init f2fs_create_recovery_cache(void);
3796 void f2fs_destroy_recovery_cache(void);
3797 
3798 /*
3799  * debug.c
3800  */
3801 #ifdef CONFIG_F2FS_STAT_FS
3802 struct f2fs_stat_info {
3803 	struct list_head stat_list;
3804 	struct f2fs_sb_info *sbi;
3805 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3806 	int main_area_segs, main_area_sections, main_area_zones;
3807 	unsigned long long hit_largest, hit_cached, hit_rbtree;
3808 	unsigned long long hit_total, total_ext;
3809 	int ext_tree, zombie_tree, ext_node;
3810 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3811 	int ndirty_data, ndirty_qdata;
3812 	int inmem_pages;
3813 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3814 	int nats, dirty_nats, sits, dirty_sits;
3815 	int free_nids, avail_nids, alloc_nids;
3816 	int total_count, utilization;
3817 	int bg_gc, nr_wb_cp_data, nr_wb_data;
3818 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3819 	int nr_dio_read, nr_dio_write;
3820 	unsigned int io_skip_bggc, other_skip_bggc;
3821 	int nr_flushing, nr_flushed, flush_list_empty;
3822 	int nr_discarding, nr_discarded;
3823 	int nr_discard_cmd;
3824 	unsigned int undiscard_blks;
3825 	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3826 	unsigned int cur_ckpt_time, peak_ckpt_time;
3827 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3828 	int compr_inode;
3829 	unsigned long long compr_blocks;
3830 	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3831 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3832 	unsigned int bimodal, avg_vblocks;
3833 	int util_free, util_valid, util_invalid;
3834 	int rsvd_segs, overp_segs;
3835 	int dirty_count, node_pages, meta_pages, compress_pages;
3836 	int compress_page_hit;
3837 	int prefree_count, call_count, cp_count, bg_cp_count;
3838 	int tot_segs, node_segs, data_segs, free_segs, free_secs;
3839 	int bg_node_segs, bg_data_segs;
3840 	int tot_blks, data_blks, node_blks;
3841 	int bg_data_blks, bg_node_blks;
3842 	unsigned long long skipped_atomic_files[2];
3843 	int curseg[NR_CURSEG_TYPE];
3844 	int cursec[NR_CURSEG_TYPE];
3845 	int curzone[NR_CURSEG_TYPE];
3846 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3847 	unsigned int full_seg[NR_CURSEG_TYPE];
3848 	unsigned int valid_blks[NR_CURSEG_TYPE];
3849 
3850 	unsigned int meta_count[META_MAX];
3851 	unsigned int segment_count[2];
3852 	unsigned int block_count[2];
3853 	unsigned int inplace_count;
3854 	unsigned long long base_mem, cache_mem, page_mem;
3855 };
3856 
3857 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3858 {
3859 	return (struct f2fs_stat_info *)sbi->stat_info;
3860 }
3861 
3862 #define stat_inc_cp_count(si)		((si)->cp_count++)
3863 #define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
3864 #define stat_inc_call_count(si)		((si)->call_count++)
3865 #define stat_inc_bggc_count(si)		((si)->bg_gc++)
3866 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
3867 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
3868 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
3869 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
3870 #define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
3871 #define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
3872 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
3873 #define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
3874 #define stat_inc_inline_xattr(inode)					\
3875 	do {								\
3876 		if (f2fs_has_inline_xattr(inode))			\
3877 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
3878 	} while (0)
3879 #define stat_dec_inline_xattr(inode)					\
3880 	do {								\
3881 		if (f2fs_has_inline_xattr(inode))			\
3882 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
3883 	} while (0)
3884 #define stat_inc_inline_inode(inode)					\
3885 	do {								\
3886 		if (f2fs_has_inline_data(inode))			\
3887 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
3888 	} while (0)
3889 #define stat_dec_inline_inode(inode)					\
3890 	do {								\
3891 		if (f2fs_has_inline_data(inode))			\
3892 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
3893 	} while (0)
3894 #define stat_inc_inline_dir(inode)					\
3895 	do {								\
3896 		if (f2fs_has_inline_dentry(inode))			\
3897 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
3898 	} while (0)
3899 #define stat_dec_inline_dir(inode)					\
3900 	do {								\
3901 		if (f2fs_has_inline_dentry(inode))			\
3902 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
3903 	} while (0)
3904 #define stat_inc_compr_inode(inode)					\
3905 	do {								\
3906 		if (f2fs_compressed_file(inode))			\
3907 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
3908 	} while (0)
3909 #define stat_dec_compr_inode(inode)					\
3910 	do {								\
3911 		if (f2fs_compressed_file(inode))			\
3912 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
3913 	} while (0)
3914 #define stat_add_compr_blocks(inode, blocks)				\
3915 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3916 #define stat_sub_compr_blocks(inode, blocks)				\
3917 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3918 #define stat_inc_meta_count(sbi, blkaddr)				\
3919 	do {								\
3920 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
3921 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
3922 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
3923 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
3924 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
3925 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
3926 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
3927 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
3928 	} while (0)
3929 #define stat_inc_seg_type(sbi, curseg)					\
3930 		((sbi)->segment_count[(curseg)->alloc_type]++)
3931 #define stat_inc_block_count(sbi, curseg)				\
3932 		((sbi)->block_count[(curseg)->alloc_type]++)
3933 #define stat_inc_inplace_blocks(sbi)					\
3934 		(atomic_inc(&(sbi)->inplace_count))
3935 #define stat_update_max_atomic_write(inode)				\
3936 	do {								\
3937 		int cur = F2FS_I_SB(inode)->atomic_files;	\
3938 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
3939 		if (cur > max)						\
3940 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
3941 	} while (0)
3942 #define stat_inc_volatile_write(inode)					\
3943 		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3944 #define stat_dec_volatile_write(inode)					\
3945 		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
3946 #define stat_update_max_volatile_write(inode)				\
3947 	do {								\
3948 		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
3949 		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
3950 		if (cur > max)						\
3951 			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
3952 	} while (0)
3953 #define stat_inc_seg_count(sbi, type, gc_type)				\
3954 	do {								\
3955 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3956 		si->tot_segs++;						\
3957 		if ((type) == SUM_TYPE_DATA) {				\
3958 			si->data_segs++;				\
3959 			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
3960 		} else {						\
3961 			si->node_segs++;				\
3962 			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
3963 		}							\
3964 	} while (0)
3965 
3966 #define stat_inc_tot_blk_count(si, blks)				\
3967 	((si)->tot_blks += (blks))
3968 
3969 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
3970 	do {								\
3971 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3972 		stat_inc_tot_blk_count(si, blks);			\
3973 		si->data_blks += (blks);				\
3974 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3975 	} while (0)
3976 
3977 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
3978 	do {								\
3979 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3980 		stat_inc_tot_blk_count(si, blks);			\
3981 		si->node_blks += (blks);				\
3982 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3983 	} while (0)
3984 
3985 int f2fs_build_stats(struct f2fs_sb_info *sbi);
3986 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
3987 void __init f2fs_create_root_stats(void);
3988 void f2fs_destroy_root_stats(void);
3989 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
3990 #else
3991 #define stat_inc_cp_count(si)				do { } while (0)
3992 #define stat_inc_bg_cp_count(si)			do { } while (0)
3993 #define stat_inc_call_count(si)				do { } while (0)
3994 #define stat_inc_bggc_count(si)				do { } while (0)
3995 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
3996 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
3997 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
3998 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
3999 #define stat_inc_total_hit(sbi)				do { } while (0)
4000 #define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
4001 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
4002 #define stat_inc_cached_node_hit(sbi)			do { } while (0)
4003 #define stat_inc_inline_xattr(inode)			do { } while (0)
4004 #define stat_dec_inline_xattr(inode)			do { } while (0)
4005 #define stat_inc_inline_inode(inode)			do { } while (0)
4006 #define stat_dec_inline_inode(inode)			do { } while (0)
4007 #define stat_inc_inline_dir(inode)			do { } while (0)
4008 #define stat_dec_inline_dir(inode)			do { } while (0)
4009 #define stat_inc_compr_inode(inode)			do { } while (0)
4010 #define stat_dec_compr_inode(inode)			do { } while (0)
4011 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
4012 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
4013 #define stat_update_max_atomic_write(inode)		do { } while (0)
4014 #define stat_inc_volatile_write(inode)			do { } while (0)
4015 #define stat_dec_volatile_write(inode)			do { } while (0)
4016 #define stat_update_max_volatile_write(inode)		do { } while (0)
4017 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
4018 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
4019 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
4020 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
4021 #define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
4022 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
4023 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
4024 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
4025 
4026 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
4027 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
4028 static inline void __init f2fs_create_root_stats(void) { }
4029 static inline void f2fs_destroy_root_stats(void) { }
4030 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
4031 #endif
4032 
4033 extern const struct file_operations f2fs_dir_operations;
4034 extern const struct file_operations f2fs_file_operations;
4035 extern const struct inode_operations f2fs_file_inode_operations;
4036 extern const struct address_space_operations f2fs_dblock_aops;
4037 extern const struct address_space_operations f2fs_node_aops;
4038 extern const struct address_space_operations f2fs_meta_aops;
4039 extern const struct inode_operations f2fs_dir_inode_operations;
4040 extern const struct inode_operations f2fs_symlink_inode_operations;
4041 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
4042 extern const struct inode_operations f2fs_special_inode_operations;
4043 extern struct kmem_cache *f2fs_inode_entry_slab;
4044 
4045 /*
4046  * inline.c
4047  */
4048 bool f2fs_may_inline_data(struct inode *inode);
4049 bool f2fs_may_inline_dentry(struct inode *inode);
4050 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
4051 void f2fs_truncate_inline_inode(struct inode *inode,
4052 						struct page *ipage, u64 from);
4053 int f2fs_read_inline_data(struct inode *inode, struct page *page);
4054 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
4055 int f2fs_convert_inline_inode(struct inode *inode);
4056 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
4057 int f2fs_write_inline_data(struct inode *inode, struct page *page);
4058 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
4059 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
4060 					const struct f2fs_filename *fname,
4061 					struct page **res_page);
4062 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
4063 			struct page *ipage);
4064 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
4065 			struct inode *inode, nid_t ino, umode_t mode);
4066 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
4067 				struct page *page, struct inode *dir,
4068 				struct inode *inode);
4069 bool f2fs_empty_inline_dir(struct inode *dir);
4070 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4071 			struct fscrypt_str *fstr);
4072 int f2fs_inline_data_fiemap(struct inode *inode,
4073 			struct fiemap_extent_info *fieinfo,
4074 			__u64 start, __u64 len);
4075 
4076 /*
4077  * shrinker.c
4078  */
4079 unsigned long f2fs_shrink_count(struct shrinker *shrink,
4080 			struct shrink_control *sc);
4081 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
4082 			struct shrink_control *sc);
4083 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
4084 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
4085 
4086 /*
4087  * extent_cache.c
4088  */
4089 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
4090 				struct rb_entry *cached_re, unsigned int ofs);
4091 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
4092 				struct rb_root_cached *root,
4093 				struct rb_node **parent,
4094 				unsigned long long key, bool *left_most);
4095 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
4096 				struct rb_root_cached *root,
4097 				struct rb_node **parent,
4098 				unsigned int ofs, bool *leftmost);
4099 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
4100 		struct rb_entry *cached_re, unsigned int ofs,
4101 		struct rb_entry **prev_entry, struct rb_entry **next_entry,
4102 		struct rb_node ***insert_p, struct rb_node **insert_parent,
4103 		bool force, bool *leftmost);
4104 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
4105 				struct rb_root_cached *root, bool check_key);
4106 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
4107 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
4108 void f2fs_drop_extent_tree(struct inode *inode);
4109 unsigned int f2fs_destroy_extent_node(struct inode *inode);
4110 void f2fs_destroy_extent_tree(struct inode *inode);
4111 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
4112 			struct extent_info *ei);
4113 void f2fs_update_extent_cache(struct dnode_of_data *dn);
4114 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
4115 			pgoff_t fofs, block_t blkaddr, unsigned int len);
4116 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
4117 int __init f2fs_create_extent_cache(void);
4118 void f2fs_destroy_extent_cache(void);
4119 
4120 /*
4121  * sysfs.c
4122  */
4123 #define MIN_RA_MUL	2
4124 #define MAX_RA_MUL	256
4125 
4126 int __init f2fs_init_sysfs(void);
4127 void f2fs_exit_sysfs(void);
4128 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4129 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4130 
4131 /* verity.c */
4132 extern const struct fsverity_operations f2fs_verityops;
4133 
4134 /*
4135  * crypto support
4136  */
4137 static inline bool f2fs_encrypted_file(struct inode *inode)
4138 {
4139 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4140 }
4141 
4142 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4143 {
4144 #ifdef CONFIG_FS_ENCRYPTION
4145 	file_set_encrypt(inode);
4146 	f2fs_set_inode_flags(inode);
4147 #endif
4148 }
4149 
4150 /*
4151  * Returns true if the reads of the inode's data need to undergo some
4152  * postprocessing step, like decryption or authenticity verification.
4153  */
4154 static inline bool f2fs_post_read_required(struct inode *inode)
4155 {
4156 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4157 		f2fs_compressed_file(inode);
4158 }
4159 
4160 /*
4161  * compress.c
4162  */
4163 #ifdef CONFIG_F2FS_FS_COMPRESSION
4164 bool f2fs_is_compressed_page(struct page *page);
4165 struct page *f2fs_compress_control_page(struct page *page);
4166 int f2fs_prepare_compress_overwrite(struct inode *inode,
4167 			struct page **pagep, pgoff_t index, void **fsdata);
4168 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4169 					pgoff_t index, unsigned copied);
4170 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4171 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
4172 bool f2fs_is_compress_backend_ready(struct inode *inode);
4173 int f2fs_init_compress_mempool(void);
4174 void f2fs_destroy_compress_mempool(void);
4175 void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
4176 void f2fs_end_read_compressed_page(struct page *page, bool failed,
4177 							block_t blkaddr);
4178 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4179 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4180 bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
4181 				int index, int nr_pages);
4182 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
4183 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
4184 int f2fs_write_multi_pages(struct compress_ctx *cc,
4185 						int *submitted,
4186 						struct writeback_control *wbc,
4187 						enum iostat_type io_type);
4188 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4189 void f2fs_update_extent_tree_range_compressed(struct inode *inode,
4190 				pgoff_t fofs, block_t blkaddr, unsigned int llen,
4191 				unsigned int c_len);
4192 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4193 				unsigned nr_pages, sector_t *last_block_in_bio,
4194 				bool is_readahead, bool for_write);
4195 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4196 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
4197 void f2fs_put_page_dic(struct page *page);
4198 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
4199 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4200 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4201 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4202 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4203 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4204 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4205 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4206 int __init f2fs_init_compress_cache(void);
4207 void f2fs_destroy_compress_cache(void);
4208 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4209 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
4210 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4211 						nid_t ino, block_t blkaddr);
4212 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4213 								block_t blkaddr);
4214 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4215 #define inc_compr_inode_stat(inode)					\
4216 	do {								\
4217 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4218 		sbi->compr_new_inode++;					\
4219 	} while (0)
4220 #define add_compr_block_stat(inode, blocks)				\
4221 	do {								\
4222 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4223 		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
4224 		sbi->compr_written_block += blocks;			\
4225 		sbi->compr_saved_block += diff;				\
4226 	} while (0)
4227 #else
4228 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
4229 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4230 {
4231 	if (!f2fs_compressed_file(inode))
4232 		return true;
4233 	/* compression is not supported */
4234 	return false;
4235 }
4236 static inline struct page *f2fs_compress_control_page(struct page *page)
4237 {
4238 	WARN_ON_ONCE(1);
4239 	return ERR_PTR(-EINVAL);
4240 }
4241 static inline int f2fs_init_compress_mempool(void) { return 0; }
4242 static inline void f2fs_destroy_compress_mempool(void) { }
4243 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
4244 static inline void f2fs_end_read_compressed_page(struct page *page,
4245 						bool failed, block_t blkaddr)
4246 {
4247 	WARN_ON_ONCE(1);
4248 }
4249 static inline void f2fs_put_page_dic(struct page *page)
4250 {
4251 	WARN_ON_ONCE(1);
4252 }
4253 static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
4254 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
4255 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4256 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4257 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4258 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4259 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4260 static inline void f2fs_destroy_compress_cache(void) { }
4261 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4262 				block_t blkaddr) { }
4263 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4264 				struct page *page, nid_t ino, block_t blkaddr) { }
4265 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4266 				struct page *page, block_t blkaddr) { return false; }
4267 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4268 							nid_t ino) { }
4269 #define inc_compr_inode_stat(inode)		do { } while (0)
4270 static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
4271 				pgoff_t fofs, block_t blkaddr, unsigned int llen,
4272 				unsigned int c_len) { }
4273 #endif
4274 
4275 static inline void set_compress_context(struct inode *inode)
4276 {
4277 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4278 
4279 	F2FS_I(inode)->i_compress_algorithm =
4280 			F2FS_OPTION(sbi).compress_algorithm;
4281 	F2FS_I(inode)->i_log_cluster_size =
4282 			F2FS_OPTION(sbi).compress_log_size;
4283 	F2FS_I(inode)->i_compress_flag =
4284 			F2FS_OPTION(sbi).compress_chksum ?
4285 				1 << COMPRESS_CHKSUM : 0;
4286 	F2FS_I(inode)->i_cluster_size =
4287 			1 << F2FS_I(inode)->i_log_cluster_size;
4288 	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
4289 		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
4290 			F2FS_OPTION(sbi).compress_level)
4291 		F2FS_I(inode)->i_compress_flag |=
4292 				F2FS_OPTION(sbi).compress_level <<
4293 				COMPRESS_LEVEL_OFFSET;
4294 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
4295 	set_inode_flag(inode, FI_COMPRESSED_FILE);
4296 	stat_inc_compr_inode(inode);
4297 	inc_compr_inode_stat(inode);
4298 	f2fs_mark_inode_dirty_sync(inode, true);
4299 }
4300 
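/*
 * Sketch of the resulting i_compress_flag layout, derived from the code
 * above: the checksum request occupies bit COMPRESS_CHKSUM and the
 * compression level (lz4/zstd only) is stored shifted up by
 * COMPRESS_LEVEL_OFFSET.  For example, mounting with
 * compress_algorithm=zstd:3 and compress_chksum yields roughly:
 *
 *	i_compress_flag = (3 << COMPRESS_LEVEL_OFFSET) | (1 << COMPRESS_CHKSUM);
 */
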
4301 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4302 {
4303 	struct f2fs_inode_info *fi = F2FS_I(inode);
4304 
4305 	if (!f2fs_compressed_file(inode))
4306 		return true;
4307 	if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
4308 		return false;
4309 
4310 	fi->i_flags &= ~F2FS_COMPR_FL;
4311 	stat_dec_compr_inode(inode);
4312 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
4313 	f2fs_mark_inode_dirty_sync(inode, true);
4314 	return true;
4315 }
4316 
4317 #define F2FS_FEATURE_FUNCS(name, flagname) \
4318 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4319 { \
4320 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4321 }
4322 
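/*
 * Each invocation below generates one feature predicate.  For instance,
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * so callers simply test f2fs_sb_has_encrypt(sbi).
 */
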
4323 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4324 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4325 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4326 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4327 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4328 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4329 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4330 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4331 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4332 F2FS_FEATURE_FUNCS(verity, VERITY);
4333 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4334 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4335 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4336 F2FS_FEATURE_FUNCS(readonly, RO);
4337 
4338 static inline bool f2fs_may_extent_tree(struct inode *inode)
4339 {
4340 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4341 
4342 	if (!test_opt(sbi, EXTENT_CACHE) ||
4343 			is_inode_flag_set(inode, FI_NO_EXTENT) ||
4344 			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
4345 			 !f2fs_sb_has_readonly(sbi)))
4346 		return false;
4347 
4348 	/*
4349 	 * For files recovered during mount, do not create extent trees
4350 	 * if the shrinker is not registered.
4351 	 */
4352 	if (list_empty(&sbi->s_list))
4353 		return false;
4354 
4355 	return S_ISREG(inode->i_mode);
4356 }
4357 
4358 #ifdef CONFIG_BLK_DEV_ZONED
4359 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4360 				    block_t blkaddr)
4361 {
4362 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
4363 
4364 	return test_bit(zno, FDEV(devi).blkz_seq);
4365 }
4366 #endif
4367 
4368 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4369 {
4370 	return f2fs_sb_has_blkzoned(sbi);
4371 }
4372 
4373 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4374 {
4375 	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
4376 }
4377 
4378 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4379 {
4380 	int i;
4381 
4382 	if (!f2fs_is_multi_device(sbi))
4383 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4384 
4385 	for (i = 0; i < sbi->s_ndevs; i++)
4386 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4387 			return true;
4388 	return false;
4389 }
4390 
4391 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4392 {
4393 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4394 					f2fs_hw_should_discard(sbi);
4395 }
4396 
4397 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4398 {
4399 	int i;
4400 
4401 	if (!f2fs_is_multi_device(sbi))
4402 		return bdev_read_only(sbi->sb->s_bdev);
4403 
4404 	for (i = 0; i < sbi->s_ndevs; i++)
4405 		if (bdev_read_only(FDEV(i).bdev))
4406 			return true;
4407 	return false;
4408 }
4409 
4410 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4411 {
4412 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4413 }
4414 
4415 static inline bool f2fs_may_compress(struct inode *inode)
4416 {
4417 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4418 				f2fs_is_atomic_file(inode) ||
4419 				f2fs_is_volatile_file(inode))
4420 		return false;
4421 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4422 }
4423 
4424 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4425 						u64 blocks, bool add)
4426 {
4427 	int diff = F2FS_I(inode)->i_cluster_size - blocks;
4428 	struct f2fs_inode_info *fi = F2FS_I(inode);
4429 
4430 	/* don't update i_compr_blocks if saved blocks were released */
4431 	if (!add && !atomic_read(&fi->i_compr_blocks))
4432 		return;
4433 
4434 	if (add) {
4435 		atomic_add(diff, &fi->i_compr_blocks);
4436 		stat_add_compr_blocks(inode, diff);
4437 	} else {
4438 		atomic_sub(diff, &fi->i_compr_blocks);
4439 		stat_sub_compr_blocks(inode, diff);
4440 	}
4441 	f2fs_mark_inode_dirty_sync(inode, true);
4442 }
4443 
4444 static inline int block_unaligned_IO(struct inode *inode,
4445 				struct kiocb *iocb, struct iov_iter *iter)
4446 {
4447 	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
4448 	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
4449 	loff_t offset = iocb->ki_pos;
4450 	unsigned long align = offset | iov_iter_alignment(iter);
4451 
4452 	return align & blocksize_mask;
4453 }
4454 
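/*
 * Worked example for block_unaligned_IO() (illustrative numbers): with a
 * 4KiB block size, i_blkbits = 12 and blocksize_mask = 0xfff.  A direct
 * write at ki_pos = 8192 from a 4KiB-aligned buffer gives
 * (8192 | alignment) & 0xfff == 0, i.e. the I/O is block-aligned; an
 * offset of 8200 returns a non-zero value, which forces buffered I/O for
 * writes in LFS mode (see f2fs_force_buffered_io() below).
 */
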
4455 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
4456 								int flag)
4457 {
4458 	if (!f2fs_is_multi_device(sbi))
4459 		return false;
4460 	if (flag != F2FS_GET_BLOCK_DIO)
4461 		return false;
4462 	return sbi->aligned_blksize;
4463 }
4464 
4465 static inline bool f2fs_force_buffered_io(struct inode *inode,
4466 				struct kiocb *iocb, struct iov_iter *iter)
4467 {
4468 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4469 	int rw = iov_iter_rw(iter);
4470 
4471 	if (!fscrypt_dio_supported(iocb, iter))
4472 		return true;
4473 	if (fsverity_active(inode))
4474 		return true;
4475 	if (f2fs_compressed_file(inode))
4476 		return true;
4477 
4478 	/* disallow direct I/O if any device has an unaligned blksize */
4479 	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
4480 		return true;
4481 	/*
4482 	 * For zoned block devices, fall back from direct I/O to buffered
4483 	 * I/O so that all I/Os can be serialized by log-structured writes.
4484 	 */
4485 	if (f2fs_sb_has_blkzoned(sbi))
4486 		return true;
4487 	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
4488 		if (block_unaligned_IO(inode, iocb, iter))
4489 			return true;
4490 		if (F2FS_IO_ALIGNED(sbi))
4491 			return true;
4492 	}
4493 	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
4494 		return true;
4495 
4496 	return false;
4497 }
4498 
4499 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4500 {
4501 	return fsverity_active(inode) &&
4502 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4503 }
4504 
4505 #ifdef CONFIG_F2FS_FAULT_INJECTION
4506 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4507 							unsigned int type);
4508 #else
4509 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
4510 #endif
4511 
4512 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4513 {
4514 #ifdef CONFIG_QUOTA
4515 	if (f2fs_sb_has_quota_ino(sbi))
4516 		return true;
4517 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4518 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4519 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4520 		return true;
4521 #endif
4522 	return false;
4523 }
4524 
4525 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
4526 {
4527 	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
4528 }
4529 
4530 static inline void f2fs_io_schedule_timeout(long timeout)
4531 {
4532 	set_current_state(TASK_UNINTERRUPTIBLE);
4533 	io_schedule_timeout(timeout);
4534 }
4535 
4536 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4537 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4538 
4539 #endif /* _LINUX_F2FS_H */
4540