/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

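/*
 * Usage sketch (illustrative, not from this file): callers assert internal
 * invariants with f2fs_bug_on().  With CONFIG_F2FS_CHECK_FS it crashes via
 * BUG_ON(); otherwise it only warns and flags the superblock so fsck runs
 * on the next mount:
 *
 *	f2fs_bug_on(sbi, blkaddr >= MAX_BLKADDR(sbi));
 *
 * MAX_BLKADDR() here stands in for whatever bound the caller checks.
 */
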
enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete, since bio_alloc() can no longer fail */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		(GENMASK(FAULT_MAX - 1, 0))

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))

/* maximum retry count for injected failure */
#define DEFAULT_FAILURE_RETRY_COUNT		8
#else
#define DEFAULT_FAILURE_RETRY_COUNT		1
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000001
#define F2FS_MOUNT_DISCARD		0x00000002
#define F2FS_MOUNT_NOHEAP		0x00000004
#define F2FS_MOUNT_XATTR_USER		0x00000008
#define F2FS_MOUNT_POSIX_ACL		0x00000010
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000020
#define F2FS_MOUNT_INLINE_XATTR		0x00000040
#define F2FS_MOUNT_INLINE_DATA		0x00000080
#define F2FS_MOUNT_INLINE_DENTRY	0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
#define F2FS_MOUNT_NOBARRIER		0x00000400
#define F2FS_MOUNT_FASTBOOT		0x00000800
#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00001000
#define F2FS_MOUNT_DATA_FLUSH		0x00002000
#define F2FS_MOUNT_FAULT_INJECTION	0x00004000
#define F2FS_MOUNT_USRQUOTA		0x00008000
#define F2FS_MOUNT_GRPQUOTA		0x00010000
#define F2FS_MOUNT_PRJQUOTA		0x00020000
#define F2FS_MOUNT_QUOTA		0x00040000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00080000
#define F2FS_MOUNT_RESERVE_ROOT		0x00100000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x00200000
#define F2FS_MOUNT_NORECOVERY		0x00400000
#define F2FS_MOUNT_ATGC			0x00800000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x01000000
#define F2FS_MOUNT_GC_MERGE		0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
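
/*
 * The *_opt() macros take the option name without the F2FS_MOUNT_ prefix,
 * e.g. test_opt(sbi, DISCARD).  ver_after() compares checkpoint version
 * numbers with wraparound-safe signed arithmetic: for a = 1 and
 * b = ULLONG_MAX, (long long)(a - b) == 2 > 0, so ver_after(a, b) is true
 * even though a < b numerically.
 */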

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read
 * lock while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};
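
/*
 * A minimal sketch (hypothetical helper names, assuming the
 * f2fs_down_read()/f2fs_up_write() wrappers this header defines around
 * struct f2fs_rwsem) of how the unfair behaviour can be obtained: readers
 * never join the rwsem's fair wait queue; they wait on read_waiters until
 * a trylock succeeds, and the writer wakes them on unlock.
 *
 *	static inline void example_down_read(struct f2fs_rwsem *sem)
 *	{
 *		wait_event(sem->read_waiters,
 *			   down_read_trylock(&sem->internal_rwsem));
 *	}
 *
 *	static inline void example_up_write(struct f2fs_rwsem *sem)
 *	{
 *		up_write(&sem->internal_rwsem);
 *		wake_up_all(&sem->read_waiters);
 *	}
 */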

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int errors;			/* errors parameter */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};

#define F2FS_FEATURE_ENCRYPT			0x00000001
#define F2FS_FEATURE_BLKZONED			0x00000002
#define F2FS_FEATURE_ATOMIC_WRITE		0x00000004
#define F2FS_FEATURE_EXTRA_ATTR			0x00000008
#define F2FS_FEATURE_PRJQUOTA			0x00000010
#define F2FS_FEATURE_INODE_CHKSUM		0x00000020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x00000040
#define F2FS_FEATURE_QUOTA_INO			0x00000080
#define F2FS_FEATURE_INODE_CRTIME		0x00000100
#define F2FS_FEATURE_LOST_FOUND			0x00000200
#define F2FS_FEATURE_VERITY			0x00000400
#define F2FS_FEATURE_SB_CHKSUM			0x00000800
#define F2FS_FEATURE_CASEFOLD			0x00001000
#define F2FS_FEATURE_COMPRESSION		0x00002000
#define F2FS_FEATURE_RO				0x00004000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
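
/*
 * Feature bits live in the on-disk super block as a little-endian mask,
 * which is why the test converts the CPU-endian mask with cpu_to_le32().
 * Typical use:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		...handle compressed inodes...
 */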

/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};
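
/*
 * A minimal sketch (hypothetical variable names) of the checkpoint-merge
 * pattern these two structs implement: callers queue a ckpt_req on
 * issue_list and sleep on its completion, while the f2fs_issue_ckpt thread
 * drains the llist, runs one checkpoint, and completes every waiter.
 *
 *	struct ckpt_req req = { .queue_time = ktime_get() };
 *
 *	init_completion(&req.wait);
 *	llist_add(&req.llnode, &cprc->issue_list);
 *	wake_up(&cprc->ckpt_wait_queue);
 *	wait_for_completion(&req.wait);
 *	return req.ret;
 */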

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* minimum discard granularity, unit: block count */
#define MIN_DISCARD_GRANULARITY		1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
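
/*
 * plist_idx() buckets a pending discard by its length in blocks: a
 * 1-block discard lands in pend_list[0], a 16-block discard in
 * pend_list[15], and anything of MAX_PLIST_NUM (512) blocks or more in
 * the last list, pend_list[511].
 */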

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct discard_info di;		/* discard info */
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity at which discards ignore I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_io_aware_gran;	/* minimum granularity at which discards ignore I/O awareness */
	unsigned int discard_urgent_util;	/* utilization over which discard is issued proactively */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
	bool discard_wake;			/* to wake up discard thread */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
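
/*
 * Usage sketch: before journalling a dirty NAT entry in the current
 * summary block, callers check for space and then bump the counter to
 * claim a slot (update_nats_in_cursum() returns the old count, i.e. the
 * index of the first newly claimed entry):
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL)) {
 *		int idx = update_nats_in_cursum(journal, 1);
 *
 *		nid_in_journal(journal, idx) = cpu_to_le32(nid);
 *	}
 */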

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
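
/*
 * Worked example, assuming the default layout constants (923 block
 * addresses per inode, no extra attributes, 50 inline xattr addresses,
 * SIZE_OF_DIR_ENTRY = 11, F2FS_SLOT_LEN = 8):
 *
 *	MAX_INLINE_DATA  = 4 * (923 - 50 - 1) = 3488 bytes
 *	NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 27904 / 153 = 182
 *
 * i.e. an inline directory can hold up to 182 dentry slots.
 */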

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL.  In all these cases
	 * we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
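
/*
 * The offsets computed above imply the following inline dentry layout
 * inside the inode's data area, in order: dentry bitmap, reserved bytes,
 * the array of f2fs_dir_entry slots, then the filename slots:
 *
 *	| bitmap | reserved | dentries[max] | filenames[max][F2FS_SLOT_LEN] |
 */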

/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

/* number of age extent info in extent cache we try to shrink */
#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
#define LAST_AGE_WEIGHT			30
#define SAME_AGE_REGION			1024

/*
 * Define a data block whose age is less than 1GB as hot data, and a data
 * block whose age is less than 10GB but not less than 1GB as warm data.
 */
#define DEF_HOT_DATA_AGE_THRESHOLD	262144
#define DEF_WARM_DATA_AGE_THRESHOLD	2621440

/* extent cache type */
enum extent_type {
	EX_READ,
	EX_BLOCK_AGE,
	NR_EXTENT_CACHES,
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	union {
		/* read extent_cache */
		struct {
			/* start block address of the extent */
			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			/* physical extent length of compressed blocks */
			unsigned int c_len;
#endif
		};
		/* block age extent_cache */
		struct {
			/* block age of the extent */
			unsigned long long age;
			/* last total blocks allocated */
			unsigned long long last_blocks;
		};
	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
	struct extent_info largest;	/* largest cached extent for EX_READ */
};

struct extent_tree_info {
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */
};

/*
 * State of block returned by f2fs_map_blocks.
 */
#define F2FS_MAP_NEW		(1U << 0)
#define F2FS_MAP_MAPPED		(1U << 1)
#define F2FS_MAP_DELALLOC	(1U << 2)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_DELALLOC)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};
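
/*
 * Usage sketch (assuming the f2fs_map_blocks() helper declared elsewhere
 * in f2fs): callers fill in the logical range, ask for a mapping, then
 * read back the physical address and state flags:
 *
 *	struct f2fs_map_blocks map = {};
 *
 *	map.m_lblk = index;
 *	map.m_len = 1;
 *	map.m_seg_type = NO_CHECK_TYPE;
 *	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;
 */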

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)
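
/*
 * These wrappers operate on the i_advise byte kept in f2fs_inode_info
 * (via the is_file()/set_file()/clear_file() helpers defined later in
 * this header), so the hint survives in the on-disk inode.  For example,
 * marking a file as cold so the allocator steers it to a cold log:
 *
 *	file_set_cold(inode);
 *	f2fs_mark_inode_dirty_sync(inode, true);
 *
 * f2fs_mark_inode_dirty_sync() here is the usual way such an i_advise
 * update is persisted, though call sites vary.
 */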

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistics */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Used below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot __rcu *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
					/* cached extent_tree entry */
	struct inode *cow_inode;	/* copy-on-write inode for atomic write */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[3];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned char i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
};

static inline void get_read_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_read_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
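
/*
 * Example: a pending discard covering blocks [100, 110) and a new one
 * covering [110, 120) are contiguous, so
 * __is_discard_back_mergeable(&new, &old, max_len) is true whenever the
 * combined length (20 blocks here) stays within max_len, and the two
 * commands can be coalesced into one.
 */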

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
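
/*
 * Usage sketch (assuming the f2fs_get_dnode_of_data() and f2fs_put_dnode()
 * helpers declared elsewhere in f2fs): resolve the direct node that maps
 * a file offset, then read the block address out of it:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */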

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p, f)			\
	(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			by waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file */
	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD_IO,			/* discard */
	FS_FLUSH_IO,			/* flush */
	FS_ZONE_RESET_IO,		/* zone reset */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	enum req_op op;		/* contains REQ_OP_ */
	blk_opf_t op_flags;	/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before Cow */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	unsigned int compr_blocks;	/* # of compressed block addresses */
	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
	unsigned int version:8;		/* version of the node */
	unsigned int submitted:1;	/* indicate IO submission */
	unsigned int in_list:1;		/* indicate fio is in io_list */
	unsigned int is_por:1;		/* indicate IO is from recovery or not */
	unsigned int retry:1;		/* need to reallocate block address */
	unsigned int encrypted:1;	/* indicate file is encrypted */
	unsigned int post_read:1;	/* require post read */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
#ifdef CONFIG_BLK_DEV_ZONED
	struct completion zone_wait;	/* condition value for the previous open zone to close */
	struct bio *zone_pending_bio;	/* pending bio for the previous zone */
	void *bi_private;		/* previous bi_private for pending bio */
#endif
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

struct f2fs_gc_control {
	unsigned int victim_segno;	/* target victim segment number */
	int init_gc_type;		/* FG_GC or BG_GC */
	bool no_bg_gc;			/* check the space and stop bg_gc */
	bool should_migrate_blocks;	/* should migrate blocks */
	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
	unsigned int nr_free_secs;	/* # of free sections to do GC */
};

/*
 * For s_flag in struct f2fs_sb_info
 * Modification on enum should be synchronized with s_flag array
 */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
	SBI_IS_FREEZING,			/* freezefs is in process */
	SBI_IS_WRITABLE,			/* remove ro mount option transiently */
	MAX_SBI_FLAG,
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

/* Note that you need to keep synchronization with this gc_mode_names array */
enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,		/* use both lfs/ssr allocation */
	FS_MODE_LFS,			/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,		/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,		/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

enum {
	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
};

enum errors_option {
	MOUNT_ERRORS_READONLY,	/* remount fs ro on errors */
	MOUNT_ERRORS_CONTINUE,	/* continue on errors */
	MOUNT_ERRORS_PANIC,	/* panic on errors */
};

enum {
	BACKGROUND,
	FOREGROUND,
	MAX_CALL_TYPE,
	TOTAL_CALL = FOREGROUND,
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_DUMMY_WRITE
 * bit 2	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 3	PAGE_PRIVATE_INLINE_INODE
 * bit 4	PAGE_PRIVATE_REF_RESOURCE
 * bit 5-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_DUMMY_WRITE,		/* data page for padding aligned IO */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};
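
/*
 * A minimal sketch (hypothetical helpers, following the layout above) of
 * how a flag is set and tested in Layout A.  Bit 0 is always set so the
 * value can never be mistaken for a (pointer-aligned) wrapped pointer:
 *
 *	static inline void example_set_page_private_flag(struct page *page,
 *							 int flag)
 *	{
 *		set_page_private(page, page_private(page) |
 *				BIT(PAGE_PRIVATE_NOT_POINTER) | BIT(flag));
 *	}
 *
 *	static inline bool example_test_page_private_flag(struct page *page,
 *							  int flag)
 *	{
 *		return test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) &&
 *			test_bit(flag, &page_private(page));
 *	}
 */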

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define	COMPRESS_WATERMARK			20
#define	COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
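
/*
 * The on-disk layout of a compressed cluster is this header followed
 * immediately by the compressed payload, so COMPRESS_HEADER_SIZE is
 * 4 + 4 + 4 * 4 = 24 bytes (the flexible array contributes nothing) and
 * the payload starts at cdata[0]:
 *
 *	| clen | chksum | reserved[4] | cdata[clen] |
 */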

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define F2FS_ZSTD_DEFAULT_CLEVEL	1

#define	COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	unsigned int valid_nr_cpages;	/* valid page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};
1476 
1477 /* compress context for write IO path */
1478 struct compress_io_ctx {
1479 	u32 magic;			/* magic number to indicate page is compressed */
1480 	struct inode *inode;		/* inode the context belong to */
1481 	struct page **rpages;		/* pages store raw data in cluster */
1482 	unsigned int nr_rpages;		/* total page number in rpages */
1483 	atomic_t pending_pages;		/* in-flight compressed page count */
1484 };
1485 
1486 /* Context for decompressing one cluster on the read IO path */
1487 struct decompress_io_ctx {
1488 	u32 magic;			/* magic number to indicate page is compressed */
1489 	struct inode *inode;		/* inode the context belong to */
1490 	pgoff_t cluster_idx;		/* cluster index number */
1491 	unsigned int cluster_size;	/* page count in cluster */
1492 	unsigned int log_cluster_size;	/* log of cluster size */
1493 	struct page **rpages;		/* pages storing raw data in cluster */
1494 	unsigned int nr_rpages;		/* total page number in rpages */
1495 	struct page **cpages;		/* pages storing compressed data in cluster */
1496 	unsigned int nr_cpages;		/* total page number in cpages */
1497 	struct page **tpages;		/* temp pages to pad holes in cluster */
1498 	void *rbuf;			/* virtual mapped address on rpages */
1499 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1500 	size_t rlen;			/* valid data length in rbuf */
1501 	size_t clen;			/* valid data length in cbuf */
1502 
1503 	/*
1504 	 * The number of compressed pages remaining to be read in this cluster.
1505 	 * This is initially nr_cpages.  It is decremented by 1 each time a page
1506 	 * has been read (or failed to be read).  When it reaches 0, the cluster
1507 	 * is decompressed (or an error is reported).
1508 	 *
1509 	 * If an error occurs before all the pages have been submitted for I/O,
1510 	 * then this will never reach 0.  In this case the I/O submitter is
1511 	 * responsible for calling f2fs_decompress_end_io() instead.
1512 	 */
1513 	atomic_t remaining_pages;
1514 
1515 	/*
1516 	 * Number of references to this decompress_io_ctx.
1517 	 *
1518 	 * One reference is held for I/O completion.  This reference is dropped
1519 	 * after the pagecache pages are updated and unlocked -- either after
1520 	 * decompression (and verity if enabled), or after an error.
1521 	 *
1522 	 * In addition, each compressed page holds a reference while it is in a
1523 	 * bio.  These references are necessary to prevent compressed pages from
1524 	 * being freed while they are still in a bio.
1525 	 */
1526 	refcount_t refcnt;
1527 
1528 	bool failed;			/* IO error occurred before decompression? */
1529 	bool need_verity;		/* need fs-verity verification after decompression? */
1530 	void *private;			/* payload buffer for specified decompression algorithm */
1531 	void *private2;			/* extra payload buffer */
1532 	struct work_struct verity_work;	/* work to verify the decompressed pages */
1533 	struct work_struct free_work;	/* work to free this structure later */
1534 };
1535 
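/*
 * A hedged sketch of the completion protocol described above; the
 * helper names are hypothetical, not the exact f2fs entry points:
 *
 *	static void cluster_read_endio(struct decompress_io_ctx *dic,
 *					bool failed)
 *	{
 *		if (failed)
 *			dic->failed = true;
 *		if (atomic_dec_and_test(&dic->remaining_pages))
 *			decompress_or_report_error(dic);
 *	}
 *
 * refcnt then keeps the context and its cpages alive until both the
 * I/O-completion reference and every per-bio page reference are gone.
 */
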
1536 #define NULL_CLUSTER			((unsigned int)(~0))
1537 #define MIN_COMPRESS_LOG_SIZE		2
1538 #define MAX_COMPRESS_LOG_SIZE		8
1539 #define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
1540 
1541 struct f2fs_sb_info {
1542 	struct super_block *sb;			/* pointer to VFS super block */
1543 	struct proc_dir_entry *s_proc;		/* proc entry */
1544 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1545 	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
1546 	int valid_super_block;			/* valid super block no */
1547 	unsigned long s_flag;			/* flags for sbi */
1548 	struct mutex writepages;		/* mutex for writepages() */
1549 
1550 #ifdef CONFIG_BLK_DEV_ZONED
1551 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1552 #endif
1553 
1554 	/* for node-related operations */
1555 	struct f2fs_nm_info *nm_info;		/* node manager */
1556 	struct inode *node_inode;		/* cache node blocks */
1557 
1558 	/* for segment-related operations */
1559 	struct f2fs_sm_info *sm_info;		/* segment manager */
1560 
1561 	/* for bio operations */
1562 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1563 	/* keep migration IO order for LFS mode */
1564 	struct f2fs_rwsem io_order_lock;
1565 	mempool_t *write_io_dummy;		/* Dummy pages */
1566 	pgoff_t page_eio_ofs[NR_PAGE_TYPE];	/* EIO page offset */
1567 	int page_eio_cnt[NR_PAGE_TYPE];		/* EIO count */
1568 
1569 	/* for checkpoint */
1570 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1571 	int cur_cp_pack;			/* current cp pack number (1 or 2) */
1572 	spinlock_t cp_lock;			/* for flag in ckpt */
1573 	struct inode *meta_inode;		/* cache meta blocks */
1574 	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
1575 	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
1576 	struct f2fs_rwsem node_write;		/* locking node writes */
1577 	struct f2fs_rwsem node_change;	/* locking node change */
1578 	wait_queue_head_t cp_wait;
1579 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1580 	long interval_time[MAX_TIME];		/* to store thresholds */
1581 	struct ckpt_req_control cprc_info;	/* for checkpoint request control */
1582 
1583 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1584 
1585 	spinlock_t fsync_node_lock;		/* for node entry lock */
1586 	struct list_head fsync_node_list;	/* node list head */
1587 	unsigned int fsync_seg_id;		/* sequence id */
1588 	unsigned int fsync_node_num;		/* number of node entries */
1589 
1590 	/* for orphan inode, use 0'th array */
1591 	unsigned int max_orphans;		/* max orphan inodes */
1592 
1593 	/* for inode management */
1594 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1595 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1596 	struct mutex flush_lock;		/* for flush exclusion */
1597 
1598 	/* for extent tree cache */
1599 	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
1600 	atomic64_t allocated_data_blocks;	/* for block age extent_cache */
1601 
1602 	/* The thresholds used for hot and warm data separation */
1603 	unsigned int hot_data_age_threshold;
1604 	unsigned int warm_data_age_threshold;
1605 	unsigned int last_age_weight;
1606 
1607 	/* basic filesystem units */
1608 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1609 	unsigned int log_blocksize;		/* log2 block size */
1610 	unsigned int blocksize;			/* block size */
1611 	unsigned int root_ino_num;		/* root inode number */
1612 	unsigned int node_ino_num;		/* node inode number */
1613 	unsigned int meta_ino_num;		/* meta inode number */
1614 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1615 	unsigned int blocks_per_seg;		/* blocks per segment */
1616 	unsigned int unusable_blocks_per_sec;	/* unusable blocks per section */
1617 	unsigned int segs_per_sec;		/* segments per section */
1618 	unsigned int secs_per_zone;		/* sections per zone */
1619 	unsigned int total_sections;		/* total section count */
1620 	unsigned int total_node_count;		/* total node block count */
1621 	unsigned int total_valid_node_count;	/* valid node block count */
1622 	int dir_level;				/* directory level */
1623 	bool readdir_ra;			/* readahead inode in readdir */
1624 	u64 max_io_bytes;			/* max io bytes to merge IOs */
1625 
1626 	block_t user_block_count;		/* # of user blocks */
1627 	block_t total_valid_block_count;	/* # of valid blocks */
1628 	block_t discard_blks;			/* discard command candidates */
1629 	block_t last_valid_block_count;		/* for recovery */
1630 	block_t reserved_blocks;		/* configurable reserved blocks */
1631 	block_t current_reserved_blocks;	/* current reserved blocks */
1632 
1633 	/* Additional tracking for no checkpoint mode */
1634 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1635 
1636 	unsigned int nquota_files;		/* # of quota sysfiles */
1637 	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */
1638 
1639 	/* # of pages, see count_type */
1640 	atomic_t nr_pages[NR_COUNT_TYPE];
1641 	/* # of allocated blocks */
1642 	struct percpu_counter alloc_valid_block_count;
1643 	/* # of node block writes as roll forward recovery */
1644 	struct percpu_counter rf_node_block_count;
1645 
1646 	/* writeback control */
1647 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1648 
1649 	/* valid inode count */
1650 	struct percpu_counter total_valid_inode_count;
1651 
1652 	struct f2fs_mount_info mount_opt;	/* mount options */
1653 
1654 	/* for cleaning operations */
1655 	struct f2fs_rwsem gc_lock;		/*
1656 						 * semaphore for GC; avoids races
1657 						 * among GCs and between GC and CP
1658 						 */
1659 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1660 	struct atgc_management am;		/* atgc management */
1661 	unsigned int cur_victim_sec;		/* current victim section num */
1662 	unsigned int gc_mode;			/* current GC state */
1663 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1664 	spinlock_t gc_remaining_trials_lock;
1665 	/* remaining trial count for GC_URGENT_* and GC_IDLE_* */
1666 	unsigned int gc_remaining_trials;
1667 
1668 	/* for skip statistic */
1669 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1670 
1671 	/* threshold for gc trials on pinned files */
1672 	u64 gc_pin_file_threshold;
1673 	struct f2fs_rwsem pin_sem;
1674 
1675 	/* maximum # of trials to find a victim segment for SSR and GC */
1676 	unsigned int max_victim_search;
1677 	/* migration granularity of garbage collection, unit: segment */
1678 	unsigned int migration_granularity;
1679 
1680 	/*
1681 	 * for stat information.
1682 	 * each [2] array below has one slot for the LFS mode and one for SSR.
1683 	 */
1684 #ifdef CONFIG_F2FS_STAT_FS
1685 	struct f2fs_stat_info *stat_info;	/* FS status information */
1686 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1687 	unsigned int segment_count[2];		/* # of allocated segments */
1688 	unsigned int block_count[2];		/* # of allocated blocks */
1689 	atomic_t inplace_count;		/* # of inplace update */
1690 	/* # of lookup extent cache */
1691 	atomic64_t total_hit_ext[NR_EXTENT_CACHES];
1692 	/* # of hit rbtree extent node */
1693 	atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
1694 	/* # of hit cached extent node */
1695 	atomic64_t read_hit_cached[NR_EXTENT_CACHES];
1696 	/* # of hit largest extent node in read extent cache */
1697 	atomic64_t read_hit_largest;
1698 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1699 	atomic_t inline_inode;			/* # of inline_data inodes */
1700 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1701 	atomic_t compr_inode;			/* # of compressed inodes */
1702 	atomic64_t compr_blocks;		/* # of compressed blocks */
1703 	atomic_t swapfile_inode;		/* # of swapfile inodes */
1704 	atomic_t atomic_files;			/* # of opened atomic file */
1705 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1706 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1707 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1708 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1709 	atomic_t cp_call_count[MAX_CALL_TYPE];	/* # of cp call */
1710 #endif
1711 	spinlock_t stat_lock;			/* lock for stat operations */
1712 
1713 	/* to attach REQ_META|REQ_FUA flags */
1714 	unsigned int data_io_flag;
1715 	unsigned int node_io_flag;
1716 
1717 	/* For sysfs support */
1718 	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
1719 	struct completion s_kobj_unregister;
1720 
1721 	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
1722 	struct completion s_stat_kobj_unregister;
1723 
1724 	struct kobject s_feature_list_kobj;		/* /sys/fs/f2fs/<devname>/feature_list */
1725 	struct completion s_feature_list_kobj_unregister;
1726 
1727 	/* For shrinker support */
1728 	struct list_head s_list;
1729 	struct mutex umount_mutex;
1730 	unsigned int shrinker_run_no;
1731 
1732 	/* For multi devices */
1733 	int s_ndevs;				/* number of devices */
1734 	struct f2fs_dev_info *devs;		/* for device list */
1735 	unsigned int dirty_device;		/* for checkpoint data flush */
1736 	spinlock_t dev_lock;			/* protect dirty_device */
1737 	bool aligned_blksize;			/* all devices have the same logical blksize */
1738 
1739 	/* For write statistics */
1740 	u64 sectors_written_start;
1741 	u64 kbytes_written;
1742 
1743 	/* Reference to checksum algorithm driver via cryptoapi */
1744 	struct crypto_shash *s_chksum_driver;
1745 
1746 	/* Precomputed FS UUID checksum for seeding other checksums */
1747 	__u32 s_chksum_seed;
1748 
1749 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1750 
1751 	/*
1752 	 * If we are in irq context, defer updating the error information in
1753 	 * the on-disk superblock to this work item.
1754 	 */
1755 	struct work_struct s_error_work;
1756 	unsigned char errors[MAX_F2FS_ERRORS];		/* error flags */
1757 	unsigned char stop_reason[MAX_STOP_REASON];	/* stop reason */
1758 	spinlock_t error_lock;			/* protect errors/stop_reason array */
1759 	bool error_dirty;			/* error info in sb is dirty */
1760 
1761 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1762 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1763 
1764 	/* For reclaimed segs statistics per each GC mode */
1765 	unsigned int gc_segment_mode;		/* GC state for reclaimed segments */
1766 	unsigned int gc_reclaimed_segs[MAX_GC_MODE];	/* Reclaimed segs for each mode */
1767 
1768 	unsigned long seq_file_ra_mul;		/* multiplier for ra_pages of seq. files in fadvise */
1769 
1770 	int max_fragment_chunk;			/* max chunk size for block fragmentation mode */
1771 	int max_fragment_hole;			/* max hole size for block fragmentation mode */
1772 
1773 	/* For atomic write statistics */
1774 	atomic64_t current_atomic_write;
1775 	s64 peak_atomic_write;
1776 	u64 committed_atomic_block;
1777 	u64 revoked_atomic_block;
1778 
1779 #ifdef CONFIG_F2FS_FS_COMPRESSION
1780 	struct kmem_cache *page_array_slab;	/* page array entry */
1781 	unsigned int page_array_slab_size;	/* default page array slab size */
1782 
1783 	/* For runtime compression statistics */
1784 	u64 compr_written_block;
1785 	u64 compr_saved_block;
1786 	u32 compr_new_inode;
1787 
1788 	/* For compressed block cache */
1789 	struct inode *compress_inode;		/* cache compressed blocks */
1790 	unsigned int compress_percent;		/* cache page percentage */
1791 	unsigned int compress_watermark;	/* cache page watermark */
1792 	atomic_t compress_page_hit;		/* cache hit count */
1793 #endif
1794 
1795 #ifdef CONFIG_F2FS_IOSTAT
1796 	/* For app/fs IO statistics */
1797 	spinlock_t iostat_lock;
1798 	unsigned long long iostat_count[NR_IO_TYPE];
1799 	unsigned long long iostat_bytes[NR_IO_TYPE];
1800 	unsigned long long prev_iostat_bytes[NR_IO_TYPE];
1801 	bool iostat_enable;
1802 	unsigned long iostat_next_period;
1803 	unsigned int iostat_period_ms;
1804 
1805 	/* IO latency statistics collected in one iostat period */
1806 	spinlock_t iostat_lat_lock;
1807 	struct iostat_lat_info *iostat_io_lat;
1808 #endif
1809 };
1810 
1811 /* Definitions to access f2fs_sb_info */
1812 #define BLKS_PER_SEG(sbi)					\
1813 	((sbi)->blocks_per_seg)
1814 #define BLKS_PER_SEC(sbi)					\
1815 	((sbi)->segs_per_sec << (sbi)->log_blocks_per_seg)
1816 #define SEGS_PER_SEC(sbi)					\
1817 	((sbi)->segs_per_sec)
1818 
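/*
 * Worked example for the macros above, assuming the common geometry of
 * 512 blocks per segment (log_blocks_per_seg == 9) and one segment per
 * section:
 *
 *	BLKS_PER_SEG(sbi) == 512
 *	BLKS_PER_SEC(sbi) == 1 << 9 == 512
 *	SEGS_PER_SEC(sbi) == 1
 */
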
1819 __printf(3, 4)
1820 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
1821 
1822 #define f2fs_err(sbi, fmt, ...)						\
1823 	f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
1824 #define f2fs_warn(sbi, fmt, ...)					\
1825 	f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
1826 #define f2fs_notice(sbi, fmt, ...)					\
1827 	f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
1828 #define f2fs_info(sbi, fmt, ...)					\
1829 	f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
1830 #define f2fs_debug(sbi, fmt, ...)					\
1831 	f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
1832 
1833 #define f2fs_err_ratelimited(sbi, fmt, ...)				\
1834 	f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
1835 #define f2fs_warn_ratelimited(sbi, fmt, ...)				\
1836 	f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
1837 #define f2fs_info_ratelimited(sbi, fmt, ...)				\
1838 	f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
1839 
1840 #ifdef CONFIG_F2FS_FAULT_INJECTION
1841 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__,	\
1842 									__builtin_return_address(0))
1843 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
1844 				const char *func, const char *parent_func)
1845 {
1846 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1847 
1848 	if (!ffi->inject_rate)
1849 		return false;
1850 
1851 	if (!IS_FAULT_SET(ffi, type))
1852 		return false;
1853 
1854 	atomic_inc(&ffi->inject_ops);
1855 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1856 		atomic_set(&ffi->inject_ops, 0);
1857 		f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
1858 				f2fs_fault_name[type], func, parent_func);
1859 		return true;
1860 	}
1861 	return false;
1862 }
1863 #else
1864 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1865 {
1866 	return false;
1867 }
1868 #endif
1869 
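/*
 * Typical call-site pattern for time_to_inject(): bail out as if the
 * operation had really failed.  A minimal sketch (the fault type is
 * just an example):
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC))
 *		return NULL;
 *	return kmalloc(size, flags);
 */
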
1870 /*
1871  * Test if the mounted volume is a multi-device volume.
1872  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1873  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1874  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1875  */
1876 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1877 {
1878 	return sbi->s_ndevs > 1;
1879 }
1880 
1881 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1882 {
1883 	unsigned long now = jiffies;
1884 
1885 	sbi->last_time[type] = now;
1886 
1887 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1888 	if (type == REQ_TIME) {
1889 		sbi->last_time[DISCARD_TIME] = now;
1890 		sbi->last_time[GC_TIME] = now;
1891 	}
1892 }
1893 
1894 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1895 {
1896 	unsigned long interval = sbi->interval_time[type] * HZ;
1897 
1898 	return time_after(jiffies, sbi->last_time[type] + interval);
1899 }
1900 
1901 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1902 						int type)
1903 {
1904 	unsigned long interval = sbi->interval_time[type] * HZ;
1905 	unsigned int wait_ms = 0;
1906 	long delta;
1907 
1908 	delta = (sbi->last_time[type] + interval) - jiffies;
1909 	if (delta > 0)
1910 		wait_ms = jiffies_to_msecs(delta);
1911 
1912 	return wait_ms;
1913 }
1914 
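/*
 * Worked example: with interval_time[GC_TIME] set to 60 seconds and the
 * last GC stamped 45 seconds ago, f2fs_time_over() returns false and
 * f2fs_time_to_wait() returns roughly 15000 ms (the remaining jiffies
 * delta converted to milliseconds).
 */
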
1915 /*
1916  * Inline functions
1917  */
1918 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1919 			      const void *address, unsigned int length)
1920 {
1921 	struct {
1922 		struct shash_desc shash;
1923 		char ctx[4];
1924 	} desc;
1925 	int err;
1926 
1927 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1928 
1929 	desc.shash.tfm = sbi->s_chksum_driver;
1930 	*(u32 *)desc.ctx = crc;
1931 
1932 	err = crypto_shash_update(&desc.shash, address, length);
1933 	BUG_ON(err);
1934 
1935 	return *(u32 *)desc.ctx;
1936 }
1937 
1938 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1939 			   unsigned int length)
1940 {
1941 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1942 }
1943 
1944 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1945 				  void *buf, size_t buf_size)
1946 {
1947 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1948 }
1949 
1950 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1951 			      const void *address, unsigned int length)
1952 {
1953 	return __f2fs_crc32(sbi, crc, address, length);
1954 }
1955 
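/*
 * A minimal usage sketch for the checksum helpers above, verifying a
 * checkpoint block roughly the way the mount path does:
 *
 *	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
 *
 *	if (!f2fs_crc_valid(sbi, cur_cp_crc(ckpt), ckpt, crc_offset))
 *		return -EINVAL;	// checkpoint pack is corrupted
 */
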
1956 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1957 {
1958 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1959 }
1960 
1961 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1962 {
1963 	return sb->s_fs_info;
1964 }
1965 
1966 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1967 {
1968 	return F2FS_SB(inode->i_sb);
1969 }
1970 
1971 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1972 {
1973 	return F2FS_I_SB(mapping->host);
1974 }
1975 
1976 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1977 {
1978 	return F2FS_M_SB(page_file_mapping(page));
1979 }
1980 
1981 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1982 {
1983 	return (struct f2fs_super_block *)(sbi->raw_super);
1984 }
1985 
1986 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1987 {
1988 	return (struct f2fs_checkpoint *)(sbi->ckpt);
1989 }
1990 
1991 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1992 {
1993 	return (struct f2fs_node *)page_address(page);
1994 }
1995 
1996 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1997 {
1998 	return &((struct f2fs_node *)page_address(page))->i;
1999 }
2000 
2001 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
2002 {
2003 	return (struct f2fs_nm_info *)(sbi->nm_info);
2004 }
2005 
2006 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
2007 {
2008 	return (struct f2fs_sm_info *)(sbi->sm_info);
2009 }
2010 
2011 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
2012 {
2013 	return (struct sit_info *)(SM_I(sbi)->sit_info);
2014 }
2015 
2016 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
2017 {
2018 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
2019 }
2020 
2021 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
2022 {
2023 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
2024 }
2025 
2026 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
2027 {
2028 	return sbi->meta_inode->i_mapping;
2029 }
2030 
2031 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2032 {
2033 	return sbi->node_inode->i_mapping;
2034 }
2035 
2036 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2037 {
2038 	return test_bit(type, &sbi->s_flag);
2039 }
2040 
2041 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2042 {
2043 	set_bit(type, &sbi->s_flag);
2044 }
2045 
2046 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2047 {
2048 	clear_bit(type, &sbi->s_flag);
2049 }
2050 
2051 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2052 {
2053 	return le64_to_cpu(cp->checkpoint_ver);
2054 }
2055 
2056 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2057 {
2058 	if (type < F2FS_MAX_QUOTAS)
2059 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2060 	return 0;
2061 }
2062 
2063 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2064 {
2065 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2066 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2067 }
2068 
2069 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2070 {
2071 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2072 
2073 	return ckpt_flags & f;
2074 }
2075 
2076 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2077 {
2078 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2079 }
2080 
2081 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2082 {
2083 	unsigned int ckpt_flags;
2084 
2085 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2086 	ckpt_flags |= f;
2087 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2088 }
2089 
2090 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2091 {
2092 	unsigned long flags;
2093 
2094 	spin_lock_irqsave(&sbi->cp_lock, flags);
2095 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
2096 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2097 }
2098 
2099 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2100 {
2101 	unsigned int ckpt_flags;
2102 
2103 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2104 	ckpt_flags &= (~f);
2105 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2106 }
2107 
2108 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2109 {
2110 	unsigned long flags;
2111 
2112 	spin_lock_irqsave(&sbi->cp_lock, flags);
2113 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
2114 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
2115 }
2116 
2117 #define init_f2fs_rwsem(sem)					\
2118 do {								\
2119 	static struct lock_class_key __key;			\
2120 								\
2121 	__init_f2fs_rwsem((sem), #sem, &__key);			\
2122 } while (0)
2123 
2124 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2125 		const char *sem_name, struct lock_class_key *key)
2126 {
2127 	__init_rwsem(&sem->internal_rwsem, sem_name, key);
2128 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2129 	init_waitqueue_head(&sem->read_waiters);
2130 #endif
2131 }
2132 
2133 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2134 {
2135 	return rwsem_is_locked(&sem->internal_rwsem);
2136 }
2137 
2138 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2139 {
2140 	return rwsem_is_contended(&sem->internal_rwsem);
2141 }
2142 
2143 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2144 {
2145 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2146 	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2147 #else
2148 	down_read(&sem->internal_rwsem);
2149 #endif
2150 }
2151 
2152 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2153 {
2154 	return down_read_trylock(&sem->internal_rwsem);
2155 }
2156 
2157 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2158 {
2159 	up_read(&sem->internal_rwsem);
2160 }
2161 
2162 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2163 {
2164 	down_write(&sem->internal_rwsem);
2165 }
2166 
2167 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2168 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2169 {
2170 	down_read_nested(&sem->internal_rwsem, subclass);
2171 }
2172 
2173 static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
2174 {
2175 	down_write_nested(&sem->internal_rwsem, subclass);
2176 }
2177 #else
2178 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2179 #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
2180 #endif
2181 
2182 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2183 {
2184 	return down_write_trylock(&sem->internal_rwsem);
2185 }
2186 
2187 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2188 {
2189 	up_write(&sem->internal_rwsem);
2190 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2191 	wake_up_all(&sem->read_waiters);
2192 #endif
2193 }
2194 
2195 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2196 {
2197 	f2fs_down_read(&sbi->cp_rwsem);
2198 }
2199 
2200 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2201 {
2202 	if (time_to_inject(sbi, FAULT_LOCK_OP))
2203 		return 0;
2204 	return f2fs_down_read_trylock(&sbi->cp_rwsem);
2205 }
2206 
2207 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2208 {
2209 	f2fs_up_read(&sbi->cp_rwsem);
2210 }
2211 
2212 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2213 {
2214 	f2fs_down_write(&sbi->cp_rwsem);
2215 }
2216 
2217 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2218 {
2219 	f2fs_up_write(&sbi->cp_rwsem);
2220 }
2221 
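/*
 * A minimal sketch of the cp_rwsem pattern built on the wrappers above:
 * block-allocating operations take the read side so that checkpoint
 * (the write side, via f2fs_lock_all()) can exclude all of them at once.
 * do_allocate_blocks() is a hypothetical FS operation:
 *
 *	f2fs_lock_op(sbi);
 *	err = do_allocate_blocks(inode);
 *	f2fs_unlock_op(sbi);
 */
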
2222 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2223 {
2224 	int reason = CP_SYNC;
2225 
2226 	if (test_opt(sbi, FASTBOOT))
2227 		reason = CP_FASTBOOT;
2228 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2229 		reason = CP_UMOUNT;
2230 	return reason;
2231 }
2232 
2233 static inline bool __remain_node_summaries(int reason)
2234 {
2235 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
2236 }
2237 
2238 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2239 {
2240 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2241 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2242 }
2243 
2244 /*
2245  * Check whether the inode has blocks or not
2246  */
2247 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2248 {
2249 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2250 
2251 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2252 }
2253 
2254 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2255 {
2256 	return ofs == XATTR_NODE_OFFSET;
2257 }
2258 
2259 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2260 					struct inode *inode, bool cap)
2261 {
2262 	if (!inode)
2263 		return true;
2264 	if (!test_opt(sbi, RESERVE_ROOT))
2265 		return false;
2266 	if (IS_NOQUOTA(inode))
2267 		return true;
2268 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2269 		return true;
2270 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2271 					in_group_p(F2FS_OPTION(sbi).s_resgid))
2272 		return true;
2273 	if (cap && capable(CAP_SYS_RESOURCE))
2274 		return true;
2275 	return false;
2276 }
2277 
2278 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2279 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2280 				 struct inode *inode, blkcnt_t *count, bool partial)
2281 {
2282 	blkcnt_t diff = 0, release = 0;
2283 	block_t avail_user_block_count;
2284 	int ret;
2285 
2286 	ret = dquot_reserve_block(inode, *count);
2287 	if (ret)
2288 		return ret;
2289 
2290 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2291 		release = *count;
2292 		goto release_quota;
2293 	}
2294 
2295 	/*
2296 	 * increase this prior to the actual block count change so that
2297 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2298 	 */
2299 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2300 
2301 	spin_lock(&sbi->stat_lock);
2302 	sbi->total_valid_block_count += (block_t)(*count);
2303 	avail_user_block_count = sbi->user_block_count -
2304 					sbi->current_reserved_blocks;
2305 
2306 	if (!__allow_reserved_blocks(sbi, inode, true))
2307 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2308 
2309 	if (F2FS_IO_ALIGNED(sbi))
2310 		avail_user_block_count -= sbi->blocks_per_seg *
2311 				SM_I(sbi)->additional_reserved_segments;
2312 
2313 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2314 		if (avail_user_block_count > sbi->unusable_block_count)
2315 			avail_user_block_count -= sbi->unusable_block_count;
2316 		else
2317 			avail_user_block_count = 0;
2318 	}
2319 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2320 		if (!partial) {
2321 			spin_unlock(&sbi->stat_lock);
2322 			goto enospc;
2323 		}
2324 
2325 		diff = sbi->total_valid_block_count - avail_user_block_count;
2326 		if (diff > *count)
2327 			diff = *count;
2328 		*count -= diff;
2329 		release = diff;
2330 		sbi->total_valid_block_count -= diff;
2331 		if (!*count) {
2332 			spin_unlock(&sbi->stat_lock);
2333 			goto enospc;
2334 		}
2335 	}
2336 	spin_unlock(&sbi->stat_lock);
2337 
2338 	if (unlikely(release)) {
2339 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2340 		dquot_release_reservation_block(inode, release);
2341 	}
2342 	f2fs_i_blocks_write(inode, *count, true, true);
2343 	return 0;
2344 
2345 enospc:
2346 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2347 release_quota:
2348 	dquot_release_reservation_block(inode, release);
2349 	return -ENOSPC;
2350 }
2351 
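/*
 * A hedged usage sketch for the partial mode of inc_valid_block_count():
 * on a near-full volume the caller may be granted fewer blocks than
 * requested and must re-check *count afterwards:
 *
 *	blkcnt_t count = need;
 *
 *	err = inc_valid_block_count(sbi, inode, &count, true);
 *	if (!err && count < need)
 *		;	// only `count` blocks were actually reserved
 */
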
2352 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
2353 static inline bool page_private_##name(struct page *page) \
2354 { \
2355 	return PagePrivate(page) && \
2356 		test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
2357 		test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2358 }
2359 
2360 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
2361 static inline void set_page_private_##name(struct page *page) \
2362 { \
2363 	if (!PagePrivate(page)) \
2364 		attach_page_private(page, (void *)0); \
2365 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
2366 	set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2367 }
2368 
2369 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
2370 static inline void clear_page_private_##name(struct page *page) \
2371 { \
2372 	clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2373 	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
2374 		detach_page_private(page); \
2375 }
2376 
2377 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
2378 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
2379 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
2380 PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
2381 
2382 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
2383 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
2384 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
2385 PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
2386 
2387 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
2388 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
2389 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
2390 PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
2391 
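/*
 * For reference, PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION) above
 * expands to roughly:
 *
 *	static inline bool page_private_gcing(struct page *page)
 *	{
 *		return PagePrivate(page) &&
 *			test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) &&
 *			test_bit(PAGE_PRIVATE_ONGOING_MIGRATION, &page_private(page));
 *	}
 */
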
2392 static inline unsigned long get_page_private_data(struct page *page)
2393 {
2394 	unsigned long data = page_private(page);
2395 
2396 	if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
2397 		return 0;
2398 	return data >> PAGE_PRIVATE_MAX;
2399 }
2400 
2401 static inline void set_page_private_data(struct page *page, unsigned long data)
2402 {
2403 	if (!PagePrivate(page))
2404 		attach_page_private(page, (void *)0);
2405 	set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
2406 	page_private(page) |= data << PAGE_PRIVATE_MAX;
2407 }
2408 
2409 static inline void clear_page_private_data(struct page *page)
2410 {
2411 	page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
2412 	if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
2413 		detach_page_private(page);
2414 }
2415 
2416 static inline void clear_page_private_all(struct page *page)
2417 {
2418 	clear_page_private_data(page);
2419 	clear_page_private_reference(page);
2420 	clear_page_private_gcing(page);
2421 	clear_page_private_inline(page);
2422 
2423 	f2fs_bug_on(F2FS_P_SB(page), page_private(page));
2424 }
2425 
2426 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2427 						struct inode *inode,
2428 						block_t count)
2429 {
2430 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2431 
2432 	spin_lock(&sbi->stat_lock);
2433 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2434 	sbi->total_valid_block_count -= (block_t)count;
2435 	if (sbi->reserved_blocks &&
2436 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2437 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2438 					sbi->current_reserved_blocks + count);
2439 	spin_unlock(&sbi->stat_lock);
2440 	if (unlikely(inode->i_blocks < sectors)) {
2441 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2442 			  inode->i_ino,
2443 			  (unsigned long long)inode->i_blocks,
2444 			  (unsigned long long)sectors);
2445 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2446 		return;
2447 	}
2448 	f2fs_i_blocks_write(inode, count, false, true);
2449 }
2450 
2451 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2452 {
2453 	atomic_inc(&sbi->nr_pages[count_type]);
2454 
2455 	if (count_type == F2FS_DIRTY_DENTS ||
2456 			count_type == F2FS_DIRTY_NODES ||
2457 			count_type == F2FS_DIRTY_META ||
2458 			count_type == F2FS_DIRTY_QDATA ||
2459 			count_type == F2FS_DIRTY_IMETA)
2460 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2461 }
2462 
2463 static inline void inode_inc_dirty_pages(struct inode *inode)
2464 {
2465 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2466 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2467 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2468 	if (IS_NOQUOTA(inode))
2469 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2470 }
2471 
2472 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2473 {
2474 	atomic_dec(&sbi->nr_pages[count_type]);
2475 }
2476 
2477 static inline void inode_dec_dirty_pages(struct inode *inode)
2478 {
2479 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2480 			!S_ISLNK(inode->i_mode))
2481 		return;
2482 
2483 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2484 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2485 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2486 	if (IS_NOQUOTA(inode))
2487 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2488 }
2489 
2490 static inline void inc_atomic_write_cnt(struct inode *inode)
2491 {
2492 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2493 	struct f2fs_inode_info *fi = F2FS_I(inode);
2494 	u64 current_write;
2495 
2496 	fi->atomic_write_cnt++;
2497 	atomic64_inc(&sbi->current_atomic_write);
2498 	current_write = atomic64_read(&sbi->current_atomic_write);
2499 	if (current_write > sbi->peak_atomic_write)
2500 		sbi->peak_atomic_write = current_write;
2501 }
2502 
2503 static inline void release_atomic_write_cnt(struct inode *inode)
2504 {
2505 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2506 	struct f2fs_inode_info *fi = F2FS_I(inode);
2507 
2508 	atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
2509 	fi->atomic_write_cnt = 0;
2510 }
2511 
2512 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2513 {
2514 	return atomic_read(&sbi->nr_pages[count_type]);
2515 }
2516 
2517 static inline int get_dirty_pages(struct inode *inode)
2518 {
2519 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2520 }
2521 
2522 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2523 {
2524 	return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
2525 							BLKS_PER_SEC(sbi));
2526 }
2527 
2528 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2529 {
2530 	return sbi->total_valid_block_count;
2531 }
2532 
2533 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2534 {
2535 	return sbi->discard_blks;
2536 }
2537 
2538 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2539 {
2540 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2541 
2542 	/* return NAT or SIT bitmap */
2543 	if (flag == NAT_BITMAP)
2544 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2545 	else if (flag == SIT_BITMAP)
2546 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2547 
2548 	return 0;
2549 }
2550 
2551 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2552 {
2553 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2554 }
2555 
2556 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2557 {
2558 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2559 	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2560 	int offset;
2561 
2562 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2563 		offset = (flag == SIT_BITMAP) ?
2564 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2565 		/*
2566 		 * if the large_nat_bitmap feature is enabled, skip over the
2567 		 * checksum field that protects all nat/sit bitmaps.
2568 		 */
2569 		return tmp_ptr + offset + sizeof(__le32);
2570 	}
2571 
2572 	if (__cp_payload(sbi) > 0) {
2573 		if (flag == NAT_BITMAP)
2574 			return tmp_ptr;
2575 		else
2576 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2577 	} else {
2578 		offset = (flag == NAT_BITMAP) ?
2579 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2580 		return tmp_ptr + offset;
2581 	}
2582 }
2583 
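/*
 * Layout summary for the three cases handled by __bitmap_ptr(), derived
 * from the code above (bitmaps live at sit_nat_version_bitmap):
 *
 *	large_nat_bitmap:  [__le32 csum][NAT bitmap][SIT bitmap]
 *	cp_payload > 0:    NAT bitmap here; SIT bitmap in the payload
 *			   block(s) starting at ckpt + F2FS_BLKSIZE
 *	otherwise:         [SIT bitmap][NAT bitmap] packed back to back
 */
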
2584 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2585 {
2586 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2587 
2588 	if (sbi->cur_cp_pack == 2)
2589 		start_addr += BLKS_PER_SEG(sbi);
2590 	return start_addr;
2591 }
2592 
2593 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2594 {
2595 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2596 
2597 	if (sbi->cur_cp_pack == 1)
2598 		start_addr += BLKS_PER_SEG(sbi);
2599 	return start_addr;
2600 }
2601 
2602 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2603 {
2604 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2605 }
2606 
2607 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2608 {
2609 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2610 }
2611 
2612 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
2613 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2614 					struct inode *inode, bool is_inode)
2615 {
2616 	block_t	valid_block_count;
2617 	unsigned int valid_node_count, user_block_count;
2618 	int err;
2619 
2620 	if (is_inode) {
2621 		if (inode) {
2622 			err = dquot_alloc_inode(inode);
2623 			if (err)
2624 				return err;
2625 		}
2626 	} else {
2627 		err = dquot_reserve_block(inode, 1);
2628 		if (err)
2629 			return err;
2630 	}
2631 
2632 	if (time_to_inject(sbi, FAULT_BLOCK))
2633 		goto enospc;
2634 
2635 	spin_lock(&sbi->stat_lock);
2636 
2637 	valid_block_count = sbi->total_valid_block_count +
2638 					sbi->current_reserved_blocks + 1;
2639 
2640 	if (!__allow_reserved_blocks(sbi, inode, false))
2641 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2642 
2643 	if (F2FS_IO_ALIGNED(sbi))
2644 		valid_block_count += sbi->blocks_per_seg *
2645 				SM_I(sbi)->additional_reserved_segments;
2646 
2647 	user_block_count = sbi->user_block_count;
2648 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2649 		user_block_count -= sbi->unusable_block_count;
2650 
2651 	if (unlikely(valid_block_count > user_block_count)) {
2652 		spin_unlock(&sbi->stat_lock);
2653 		goto enospc;
2654 	}
2655 
2656 	valid_node_count = sbi->total_valid_node_count + 1;
2657 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2658 		spin_unlock(&sbi->stat_lock);
2659 		goto enospc;
2660 	}
2661 
2662 	sbi->total_valid_node_count++;
2663 	sbi->total_valid_block_count++;
2664 	spin_unlock(&sbi->stat_lock);
2665 
2666 	if (inode) {
2667 		if (is_inode)
2668 			f2fs_mark_inode_dirty_sync(inode, true);
2669 		else
2670 			f2fs_i_blocks_write(inode, 1, true, true);
2671 	}
2672 
2673 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2674 	return 0;
2675 
2676 enospc:
2677 	if (is_inode) {
2678 		if (inode)
2679 			dquot_free_inode(inode);
2680 	} else {
2681 		dquot_release_reservation_block(inode, 1);
2682 	}
2683 	return -ENOSPC;
2684 }
2685 
2686 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2687 					struct inode *inode, bool is_inode)
2688 {
2689 	spin_lock(&sbi->stat_lock);
2690 
2691 	if (unlikely(!sbi->total_valid_block_count ||
2692 			!sbi->total_valid_node_count)) {
2693 		f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
2694 			  sbi->total_valid_block_count,
2695 			  sbi->total_valid_node_count);
2696 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2697 	} else {
2698 		sbi->total_valid_block_count--;
2699 		sbi->total_valid_node_count--;
2700 	}
2701 
2702 	if (sbi->reserved_blocks &&
2703 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2704 		sbi->current_reserved_blocks++;
2705 
2706 	spin_unlock(&sbi->stat_lock);
2707 
2708 	if (is_inode) {
2709 		dquot_free_inode(inode);
2710 	} else {
2711 		if (unlikely(inode->i_blocks == 0)) {
2712 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2713 				  inode->i_ino,
2714 				  (unsigned long long)inode->i_blocks);
2715 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2716 			return;
2717 		}
2718 		f2fs_i_blocks_write(inode, 1, false, true);
2719 	}
2720 }
2721 
2722 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2723 {
2724 	return sbi->total_valid_node_count;
2725 }
2726 
2727 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2728 {
2729 	percpu_counter_inc(&sbi->total_valid_inode_count);
2730 }
2731 
2732 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2733 {
2734 	percpu_counter_dec(&sbi->total_valid_inode_count);
2735 }
2736 
2737 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2738 {
2739 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2740 }
2741 
2742 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2743 						pgoff_t index, bool for_write)
2744 {
2745 	struct page *page;
2746 	unsigned int flags;
2747 
2748 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2749 		if (!for_write)
2750 			page = find_get_page_flags(mapping, index,
2751 							FGP_LOCK | FGP_ACCESSED);
2752 		else
2753 			page = find_lock_page(mapping, index);
2754 		if (page)
2755 			return page;
2756 
2757 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
2758 			return NULL;
2759 	}
2760 
2761 	if (!for_write)
2762 		return grab_cache_page(mapping, index);
2763 
2764 	flags = memalloc_nofs_save();
2765 	page = grab_cache_page_write_begin(mapping, index);
2766 	memalloc_nofs_restore(flags);
2767 
2768 	return page;
2769 }
2770 
2771 static inline struct page *f2fs_pagecache_get_page(
2772 				struct address_space *mapping, pgoff_t index,
2773 				fgf_t fgp_flags, gfp_t gfp_mask)
2774 {
2775 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
2776 		return NULL;
2777 
2778 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2779 }
2780 
2781 static inline void f2fs_put_page(struct page *page, int unlock)
2782 {
2783 	if (!page)
2784 		return;
2785 
2786 	if (unlock) {
2787 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2788 		unlock_page(page);
2789 	}
2790 	put_page(page);
2791 }
2792 
2793 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2794 {
2795 	if (dn->node_page)
2796 		f2fs_put_page(dn->node_page, 1);
2797 	if (dn->inode_page && dn->node_page != dn->inode_page)
2798 		f2fs_put_page(dn->inode_page, 0);
2799 	dn->node_page = NULL;
2800 	dn->inode_page = NULL;
2801 }
2802 
2803 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2804 					size_t size)
2805 {
2806 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2807 }
2808 
2809 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2810 						gfp_t flags)
2811 {
2812 	void *entry;
2813 
2814 	entry = kmem_cache_alloc(cachep, flags);
2815 	if (!entry)
2816 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2817 	return entry;
2818 }
2819 
2820 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2821 			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2822 {
2823 	if (nofail)
2824 		return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2825 
2826 	if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
2827 		return NULL;
2828 
2829 	return kmem_cache_alloc(cachep, flags);
2830 }
2831 
2832 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2833 {
2834 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2835 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2836 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2837 		get_pages(sbi, F2FS_DIO_READ) ||
2838 		get_pages(sbi, F2FS_DIO_WRITE))
2839 		return true;
2840 
2841 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2842 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2843 		return true;
2844 
2845 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2846 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2847 		return true;
2848 	return false;
2849 }
2850 
2851 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2852 {
2853 	if (sbi->gc_mode == GC_URGENT_HIGH)
2854 		return true;
2855 
2856 	if (is_inflight_io(sbi, type))
2857 		return false;
2858 
2859 	if (sbi->gc_mode == GC_URGENT_MID)
2860 		return true;
2861 
2862 	if (sbi->gc_mode == GC_URGENT_LOW &&
2863 			(type == DISCARD_TIME || type == GC_TIME))
2864 		return true;
2865 
2866 	return f2fs_time_over(sbi, type);
2867 }
2868 
2869 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2870 				unsigned long index, void *item)
2871 {
2872 	while (radix_tree_insert(root, index, item))
2873 		cond_resched();
2874 }
2875 
2876 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2877 
2878 static inline bool IS_INODE(struct page *page)
2879 {
2880 	struct f2fs_node *p = F2FS_NODE(page);
2881 
2882 	return RAW_IS_INODE(p);
2883 }
2884 
2885 static inline int offset_in_addr(struct f2fs_inode *i)
2886 {
2887 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2888 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2889 }
2890 
2891 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2892 {
2893 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2894 }
2895 
2896 static inline int f2fs_has_extra_attr(struct inode *inode);
2897 static inline block_t data_blkaddr(struct inode *inode,
2898 			struct page *node_page, unsigned int offset)
2899 {
2900 	struct f2fs_node *raw_node;
2901 	__le32 *addr_array;
2902 	int base = 0;
2903 	bool is_inode = IS_INODE(node_page);
2904 
2905 	raw_node = F2FS_NODE(node_page);
2906 
2907 	if (is_inode) {
2908 		if (!inode)
2909 			/* from GC path only */
2910 			base = offset_in_addr(&raw_node->i);
2911 		else if (f2fs_has_extra_attr(inode))
2912 			base = get_extra_isize(inode);
2913 	}
2914 
2915 	addr_array = blkaddr_in_node(raw_node);
2916 	return le32_to_cpu(addr_array[base + offset]);
2917 }
2918 
2919 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2920 {
2921 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2922 }
2923 
2924 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2925 {
2926 	int mask;
2927 
2928 	addr += (nr >> 3);
2929 	mask = BIT(7 - (nr & 0x07));
2930 	return mask & *addr;
2931 }
2932 
2933 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2934 {
2935 	int mask;
2936 
2937 	addr += (nr >> 3);
2938 	mask = BIT(7 - (nr & 0x07));
2939 	*addr |= mask;
2940 }
2941 
2942 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2943 {
2944 	int mask;
2945 
2946 	addr += (nr >> 3);
2947 	mask = BIT(7 - (nr & 0x07));
2948 	*addr &= ~mask;
2949 }
2950 
2951 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2952 {
2953 	int mask;
2954 	int ret;
2955 
2956 	addr += (nr >> 3);
2957 	mask = BIT(7 - (nr & 0x07));
2958 	ret = mask & *addr;
2959 	*addr |= mask;
2960 	return ret;
2961 }
2962 
2963 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2964 {
2965 	int mask;
2966 	int ret;
2967 
2968 	addr += (nr >> 3);
2969 	mask = BIT(7 - (nr & 0x07));
2970 	ret = mask & *addr;
2971 	*addr &= ~mask;
2972 	return ret;
2973 }
2974 
2975 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2976 {
2977 	int mask;
2978 
2979 	addr += (nr >> 3);
2980 	mask = BIT(7 - (nr & 0x07));
2981 	*addr ^= mask;
2982 }
2983 
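/*
 * Note that these helpers number bits MSB-first within each byte
 * (BIT(7 - (nr & 0x07))), matching the on-disk bitmap format, unlike
 * the kernel's LSB-first set_bit()/test_bit().  A small sketch:
 *
 *	char map[1] = { 0 };
 *
 *	f2fs_set_bit(0, map);	// map[0] == 0x80, not 0x01
 *	f2fs_set_bit(7, map);	// map[0] == 0x81
 */
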
2984 /*
2985  * On-disk inode flags (f2fs_inode::i_flags)
2986  */
2987 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
2988 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
2989 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
2990 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
2991 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
2992 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
2993 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
2994 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
2995 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
2996 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
2997 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
2998 
2999 #define F2FS_QUOTA_DEFAULT_FL		(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
3000 
3001 /* Flags that should be inherited by new inodes from their parent. */
3002 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
3003 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3004 			   F2FS_CASEFOLD_FL)
3005 
3006 /* Flags that are appropriate for regular files (all but dir-specific ones). */
3007 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3008 				F2FS_CASEFOLD_FL))
3009 
3010 /* Flags that are appropriate for non-directories/regular files. */
3011 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
3012 
3013 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
3014 {
3015 	if (S_ISDIR(mode))
3016 		return flags;
3017 	else if (S_ISREG(mode))
3018 		return flags & F2FS_REG_FLMASK;
3019 	else
3020 		return flags & F2FS_OTHER_FLMASK;
3021 }
3022 
3023 static inline void __mark_inode_dirty_flag(struct inode *inode,
3024 						int flag, bool set)
3025 {
3026 	switch (flag) {
3027 	case FI_INLINE_XATTR:
3028 	case FI_INLINE_DATA:
3029 	case FI_INLINE_DENTRY:
3030 	case FI_NEW_INODE:
3031 		if (set)
3032 			return;
3033 		fallthrough;
3034 	case FI_DATA_EXIST:
3035 	case FI_INLINE_DOTS:
3036 	case FI_PIN_FILE:
3037 	case FI_COMPRESS_RELEASED:
3038 	case FI_ATOMIC_COMMITTED:
3039 		f2fs_mark_inode_dirty_sync(inode, true);
3040 	}
3041 }
3042 
3043 static inline void set_inode_flag(struct inode *inode, int flag)
3044 {
3045 	set_bit(flag, F2FS_I(inode)->flags);
3046 	__mark_inode_dirty_flag(inode, flag, true);
3047 }
3048 
3049 static inline int is_inode_flag_set(struct inode *inode, int flag)
3050 {
3051 	return test_bit(flag, F2FS_I(inode)->flags);
3052 }
3053 
3054 static inline void clear_inode_flag(struct inode *inode, int flag)
3055 {
3056 	clear_bit(flag, F2FS_I(inode)->flags);
3057 	__mark_inode_dirty_flag(inode, flag, false);
3058 }
3059 
3060 static inline bool f2fs_verity_in_progress(struct inode *inode)
3061 {
3062 	return IS_ENABLED(CONFIG_FS_VERITY) &&
3063 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
3064 }
3065 
3066 static inline void set_acl_inode(struct inode *inode, umode_t mode)
3067 {
3068 	F2FS_I(inode)->i_acl_mode = mode;
3069 	set_inode_flag(inode, FI_ACL_MODE);
3070 	f2fs_mark_inode_dirty_sync(inode, false);
3071 }
3072 
3073 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
3074 {
3075 	if (inc)
3076 		inc_nlink(inode);
3077 	else
3078 		drop_nlink(inode);
3079 	f2fs_mark_inode_dirty_sync(inode, true);
3080 }
3081 
3082 static inline void f2fs_i_blocks_write(struct inode *inode,
3083 					block_t diff, bool add, bool claim)
3084 {
3085 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3086 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3087 
3088 	/* add && claim must pair with a prior dquot_reserve_block() */
3089 	if (add) {
3090 		if (claim)
3091 			dquot_claim_block(inode, diff);
3092 		else
3093 			dquot_alloc_block_nofail(inode, diff);
3094 	} else {
3095 		dquot_free_block(inode, diff);
3096 	}
3097 
3098 	f2fs_mark_inode_dirty_sync(inode, true);
3099 	if (clean || recover)
3100 		set_inode_flag(inode, FI_AUTO_RECOVER);
3101 }
3102 
3103 static inline bool f2fs_is_atomic_file(struct inode *inode);
3104 
3105 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
3106 {
3107 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3108 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3109 
3110 	if (i_size_read(inode) == i_size)
3111 		return;
3112 
3113 	i_size_write(inode, i_size);
3114 
3115 	if (f2fs_is_atomic_file(inode))
3116 		return;
3117 
3118 	f2fs_mark_inode_dirty_sync(inode, true);
3119 	if (clean || recover)
3120 		set_inode_flag(inode, FI_AUTO_RECOVER);
3121 }
3122 
3123 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
3124 {
3125 	F2FS_I(inode)->i_current_depth = depth;
3126 	f2fs_mark_inode_dirty_sync(inode, true);
3127 }
3128 
3129 static inline void f2fs_i_gc_failures_write(struct inode *inode,
3130 					unsigned int count)
3131 {
3132 	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
3133 	f2fs_mark_inode_dirty_sync(inode, true);
3134 }
3135 
3136 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
3137 {
3138 	F2FS_I(inode)->i_xattr_nid = xnid;
3139 	f2fs_mark_inode_dirty_sync(inode, true);
3140 }
3141 
3142 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
3143 {
3144 	F2FS_I(inode)->i_pino = pino;
3145 	f2fs_mark_inode_dirty_sync(inode, true);
3146 }
3147 
3148 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
3149 {
3150 	struct f2fs_inode_info *fi = F2FS_I(inode);
3151 
3152 	if (ri->i_inline & F2FS_INLINE_XATTR)
3153 		set_bit(FI_INLINE_XATTR, fi->flags);
3154 	if (ri->i_inline & F2FS_INLINE_DATA)
3155 		set_bit(FI_INLINE_DATA, fi->flags);
3156 	if (ri->i_inline & F2FS_INLINE_DENTRY)
3157 		set_bit(FI_INLINE_DENTRY, fi->flags);
3158 	if (ri->i_inline & F2FS_DATA_EXIST)
3159 		set_bit(FI_DATA_EXIST, fi->flags);
3160 	if (ri->i_inline & F2FS_INLINE_DOTS)
3161 		set_bit(FI_INLINE_DOTS, fi->flags);
3162 	if (ri->i_inline & F2FS_EXTRA_ATTR)
3163 		set_bit(FI_EXTRA_ATTR, fi->flags);
3164 	if (ri->i_inline & F2FS_PIN_FILE)
3165 		set_bit(FI_PIN_FILE, fi->flags);
3166 	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
3167 		set_bit(FI_COMPRESS_RELEASED, fi->flags);
3168 }
3169 
3170 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
3171 {
3172 	ri->i_inline = 0;
3173 
3174 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
3175 		ri->i_inline |= F2FS_INLINE_XATTR;
3176 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
3177 		ri->i_inline |= F2FS_INLINE_DATA;
3178 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
3179 		ri->i_inline |= F2FS_INLINE_DENTRY;
3180 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
3181 		ri->i_inline |= F2FS_DATA_EXIST;
3182 	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
3183 		ri->i_inline |= F2FS_INLINE_DOTS;
3184 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
3185 		ri->i_inline |= F2FS_EXTRA_ATTR;
3186 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3187 		ri->i_inline |= F2FS_PIN_FILE;
3188 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
3189 		ri->i_inline |= F2FS_COMPRESS_RELEASED;
3190 }
3191 
3192 static inline int f2fs_has_extra_attr(struct inode *inode)
3193 {
3194 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
3195 }
3196 
3197 static inline int f2fs_has_inline_xattr(struct inode *inode)
3198 {
3199 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
3200 }
3201 
3202 static inline int f2fs_compressed_file(struct inode *inode)
3203 {
3204 	return S_ISREG(inode->i_mode) &&
3205 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
3206 }
3207 
3208 static inline bool f2fs_need_compress_data(struct inode *inode)
3209 {
3210 	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
3211 
3212 	if (!f2fs_compressed_file(inode))
3213 		return false;
3214 
3215 	if (compress_mode == COMPR_MODE_FS)
3216 		return true;
3217 	else if (compress_mode == COMPR_MODE_USER &&
3218 			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
3219 		return true;
3220 
3221 	return false;
3222 }
3223 
3224 static inline unsigned int addrs_per_inode(struct inode *inode)
3225 {
3226 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
3227 				get_inline_xattr_addrs(inode);
3228 
3229 	if (!f2fs_compressed_file(inode))
3230 		return addrs;
3231 	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
3232 }
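/*
 * Worked example (editor's note, assuming the usual constants): with
 * DEF_ADDRS_PER_INODE == 923, no extra attributes, and 50 inline-xattr
 * slots, a plain inode exposes 873 data-block addresses; a compressed
 * file with a 4-block cluster gets ALIGN_DOWN(873, 4) == 872, so that
 * no cluster straddles the end of the inode's address array.
 */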
3233 
3234 static inline unsigned int addrs_per_block(struct inode *inode)
3235 {
3236 	if (!f2fs_compressed_file(inode))
3237 		return DEF_ADDRS_PER_BLOCK;
3238 	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
3239 }
3240 
3241 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
3242 {
3243 	struct f2fs_inode *ri = F2FS_INODE(page);
3244 
3245 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
3246 					get_inline_xattr_addrs(inode)]);
3247 }
3248 
3249 static inline int inline_xattr_size(struct inode *inode)
3250 {
3251 	if (f2fs_has_inline_xattr(inode))
3252 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
3253 	return 0;
3254 }
3255 
3256 /*
3257  * Note: checking the inline_data flag without holding the inode page
3258  * lock is unsafe; it can be cleared at any time by f2fs_convert_inline_page().
3259  */
3260 static inline int f2fs_has_inline_data(struct inode *inode)
3261 {
3262 	return is_inode_flag_set(inode, FI_INLINE_DATA);
3263 }
3264 
3265 static inline int f2fs_exist_data(struct inode *inode)
3266 {
3267 	return is_inode_flag_set(inode, FI_DATA_EXIST);
3268 }
3269 
3270 static inline int f2fs_has_inline_dots(struct inode *inode)
3271 {
3272 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
3273 }
3274 
3275 static inline int f2fs_is_mmap_file(struct inode *inode)
3276 {
3277 	return is_inode_flag_set(inode, FI_MMAP_FILE);
3278 }
3279 
3280 static inline bool f2fs_is_pinned_file(struct inode *inode)
3281 {
3282 	return is_inode_flag_set(inode, FI_PIN_FILE);
3283 }
3284 
3285 static inline bool f2fs_is_atomic_file(struct inode *inode)
3286 {
3287 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
3288 }
3289 
3290 static inline bool f2fs_is_cow_file(struct inode *inode)
3291 {
3292 	return is_inode_flag_set(inode, FI_COW_FILE);
3293 }
3294 
3295 static inline __le32 *get_dnode_addr(struct inode *inode,
3296 					struct page *node_page);
3297 static inline void *inline_data_addr(struct inode *inode, struct page *page)
3298 {
3299 	__le32 *addr = get_dnode_addr(inode, page);
3300 
3301 	return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
3302 }
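/*
 * Editor's note: inline data does not begin at the first block-address
 * slot; DEF_INLINE_RESERVED_SIZE word(s) at the front of the array are
 * reserved, hence the offset above.
 */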
3303 
3304 static inline int f2fs_has_inline_dentry(struct inode *inode)
3305 {
3306 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
3307 }
3308 
3309 static inline int is_file(struct inode *inode, int type)
3310 {
3311 	return F2FS_I(inode)->i_advise & type;
3312 }
3313 
3314 static inline void set_file(struct inode *inode, int type)
3315 {
3316 	if (is_file(inode, type))
3317 		return;
3318 	F2FS_I(inode)->i_advise |= type;
3319 	f2fs_mark_inode_dirty_sync(inode, true);
3320 }
3321 
3322 static inline void clear_file(struct inode *inode, int type)
3323 {
3324 	if (!is_file(inode, type))
3325 		return;
3326 	F2FS_I(inode)->i_advise &= ~type;
3327 	f2fs_mark_inode_dirty_sync(inode, true);
3328 }
3329 
3330 static inline bool f2fs_is_time_consistent(struct inode *inode)
3331 {
3332 	struct timespec64 ctime = inode_get_ctime(inode);
3333 
3334 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
3335 		return false;
3336 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ctime))
3337 		return false;
3338 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
3339 		return false;
3340 	return true;
3341 }
3342 
3343 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
3344 {
3345 	bool ret;
3346 
3347 	if (dsync) {
3348 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3349 
3350 		spin_lock(&sbi->inode_lock[DIRTY_META]);
3351 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
3352 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
3353 		return ret;
3354 	}
3355 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3356 			file_keep_isize(inode) ||
3357 			i_size_read(inode) & ~PAGE_MASK)
3358 		return false;
3359 
3360 	if (!f2fs_is_time_consistent(inode))
3361 		return false;
3362 
3363 	spin_lock(&F2FS_I(inode)->i_size_lock);
3364 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3365 	spin_unlock(&F2FS_I(inode)->i_size_lock);
3366 
3367 	return ret;
3368 }
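/*
 * Editor's note: in the non-dsync case the helper above only allows
 * skipping the update while roll-forward recovery could still rebuild
 * the inode: FI_AUTO_RECOVER is set, i_size is page-aligned and matches
 * the last size written to disk, and the timestamps are unchanged.
 */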
3369 
3370 static inline bool f2fs_readonly(struct super_block *sb)
3371 {
3372 	return sb_rdonly(sb);
3373 }
3374 
3375 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3376 {
3377 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3378 }
3379 
3380 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3381 {
3382 	if (len == 1 && name[0] == '.')
3383 		return true;
3384 
3385 	if (len == 2 && name[0] == '.' && name[1] == '.')
3386 		return true;
3387 
3388 	return false;
3389 }
3390 
3391 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3392 					size_t size, gfp_t flags)
3393 {
3394 	if (time_to_inject(sbi, FAULT_KMALLOC))
3395 		return NULL;
3396 
3397 	return kmalloc(size, flags);
3398 }
3399 
3400 static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
3401 {
3402 	if (time_to_inject(sbi, FAULT_KMALLOC))
3403 		return NULL;
3404 
3405 	return __getname();
3406 }
3407 
3408 static inline void f2fs_putname(char *buf)
3409 {
3410 	__putname(buf);
3411 }
3412 
3413 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3414 					size_t size, gfp_t flags)
3415 {
3416 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3417 }
3418 
3419 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3420 					size_t size, gfp_t flags)
3421 {
3422 	if (time_to_inject(sbi, FAULT_KVMALLOC))
3423 		return NULL;
3424 
3425 	return kvmalloc(size, flags);
3426 }
3427 
3428 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3429 					size_t size, gfp_t flags)
3430 {
3431 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3432 }
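/*
 * Usage sketch (editor's addition): every wrapper above can fail via
 * fault injection even when memory is plentiful, so callers must
 * always handle NULL.  The helper name is hypothetical.
 */
#if 0	/* example only */
static inline int f2fs_example_alloc(struct f2fs_sb_info *sbi)
{
	void *buf = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_NOFS);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kfree(buf);
	return 0;
}
#endif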
3433 
3434 static inline int get_extra_isize(struct inode *inode)
3435 {
3436 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3437 }
3438 
3439 static inline int get_inline_xattr_addrs(struct inode *inode)
3440 {
3441 	return F2FS_I(inode)->i_inline_xattr_size;
3442 }
3443 
3444 static inline __le32 *get_dnode_addr(struct inode *inode,
3445 					struct page *node_page)
3446 {
3447 	int base = 0;
3448 
3449 	if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
3450 		base = get_extra_isize(inode);
3451 
3452 	return blkaddr_in_node(F2FS_NODE(node_page)) + base;
3453 }
3454 
3455 #define f2fs_get_inode_mode(i) \
3456 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3457 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3458 
3459 #define F2FS_MIN_EXTRA_ATTR_SIZE		(sizeof(__le32))
3460 
3461 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3462 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3463 	offsetof(struct f2fs_inode, i_extra_isize))
3464 
3465 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3466 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3467 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3468 		sizeof((f2fs_inode)->field))			\
3469 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))
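/*
 * Example (editor's note): a real caller pattern is
 *	F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)
 * which is true only when i_extra_isize is large enough that the
 * on-disk i_projid field lies inside the extra attribute area.
 */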
3470 
3471 #define __is_large_section(sbi)		(SEGS_PER_SEC(sbi) > 1)
3472 
3473 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3474 
3475 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3476 					block_t blkaddr, int type);
3477 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3478 					block_t blkaddr, int type)
3479 {
3480 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
3481 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3482 			 blkaddr, type);
3483 }
3484 
3485 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3486 {
3487 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3488 			blkaddr == COMPRESS_ADDR)
3489 		return false;
3490 	return true;
3491 }
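/*
 * Editor's note: NULL_ADDR marks an unallocated slot, NEW_ADDR a block
 * reserved but not yet allocated on disk, and COMPRESS_ADDR a slot
 * belonging to a compressed cluster; none of them names a real block
 * address, hence the filtering above.
 */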
3492 
3493 /*
3494  * file.c
3495  */
3496 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3497 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3498 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3499 int f2fs_truncate(struct inode *inode);
3500 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
3501 		 struct kstat *stat, u32 request_mask, unsigned int flags);
3502 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
3503 		 struct iattr *attr);
3504 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3505 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3506 int f2fs_precache_extents(struct inode *inode);
3507 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
3508 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3509 		      struct dentry *dentry, struct fileattr *fa);
3510 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3511 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3512 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3513 int f2fs_pin_file_control(struct inode *inode, bool inc);
3514 
3515 /*
3516  * inode.c
3517  */
3518 void f2fs_set_inode_flags(struct inode *inode);
3519 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3520 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3521 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3522 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3523 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3524 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3525 void f2fs_update_inode_page(struct inode *inode);
3526 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3527 void f2fs_evict_inode(struct inode *inode);
3528 void f2fs_handle_failed_inode(struct inode *inode);
3529 
3530 /*
3531  * namei.c
3532  */
3533 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3534 							bool hot, bool set);
3535 struct dentry *f2fs_get_parent(struct dentry *child);
3536 int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3537 		     struct inode **new_inode);
3538 
3539 /*
3540  * dir.c
3541  */
3542 int f2fs_init_casefolded_name(const struct inode *dir,
3543 			      struct f2fs_filename *fname);
3544 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3545 			int lookup, struct f2fs_filename *fname);
3546 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3547 			struct f2fs_filename *fname);
3548 void f2fs_free_filename(struct f2fs_filename *fname);
3549 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3550 			const struct f2fs_filename *fname, int *max_slots);
3551 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3552 			unsigned int start_pos, struct fscrypt_str *fstr);
3553 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3554 			struct f2fs_dentry_ptr *d);
3555 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3556 			const struct f2fs_filename *fname, struct page *dpage);
3557 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3558 			unsigned int current_depth);
3559 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3560 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3561 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3562 					 const struct f2fs_filename *fname,
3563 					 struct page **res_page);
3564 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3565 			const struct qstr *child, struct page **res_page);
3566 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3567 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3568 			struct page **page);
3569 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3570 			struct page *page, struct inode *inode);
3571 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3572 			  const struct f2fs_filename *fname);
3573 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3574 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3575 			unsigned int bit_pos);
3576 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3577 			struct inode *inode, nid_t ino, umode_t mode);
3578 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3579 			struct inode *inode, nid_t ino, umode_t mode);
3580 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3581 			struct inode *inode, nid_t ino, umode_t mode);
3582 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3583 			struct inode *dir, struct inode *inode);
3584 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
3585 					struct f2fs_filename *fname);
3586 bool f2fs_empty_dir(struct inode *dir);
3587 
3588 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3589 {
3590 	if (fscrypt_is_nokey_name(dentry))
3591 		return -ENOKEY;
3592 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3593 				inode, inode->i_ino, inode->i_mode);
3594 }
3595 
3596 /*
3597  * super.c
3598  */
3599 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3600 void f2fs_inode_synced(struct inode *inode);
3601 int f2fs_dquot_initialize(struct inode *inode);
3602 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3603 int f2fs_quota_sync(struct super_block *sb, int type);
3604 loff_t max_file_blocks(struct inode *inode);
3605 void f2fs_quota_off_umount(struct super_block *sb);
3606 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
3607 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
3608 							bool irq_context);
3609 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
3610 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
3611 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3612 int f2fs_sync_fs(struct super_block *sb, int sync);
3613 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3614 
3615 /*
3616  * hash.c
3617  */
3618 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3619 
3620 /*
3621  * node.c
3622  */
3623 struct node_info;
3624 
3625 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3626 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3627 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3628 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3629 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3630 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3631 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3632 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3633 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3634 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3635 				struct node_info *ni, bool checkpoint_context);
3636 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3637 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3638 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3639 int f2fs_truncate_xattr_node(struct inode *inode);
3640 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3641 					unsigned int seq_id);
3642 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
3643 int f2fs_remove_inode_page(struct inode *inode);
3644 struct page *f2fs_new_inode_page(struct inode *inode);
3645 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3646 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3647 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3648 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3649 int f2fs_move_node_page(struct page *node_page, int gc_type);
3650 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3651 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3652 			struct writeback_control *wbc, bool atomic,
3653 			unsigned int *seq_id);
3654 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3655 			struct writeback_control *wbc,
3656 			bool do_balance, enum iostat_type io_type);
3657 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3658 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3659 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3660 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3661 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3662 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3663 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3664 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3665 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3666 			unsigned int segno, struct f2fs_summary_block *sum);
3667 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
3668 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3669 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3670 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3671 int __init f2fs_create_node_manager_caches(void);
3672 void f2fs_destroy_node_manager_caches(void);
3673 
3674 /*
3675  * segment.c
3676  */
3677 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3678 int f2fs_commit_atomic_write(struct inode *inode);
3679 void f2fs_abort_atomic_write(struct inode *inode, bool clean);
3680 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3681 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3682 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3683 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3684 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3685 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3686 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3687 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3688 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
3689 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3690 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3691 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3692 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3693 					struct cp_control *cpc);
3694 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3695 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3696 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3697 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3698 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3699 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3700 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3701 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3702 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3703 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3704 			unsigned int *newseg, bool new_sec, int dir);
3705 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3706 					unsigned int start, unsigned int end);
3707 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3708 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
3709 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3710 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3711 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3712 					struct cp_control *cpc);
3713 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3714 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3715 					block_t blk_addr);
3716 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3717 						enum iostat_type io_type);
3718 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3719 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3720 			struct f2fs_io_info *fio);
3721 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3722 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3723 			block_t old_blkaddr, block_t new_blkaddr,
3724 			bool recover_curseg, bool recover_newaddr,
3725 			bool from_gc);
3726 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3727 			block_t old_addr, block_t new_addr,
3728 			unsigned char version, bool recover_curseg,
3729 			bool recover_newaddr);
3730 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3731 			block_t old_blkaddr, block_t *new_blkaddr,
3732 			struct f2fs_summary *sum, int type,
3733 			struct f2fs_io_info *fio);
3734 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3735 					block_t blkaddr, unsigned int blkcnt);
3736 void f2fs_wait_on_page_writeback(struct page *page,
3737 			enum page_type type, bool ordered, bool locked);
3738 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3739 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3740 								block_t len);
3741 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3742 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3743 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3744 			unsigned int val, int alloc);
3745 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3746 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3747 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3748 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3749 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3750 int __init f2fs_create_segment_manager_caches(void);
3751 void f2fs_destroy_segment_manager_caches(void);
3752 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3753 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3754 			unsigned int segno);
3755 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3756 			unsigned int segno);
3757 
3758 #define DEF_FRAGMENT_SIZE	4
3759 #define MIN_FRAGMENT_SIZE	1
3760 #define MAX_FRAGMENT_SIZE	512
3761 
3762 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
3763 {
3764 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
3765 		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
3766 }
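/*
 * Editor's note: the fragment modes deliberately randomize segment
 * (FS_MODE_FRAGMENT_SEG) or in-segment block (FS_MODE_FRAGMENT_BLK)
 * allocation to simulate an aged, fragmented filesystem for testing;
 * chunk lengths are bounded by MIN_FRAGMENT_SIZE..MAX_FRAGMENT_SIZE.
 */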
3767 
3768 /*
3769  * checkpoint.c
3770  */
3771 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
3772 							unsigned char reason);
3773 void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
3774 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3775 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3776 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3777 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3778 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3779 					block_t blkaddr, int type);
3780 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3781 			int type, bool sync);
3782 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
3783 							unsigned int ra_blocks);
3784 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3785 			long nr_to_write, enum iostat_type io_type);
3786 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3787 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3788 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3789 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3790 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3791 					unsigned int devidx, int type);
3792 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3793 					unsigned int devidx, int type);
3794 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3795 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3796 void f2fs_add_orphan_inode(struct inode *inode);
3797 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3798 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3799 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3800 void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
3801 void f2fs_remove_dirty_inode(struct inode *inode);
3802 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
3803 								bool from_cp);
3804 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3805 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3806 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3807 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3808 int __init f2fs_create_checkpoint_caches(void);
3809 void f2fs_destroy_checkpoint_caches(void);
3810 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3811 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3812 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3813 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3814 
3815 /*
3816  * data.c
3817  */
3818 int __init f2fs_init_bioset(void);
3819 void f2fs_destroy_bioset(void);
3820 bool f2fs_is_cp_guaranteed(struct page *page);
3821 int f2fs_init_bio_entry_cache(void);
3822 void f2fs_destroy_bio_entry_cache(void);
3823 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
3824 			  enum page_type type);
3825 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
3826 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3827 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3828 				struct inode *inode, struct page *page,
3829 				nid_t ino, enum page_type type);
3830 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3831 					struct bio **bio, struct page *page);
3832 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3833 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3834 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3835 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3836 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3837 		block_t blk_addr, sector_t *sector);
3838 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3839 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3840 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3841 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3842 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3843 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
3844 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3845 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3846 			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
3847 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
3848 							pgoff_t *next_pgofs);
3849 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3850 			bool for_write);
3851 struct page *f2fs_get_new_data_page(struct inode *inode,
3852 			struct page *ipage, pgoff_t index, bool new_i_size);
3853 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3854 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
3855 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3856 			u64 start, u64 len);
3857 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3858 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3859 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3860 int f2fs_write_single_data_page(struct page *page, int *submitted,
3861 				struct bio **bio, sector_t *last_block,
3862 				struct writeback_control *wbc,
3863 				enum iostat_type io_type,
3864 				int compr_blocks, bool allow_balance);
3865 void f2fs_write_failed(struct inode *inode, loff_t to);
3866 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
3867 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
3868 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3869 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3870 int f2fs_init_post_read_processing(void);
3871 void f2fs_destroy_post_read_processing(void);
3872 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3873 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3874 extern const struct iomap_ops f2fs_iomap_ops;
3875 
3876 /*
3877  * gc.c
3878  */
3879 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3880 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3881 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3882 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
3883 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3884 int f2fs_gc_range(struct f2fs_sb_info *sbi,
3885 		unsigned int start_seg, unsigned int end_seg,
3886 		bool dry_run, unsigned int dry_run_sections);
3887 int f2fs_resize_fs(struct file *filp, __u64 block_count);
3888 int __init f2fs_create_garbage_collection_cache(void);
3889 void f2fs_destroy_garbage_collection_cache(void);
3890 /* victim selection function for cleaning and SSR */
3891 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
3892 			int gc_type, int type, char alloc_mode,
3893 			unsigned long long age);
3894 
3895 /*
3896  * recovery.c
3897  */
3898 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3899 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3900 int __init f2fs_create_recovery_cache(void);
3901 void f2fs_destroy_recovery_cache(void);
3902 
3903 /*
3904  * debug.c
3905  */
3906 #ifdef CONFIG_F2FS_STAT_FS
3907 struct f2fs_stat_info {
3908 	struct list_head stat_list;
3909 	struct f2fs_sb_info *sbi;
3910 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3911 	int main_area_segs, main_area_sections, main_area_zones;
3912 	unsigned long long hit_cached[NR_EXTENT_CACHES];
3913 	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
3914 	unsigned long long total_ext[NR_EXTENT_CACHES];
3915 	unsigned long long hit_total[NR_EXTENT_CACHES];
3916 	int ext_tree[NR_EXTENT_CACHES];
3917 	int zombie_tree[NR_EXTENT_CACHES];
3918 	int ext_node[NR_EXTENT_CACHES];
3919 	/* to count memory footprint */
3920 	unsigned long long ext_mem[NR_EXTENT_CACHES];
3921 	/* for read extent cache */
3922 	unsigned long long hit_largest;
3923 	/* for block age extent cache */
3924 	unsigned long long allocated_data_blocks;
3925 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3926 	int ndirty_data, ndirty_qdata;
3927 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3928 	int nats, dirty_nats, sits, dirty_sits;
3929 	int free_nids, avail_nids, alloc_nids;
3930 	int total_count, utilization;
3931 	int nr_wb_cp_data, nr_wb_data;
3932 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3933 	int nr_dio_read, nr_dio_write;
3934 	unsigned int io_skip_bggc, other_skip_bggc;
3935 	int nr_flushing, nr_flushed, flush_list_empty;
3936 	int nr_discarding, nr_discarded;
3937 	int nr_discard_cmd;
3938 	unsigned int undiscard_blks;
3939 	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3940 	unsigned int cur_ckpt_time, peak_ckpt_time;
3941 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3942 	int compr_inode, swapfile_inode;
3943 	unsigned long long compr_blocks;
3944 	int aw_cnt, max_aw_cnt;
3945 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3946 	unsigned int bimodal, avg_vblocks;
3947 	int util_free, util_valid, util_invalid;
3948 	int rsvd_segs, overp_segs;
3949 	int dirty_count, node_pages, meta_pages, compress_pages;
3950 	int compress_page_hit;
3951 	int prefree_count, free_segs, free_secs;
3952 	int cp_call_count[MAX_CALL_TYPE], cp_count;
3953 	int gc_call_count[MAX_CALL_TYPE];
3954 	int gc_segs[2][2];
3955 	int gc_secs[2][2];
3956 	int tot_blks, data_blks, node_blks;
3957 	int bg_data_blks, bg_node_blks;
3958 	int curseg[NR_CURSEG_TYPE];
3959 	int cursec[NR_CURSEG_TYPE];
3960 	int curzone[NR_CURSEG_TYPE];
3961 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3962 	unsigned int full_seg[NR_CURSEG_TYPE];
3963 	unsigned int valid_blks[NR_CURSEG_TYPE];
3964 
3965 	unsigned int meta_count[META_MAX];
3966 	unsigned int segment_count[2];
3967 	unsigned int block_count[2];
3968 	unsigned int inplace_count;
3969 	unsigned long long base_mem, cache_mem, page_mem;
3970 };
3971 
3972 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3973 {
3974 	return (struct f2fs_stat_info *)sbi->stat_info;
3975 }
3976 
3977 #define stat_inc_cp_call_count(sbi, foreground)				\
3978 		atomic_inc(&sbi->cp_call_count[(foreground)])
3979 #define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
3980 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
3981 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
3982 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
3983 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
3984 #define stat_inc_total_hit(sbi, type)		(atomic64_inc(&(sbi)->total_hit_ext[type]))
3985 #define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
3986 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
3987 #define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
3988 #define stat_inc_inline_xattr(inode)					\
3989 	do {								\
3990 		if (f2fs_has_inline_xattr(inode))			\
3991 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
3992 	} while (0)
3993 #define stat_dec_inline_xattr(inode)					\
3994 	do {								\
3995 		if (f2fs_has_inline_xattr(inode))			\
3996 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
3997 	} while (0)
3998 #define stat_inc_inline_inode(inode)					\
3999 	do {								\
4000 		if (f2fs_has_inline_data(inode))			\
4001 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
4002 	} while (0)
4003 #define stat_dec_inline_inode(inode)					\
4004 	do {								\
4005 		if (f2fs_has_inline_data(inode))			\
4006 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
4007 	} while (0)
4008 #define stat_inc_inline_dir(inode)					\
4009 	do {								\
4010 		if (f2fs_has_inline_dentry(inode))			\
4011 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
4012 	} while (0)
4013 #define stat_dec_inline_dir(inode)					\
4014 	do {								\
4015 		if (f2fs_has_inline_dentry(inode))			\
4016 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
4017 	} while (0)
4018 #define stat_inc_compr_inode(inode)					\
4019 	do {								\
4020 		if (f2fs_compressed_file(inode))			\
4021 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
4022 	} while (0)
4023 #define stat_dec_compr_inode(inode)					\
4024 	do {								\
4025 		if (f2fs_compressed_file(inode))			\
4026 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
4027 	} while (0)
4028 #define stat_add_compr_blocks(inode, blocks)				\
4029 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
4030 #define stat_sub_compr_blocks(inode, blocks)				\
4031 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
4032 #define stat_inc_swapfile_inode(inode)					\
4033 		(atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
4034 #define stat_dec_swapfile_inode(inode)					\
4035 		(atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
4036 #define stat_inc_atomic_inode(inode)					\
4037 			(atomic_inc(&F2FS_I_SB(inode)->atomic_files))
4038 #define stat_dec_atomic_inode(inode)					\
4039 			(atomic_dec(&F2FS_I_SB(inode)->atomic_files))
4040 #define stat_inc_meta_count(sbi, blkaddr)				\
4041 	do {								\
4042 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
4043 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
4044 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
4045 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
4046 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
4047 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
4048 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
4049 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
4050 	} while (0)
4051 #define stat_inc_seg_type(sbi, curseg)					\
4052 		((sbi)->segment_count[(curseg)->alloc_type]++)
4053 #define stat_inc_block_count(sbi, curseg)				\
4054 		((sbi)->block_count[(curseg)->alloc_type]++)
4055 #define stat_inc_inplace_blocks(sbi)					\
4056 		(atomic_inc(&(sbi)->inplace_count))
4057 #define stat_update_max_atomic_write(inode)				\
4058 	do {								\
4059 		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files);	\
4060 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
4061 		if (cur > max)						\
4062 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
4063 	} while (0)
4064 #define stat_inc_gc_call_count(sbi, foreground)				\
4065 		(F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
4066 #define stat_inc_gc_sec_count(sbi, type, gc_type)			\
4067 		(F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
4068 #define stat_inc_gc_seg_count(sbi, type, gc_type)			\
4069 		(F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
4070 
4071 #define stat_inc_tot_blk_count(si, blks)				\
4072 	((si)->tot_blks += (blks))
4073 
4074 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
4075 	do {								\
4076 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
4077 		stat_inc_tot_blk_count(si, blks);			\
4078 		si->data_blks += (blks);				\
4079 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
4080 	} while (0)
4081 
4082 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
4083 	do {								\
4084 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
4085 		stat_inc_tot_blk_count(si, blks);			\
4086 		si->node_blks += (blks);				\
4087 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
4088 	} while (0)
4089 
4090 int f2fs_build_stats(struct f2fs_sb_info *sbi);
4091 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
4092 void __init f2fs_create_root_stats(void);
4093 void f2fs_destroy_root_stats(void);
4094 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
4095 #else
4096 #define stat_inc_cp_call_count(sbi, foreground)		do { } while (0)
4097 #define stat_inc_cp_count(sbi)				do { } while (0)
4098 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
4099 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
4100 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
4101 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
4102 #define stat_inc_total_hit(sbi, type)			do { } while (0)
4103 #define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
4104 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
4105 #define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
4106 #define stat_inc_inline_xattr(inode)			do { } while (0)
4107 #define stat_dec_inline_xattr(inode)			do { } while (0)
4108 #define stat_inc_inline_inode(inode)			do { } while (0)
4109 #define stat_dec_inline_inode(inode)			do { } while (0)
4110 #define stat_inc_inline_dir(inode)			do { } while (0)
4111 #define stat_dec_inline_dir(inode)			do { } while (0)
4112 #define stat_inc_compr_inode(inode)			do { } while (0)
4113 #define stat_dec_compr_inode(inode)			do { } while (0)
4114 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
4115 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
4116 #define stat_inc_swapfile_inode(inode)			do { } while (0)
4117 #define stat_dec_swapfile_inode(inode)			do { } while (0)
4118 #define stat_inc_atomic_inode(inode)			do { } while (0)
4119 #define stat_dec_atomic_inode(inode)			do { } while (0)
4120 #define stat_update_max_atomic_write(inode)		do { } while (0)
4121 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
4122 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
4123 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
4124 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
4125 #define stat_inc_gc_call_count(sbi, foreground)		do { } while (0)
4126 #define stat_inc_gc_sec_count(sbi, type, gc_type)	do { } while (0)
4127 #define stat_inc_gc_seg_count(sbi, type, gc_type)	do { } while (0)
4128 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
4129 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
4130 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
4131 
4132 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
4133 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
4134 static inline void __init f2fs_create_root_stats(void) { }
4135 static inline void f2fs_destroy_root_stats(void) { }
4136 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
4137 #endif
4138 
4139 extern const struct file_operations f2fs_dir_operations;
4140 extern const struct file_operations f2fs_file_operations;
4141 extern const struct inode_operations f2fs_file_inode_operations;
4142 extern const struct address_space_operations f2fs_dblock_aops;
4143 extern const struct address_space_operations f2fs_node_aops;
4144 extern const struct address_space_operations f2fs_meta_aops;
4145 extern const struct inode_operations f2fs_dir_inode_operations;
4146 extern const struct inode_operations f2fs_symlink_inode_operations;
4147 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
4148 extern const struct inode_operations f2fs_special_inode_operations;
4149 extern struct kmem_cache *f2fs_inode_entry_slab;
4150 
4151 /*
4152  * inline.c
4153  */
4154 bool f2fs_may_inline_data(struct inode *inode);
4155 bool f2fs_sanity_check_inline_data(struct inode *inode);
4156 bool f2fs_may_inline_dentry(struct inode *inode);
4157 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
4158 void f2fs_truncate_inline_inode(struct inode *inode,
4159 						struct page *ipage, u64 from);
4160 int f2fs_read_inline_data(struct inode *inode, struct page *page);
4161 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
4162 int f2fs_convert_inline_inode(struct inode *inode);
4163 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
4164 int f2fs_write_inline_data(struct inode *inode, struct page *page);
4165 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
4166 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
4167 					const struct f2fs_filename *fname,
4168 					struct page **res_page);
4169 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
4170 			struct page *ipage);
4171 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
4172 			struct inode *inode, nid_t ino, umode_t mode);
4173 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
4174 				struct page *page, struct inode *dir,
4175 				struct inode *inode);
4176 bool f2fs_empty_inline_dir(struct inode *dir);
4177 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4178 			struct fscrypt_str *fstr);
4179 int f2fs_inline_data_fiemap(struct inode *inode,
4180 			struct fiemap_extent_info *fieinfo,
4181 			__u64 start, __u64 len);
4182 
4183 /*
4184  * shrinker.c
4185  */
4186 unsigned long f2fs_shrink_count(struct shrinker *shrink,
4187 			struct shrink_control *sc);
4188 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
4189 			struct shrink_control *sc);
4190 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
4191 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
4192 
4193 /*
4194  * extent_cache.c
4195  */
4196 bool sanity_check_extent_cache(struct inode *inode);
4197 void f2fs_init_extent_tree(struct inode *inode);
4198 void f2fs_drop_extent_tree(struct inode *inode);
4199 void f2fs_destroy_extent_node(struct inode *inode);
4200 void f2fs_destroy_extent_tree(struct inode *inode);
4201 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
4202 int __init f2fs_create_extent_cache(void);
4203 void f2fs_destroy_extent_cache(void);
4204 
4205 /* read extent cache ops */
4206 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
4207 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
4208 			struct extent_info *ei);
4209 bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
4210 			block_t *blkaddr);
4211 void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
4212 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
4213 			pgoff_t fofs, block_t blkaddr, unsigned int len);
4214 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
4215 			int nr_shrink);
4216 
4217 /* block age extent cache ops */
4218 void f2fs_init_age_extent_tree(struct inode *inode);
4219 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
4220 			struct extent_info *ei);
4221 void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
4222 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
4223 			pgoff_t fofs, unsigned int len);
4224 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
4225 			int nr_shrink);
4226 
4227 /*
4228  * sysfs.c
4229  */
4230 #define MIN_RA_MUL	2
4231 #define MAX_RA_MUL	256
4232 
4233 int __init f2fs_init_sysfs(void);
4234 void f2fs_exit_sysfs(void);
4235 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
4236 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
4237 
4238 /* verity.c */
4239 extern const struct fsverity_operations f2fs_verityops;
4240 
4241 /*
4242  * crypto support
4243  */
4244 static inline bool f2fs_encrypted_file(struct inode *inode)
4245 {
4246 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
4247 }
4248 
4249 static inline void f2fs_set_encrypted_inode(struct inode *inode)
4250 {
4251 #ifdef CONFIG_FS_ENCRYPTION
4252 	file_set_encrypt(inode);
4253 	f2fs_set_inode_flags(inode);
4254 #endif
4255 }
4256 
4257 /*
4258  * Returns true if reads of the inode's data need a postprocessing step,
4259  * such as decryption, fs-verity verification, or decompression.
4260  */
4261 static inline bool f2fs_post_read_required(struct inode *inode)
4262 {
4263 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
4264 		f2fs_compressed_file(inode);
4265 }
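/*
 * Editor's note: when this returns true, read completions are routed
 * through the post-read workqueue (see f2fs_init_post_read_wq()) so
 * decryption, verity verification, or decompression can run before the
 * pages are marked uptodate.
 */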
4266 
4267 /*
4268  * compress.c
4269  */
4270 #ifdef CONFIG_F2FS_FS_COMPRESSION
4271 bool f2fs_is_compressed_page(struct page *page);
4272 struct page *f2fs_compress_control_page(struct page *page);
4273 int f2fs_prepare_compress_overwrite(struct inode *inode,
4274 			struct page **pagep, pgoff_t index, void **fsdata);
4275 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
4276 					pgoff_t index, unsigned copied);
4277 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
4278 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
4279 bool f2fs_is_compress_backend_ready(struct inode *inode);
4280 bool f2fs_is_compress_level_valid(int alg, int lvl);
4281 int __init f2fs_init_compress_mempool(void);
4282 void f2fs_destroy_compress_mempool(void);
4283 void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
4284 void f2fs_end_read_compressed_page(struct page *page, bool failed,
4285 				block_t blkaddr, bool in_task);
4286 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
4287 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
4288 bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
4289 				int index, int nr_pages, bool uptodate);
4290 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
4291 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
4292 int f2fs_write_multi_pages(struct compress_ctx *cc,
4293 						int *submitted,
4294 						struct writeback_control *wbc,
4295 						enum iostat_type io_type);
4296 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
4297 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
4298 				pgoff_t fofs, block_t blkaddr,
4299 				unsigned int llen, unsigned int c_len);
4300 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
4301 				unsigned nr_pages, sector_t *last_block_in_bio,
4302 				bool is_readahead, bool for_write);
4303 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
4304 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
4305 				bool in_task);
4306 void f2fs_put_page_dic(struct page *page, bool in_task);
4307 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
4308 						unsigned int ofs_in_node);
4309 int f2fs_init_compress_ctx(struct compress_ctx *cc);
4310 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
4311 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
4312 int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
4313 void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
4314 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
4315 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
4316 int __init f2fs_init_compress_cache(void);
4317 void f2fs_destroy_compress_cache(void);
4318 struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
4319 void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
4320 void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4321 						nid_t ino, block_t blkaddr);
4322 bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
4323 								block_t blkaddr);
4324 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
4325 #define inc_compr_inode_stat(inode)					\
4326 	do {								\
4327 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4328 		sbi->compr_new_inode++;					\
4329 	} while (0)
4330 #define add_compr_block_stat(inode, blocks)				\
4331 	do {								\
4332 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
4333 		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
4334 		sbi->compr_written_block += blocks;			\
4335 		sbi->compr_saved_block += diff;				\
4336 	} while (0)
4337 #else
4338 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
4339 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
4340 {
4341 	if (!f2fs_compressed_file(inode))
4342 		return true;
4343 	/* compression is not supported */
4344 	return false;
4345 }
4346 static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
4347 static inline struct page *f2fs_compress_control_page(struct page *page)
4348 {
4349 	WARN_ON_ONCE(1);
4350 	return ERR_PTR(-EINVAL);
4351 }
4352 static inline int __init f2fs_init_compress_mempool(void) { return 0; }
4353 static inline void f2fs_destroy_compress_mempool(void) { }
4354 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
4355 				bool in_task) { }
4356 static inline void f2fs_end_read_compressed_page(struct page *page,
4357 				bool failed, block_t blkaddr, bool in_task)
4358 {
4359 	WARN_ON_ONCE(1);
4360 }
4361 static inline void f2fs_put_page_dic(struct page *page, bool in_task)
4362 {
4363 	WARN_ON_ONCE(1);
4364 }
4365 static inline unsigned int f2fs_cluster_blocks_are_contiguous(
4366 			struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
4367 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
4368 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
4369 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
4370 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4371 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4372 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4373 static inline void f2fs_destroy_compress_cache(void) { }
4374 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4375 				block_t blkaddr) { }
4376 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4377 				struct page *page, nid_t ino, block_t blkaddr) { }
4378 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4379 				struct page *page, block_t blkaddr) { return false; }
4380 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4381 							nid_t ino) { }
4382 #define inc_compr_inode_stat(inode)		do { } while (0)
4383 static inline void f2fs_update_read_extent_tree_range_compressed(
4384 				struct inode *inode,
4385 				pgoff_t fofs, block_t blkaddr,
4386 				unsigned int llen, unsigned int c_len) { }
4387 #endif
4388 
4389 static inline int set_compress_context(struct inode *inode)
4390 {
4391 #ifdef CONFIG_F2FS_FS_COMPRESSION
4392 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4393 
4394 	F2FS_I(inode)->i_compress_algorithm =
4395 			F2FS_OPTION(sbi).compress_algorithm;
4396 	F2FS_I(inode)->i_log_cluster_size =
4397 			F2FS_OPTION(sbi).compress_log_size;
4398 	F2FS_I(inode)->i_compress_flag =
4399 			F2FS_OPTION(sbi).compress_chksum ?
4400 				BIT(COMPRESS_CHKSUM) : 0;
4401 	F2FS_I(inode)->i_cluster_size =
4402 			BIT(F2FS_I(inode)->i_log_cluster_size);
4403 	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
4404 		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
4405 			F2FS_OPTION(sbi).compress_level)
4406 		F2FS_I(inode)->i_compress_level =
4407 				F2FS_OPTION(sbi).compress_level;
4408 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
4409 	set_inode_flag(inode, FI_COMPRESSED_FILE);
4410 	stat_inc_compr_inode(inode);
4411 	inc_compr_inode_stat(inode);
4412 	f2fs_mark_inode_dirty_sync(inode, true);
4413 	return 0;
4414 #else
4415 	return -EOPNOTSUPP;
4416 #endif
4417 }
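/*
 * Usage sketch (editor's addition): compression is normally enabled on
 * an empty regular file, e.g. from the setflags ioctl path.  The guard
 * shown is illustrative, not the full set of checks the ioctl performs.
 */
#if 0	/* example only */
static inline int f2fs_example_enable_compress(struct inode *inode)
{
	if (!S_ISREG(inode->i_mode) || F2FS_HAS_BLOCKS(inode))
		return -EINVAL;	/* only empty regular files */
	return set_compress_context(inode);
}
#endif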
4418 
4419 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4420 {
4421 	struct f2fs_inode_info *fi = F2FS_I(inode);
4422 
4423 	f2fs_down_write(&F2FS_I(inode)->i_sem);
4424 
4425 	if (!f2fs_compressed_file(inode)) {
4426 		f2fs_up_write(&F2FS_I(inode)->i_sem);
4427 		return true;
4428 	}
4429 	if (f2fs_is_mmap_file(inode) ||
4430 		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
4431 		f2fs_up_write(&F2FS_I(inode)->i_sem);
4432 		return false;
4433 	}
4434 
4435 	fi->i_flags &= ~F2FS_COMPR_FL;
4436 	stat_dec_compr_inode(inode);
4437 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
4438 	f2fs_mark_inode_dirty_sync(inode, true);
4439 
4440 	f2fs_up_write(&F2FS_I(inode)->i_sem);
4441 	return true;
4442 }
4443 
4444 #define F2FS_FEATURE_FUNCS(name, flagname) \
4445 static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4446 { \
4447 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4448 }
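/*
 * Editor's note: each invocation below defines one predicate, e.g.
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to
 * f2fs_sb_has_encrypt(sbi), which tests F2FS_FEATURE_ENCRYPT in the
 * superblock feature mask.
 */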
4449 
4450 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4451 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4452 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4453 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4454 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4455 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4456 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4457 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4458 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4459 F2FS_FEATURE_FUNCS(verity, VERITY);
4460 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4461 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4462 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4463 F2FS_FEATURE_FUNCS(readonly, RO);
4464 
4465 #ifdef CONFIG_BLK_DEV_ZONED
4466 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4467 				    block_t blkaddr)
4468 {
4469 	unsigned int zno = blkaddr / sbi->blocks_per_blkz;
4470 
4471 	return test_bit(zno, FDEV(devi).blkz_seq);
4472 }
4473 #endif
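
/*
 * Worked example (illustrative figures): with 256MiB zones and 4KiB
 * blocks, sbi->blocks_per_blkz is 65536, so blkaddr 196608 maps to
 * zone 3; the bit test then reports whether zone 3 is sequential-only
 * and must be written append-only.
 */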
4474 
4475 static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
4476 				  struct block_device *bdev)
4477 {
4478 	int i;
4479 
4480 	if (!f2fs_is_multi_device(sbi))
4481 		return 0;
4482 
4483 	for (i = 0; i < sbi->s_ndevs; i++)
4484 		if (FDEV(i).bdev == bdev)
4485 			return i;
4486 
4487 	WARN_ON(1);
4488 	return -1;
4489 }
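
/*
 * Hedged sketch: an I/O completion path that only holds the bio can map
 * its device back to an f2fs device slot; the caller shown here is
 * hypothetical.  A negative return means the bdev is unknown and has
 * already been WARNed about.
 *
 *	int devi = f2fs_bdev_index(sbi, bio->bi_bdev);
 *
 *	if (devi < 0)
 *		return;
 */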
4490 
4491 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4492 {
4493 	return f2fs_sb_has_blkzoned(sbi);
4494 }
4495 
4496 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4497 {
4498 	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
4499 }
4500 
4501 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4502 {
4503 	int i;
4504 
4505 	if (!f2fs_is_multi_device(sbi))
4506 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4507 
4508 	for (i = 0; i < sbi->s_ndevs; i++)
4509 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4510 			return true;
4511 	return false;
4512 }
4513 
4514 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4515 {
4516 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4517 					f2fs_hw_should_discard(sbi);
4518 }
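
/*
 * Policy summary of the helpers above: runtime discard is issued either
 * because the user asked for it and at least one device can honour it
 * (test_opt(sbi, DISCARD) && f2fs_hw_support_discard()), or
 * unconditionally on zoned storage, where f2fs_hw_should_discard()
 * forces it because sequential zones must be reset before reuse.
 */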
4519 
4520 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4521 {
4522 	int i;
4523 
4524 	if (!f2fs_is_multi_device(sbi))
4525 		return bdev_read_only(sbi->sb->s_bdev);
4526 
4527 	for (i = 0; i < sbi->s_ndevs; i++)
4528 		if (bdev_read_only(FDEV(i).bdev))
4529 			return true;
4530 	return false;
4531 }
4532 
4533 static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
4534 {
4535 	return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
4536 }
4537 
4538 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4539 {
4540 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4541 }
4542 
4543 static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
4544 					  block_t blkaddr)
4545 {
4546 	if (f2fs_sb_has_blkzoned(sbi)) {
4547 		int devi = f2fs_target_device_index(sbi, blkaddr);
4548 
4549 		return !bdev_is_zoned(FDEV(devi).bdev);
4550 	}
4551 	return true;
4552 }
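
/*
 * Descriptive note: pinned files require in-place rewrites, so on a
 * zoned setup only block addresses that land on a conventional
 * (non-zoned, randomly writable) member device are valid pinning
 * targets; on purely conventional storage every address qualifies.
 */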
4553 
4554 static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
4555 {
4556 	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
4557 }
4558 
4559 static inline bool f2fs_may_compress(struct inode *inode)
4560 {
4561 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4562 		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
4563 		f2fs_is_mmap_file(inode))
4564 		return false;
4565 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4566 }
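
/*
 * Descriptive note: compression is refused where a stable or special
 * block mapping is required (swapfiles, pinned files), where the data
 * does not go through the normal cluster write path (atomic files,
 * inline data, mmapped files), and for anything other than regular
 * files and directories.
 */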
4567 
4568 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4569 						u64 blocks, bool add)
4570 {
4571 	struct f2fs_inode_info *fi = F2FS_I(inode);
4572 	int diff = fi->i_cluster_size - blocks;
4573 
4574 	/* don't let i_compr_blocks go negative once all saved blocks were released */
4575 	if (!add && !atomic_read(&fi->i_compr_blocks))
4576 		return;
4577 
4578 	if (add) {
4579 		atomic_add(diff, &fi->i_compr_blocks);
4580 		stat_add_compr_blocks(inode, diff);
4581 	} else {
4582 		atomic_sub(diff, &fi->i_compr_blocks);
4583 		stat_sub_compr_blocks(inode, diff);
4584 	}
4585 	f2fs_mark_inode_dirty_sync(inode, true);
4586 }
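
/*
 * Worked example (illustrative): with i_cluster_size == 16, a cluster
 * whose payload compressed into 4 blocks yields diff = 16 - 4 = 12
 * saved blocks; an "add" update accounts those 12 into i_compr_blocks,
 * a later release subtracts them again, and the early return above
 * keeps the counter from underflowing once all saved blocks have been
 * handed back.
 */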
4587 
4588 static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
4589 								int flag)
4590 {
4591 	if (!f2fs_is_multi_device(sbi))
4592 		return false;
4593 	if (flag != F2FS_GET_BLOCK_DIO)
4594 		return false;
4595 	return sbi->aligned_blksize;
4596 }
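
/*
 * Descriptive note: on a multi-device volume, direct I/O mappings
 * (F2FS_GET_BLOCK_DIO) are only permitted when all member devices share
 * an aligned logical block size, which sbi->aligned_blksize records
 * when the devices are scanned at mount time.
 */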
4597 
4598 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4599 {
4600 	return fsverity_active(inode) &&
4601 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4602 }
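
/*
 * Worked example: for i_size == 10000 and PAGE_SIZE == 4096,
 * DIV_ROUND_UP() gives 3, so pages 0..2 are verified when fs-verity is
 * enabled, while pages at index 3 and beyond (past EOF, e.g. where the
 * Merkle tree itself lives) are not.
 */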
4603 
4604 #ifdef CONFIG_F2FS_FAULT_INJECTION
4605 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4606 							unsigned int type);
4607 #else
4608 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
4609 #endif
4610 
4611 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4612 {
4613 #ifdef CONFIG_QUOTA
4614 	if (f2fs_sb_has_quota_ino(sbi))
4615 		return true;
4616 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4617 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4618 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4619 		return true;
4620 #endif
4621 	return false;
4622 }
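
/*
 * Descriptive note: quota updates are considered journalled either when
 * the quota_ino feature keeps the quota files as internal inodes or
 * when explicit usr/grp/prj quota file names were supplied at mount
 * time; both paths let checkpoints keep quota state consistent.
 */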
4623 
4624 static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
4625 {
4626 	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
4627 }
4628 
4629 static inline void f2fs_io_schedule_timeout(long timeout)
4630 {
4631 	set_current_state(TASK_UNINTERRUPTIBLE);
4632 	io_schedule_timeout(timeout);
4633 }
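
/*
 * Descriptive note: the wrapper pairs set_current_state() with
 * io_schedule_timeout() so the sleep is uninterruptible and accounted
 * as I/O wait, e.g. f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT) for a
 * short, bounded backoff.
 */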
4634 
4635 static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
4636 					enum page_type type)
4637 {
4638 	if (unlikely(f2fs_cp_error(sbi)))
4639 		return;
4640 
4641 	if (ofs == sbi->page_eio_ofs[type]) {
4642 		if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
4643 			set_ckpt_flags(sbi, CP_ERROR_FLAG);
4644 	} else {
4645 		sbi->page_eio_ofs[type] = ofs;
4646 		sbi->page_eio_cnt[type] = 0;
4647 	}
4648 }
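
/*
 * Descriptive note: repeated EIOs on the same page offset of one page
 * type bump a counter, and once it passes MAX_RETRY_PAGE_EIO the
 * checkpoint is marked with CP_ERROR_FLAG so the filesystem stops
 * retrying forever; an EIO at a different offset restarts the count at
 * the new offset.
 */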
4649 
4650 static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
4651 {
4652 	return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
4653 }
4654 
4655 static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
4656 					block_t blkaddr, unsigned int cnt)
4657 {
4658 	bool need_submit = false;
4659 	int i = 0;
4660 
4661 	do {
4662 		struct page *page;
4663 
4664 		page = find_get_page(META_MAPPING(sbi), blkaddr + i);
4665 		if (page) {
4666 			if (PageWriteback(page))
4667 				need_submit = true;
4668 			f2fs_put_page(page, 0);
4669 		}
4670 	} while (++i < cnt && !need_submit);
4671 
4672 	if (need_submit)
4673 		f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
4674 							NULL, 0, DATA);
4675 
4676 	truncate_inode_pages_range(META_MAPPING(sbi),
4677 			F2FS_BLK_TO_BYTES((loff_t)blkaddr),
4678 			F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
4679 }
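
/*
 * Worked example for the byte-range conversion: with 4KiB blocks,
 * blkaddr == 100 and cnt == 2 truncate bytes 409600 through 417791,
 * i.e. from F2FS_BLK_TO_BYTES(100) up to the last byte of block 101 as
 * returned by F2FS_BLK_END_BYTES(101).
 */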
4680 
4681 static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
4682 								block_t blkaddr)
4683 {
4684 	f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
4685 	f2fs_invalidate_compress_page(sbi, blkaddr);
4686 }
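
/*
 * Descriptive note: invalidating a single block address means dropping
 * both its meta-inode page cache copy and, when CONFIG_F2FS_FS_COMPRESSION
 * is enabled, any cached compressed page keyed by the same address.
 */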
4687 
4688 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4689 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4690 
4691 #endif /* _LINUX_F2FS_H */
4692