/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

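/*
 * A minimal usage sketch of f2fs_bug_on() (illustrative only; the check
 * below is hypothetical). With CONFIG_F2FS_CHECK_FS the condition BUGs;
 * otherwise it only warns and tags the filesystem as needing fsck:
 *
 *	f2fs_bug_on(sbi, !PageLocked(page));
 */
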
enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif

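/*
 * A minimal sketch of how a fault type is tested (illustrative only;
 * F2FS_OPTION() is defined just below, and the real rate-limited check
 * lives in time_to_inject()):
 *
 *	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 *
 *	if (IS_FAULT_SET(ffi, FAULT_KMALLOC))
 *		;	// kmalloc-type fault injection is enabled
 */
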
/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

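/*
 * A minimal usage sketch (illustrative only): the option name is pasted
 * onto F2FS_MOUNT_ by the ## operator, so "DISCARD" below selects
 * F2FS_MOUNT_DISCARD.
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, DISCARD))
 *		clear_opt(sbi, DISCARD);
 */
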
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

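/*
 * The signed subtraction above makes the comparison wraparound-safe: for
 * example, with a = 2 and b = ULLONG_MAX (b just wrapped around),
 * (long long)(a - b) evaluates to 3 > 0, so a is still recognized as the
 * newer version.
 */
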
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;			/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))

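/*
 * A minimal usage sketch (illustrative only): the feature word is stored
 * little-endian in the raw superblock, hence the cpu_to_le32() on the
 * mask rather than a le32_to_cpu() on the field.
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		;	// this image was formatted with compression support
 */
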
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if candidates exist */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))

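/*
 * plist_idx() maps a discard length in blocks to a pend_list slot:
 * plist_idx(1) == 0, plist_idx(16) == 15, and any length >= MAX_PLIST_NUM
 * collapses into the last slot, e.g. plist_idx(4096) == 511.
 */
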
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */

	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used when candidates exist */
	unsigned int mid_interval;	/* used when device is busy */
	unsigned int max_interval;	/* used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum discard granularity that ignores I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discards */
	atomic_t queued_discard;		/* # of queued discards */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}

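/*
 * A minimal sketch of reserving journal space (illustrative only): callers
 * check for room before adding an entry, then bump the counter and use the
 * returned slot index.
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		i = update_nats_in_cursum(journal, 1);
 */
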
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -	\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))

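/*
 * Worked example (an assumption: no extra attribute space, the default
 * 50-slot inline xattr area, and DEF_ADDRS_PER_INODE = 923):
 * MAX_INLINE_DATA = 4 * (923 - 50 - 1) = 3488 bytes, NR_INLINE_DENTRY =
 * 3488 * 8 / ((11 + 8) * 8 + 1) = 182 entries, INLINE_DENTRY_BITMAP_SIZE =
 * DIV_ROUND_UP(182, 8) = 23 bytes, and INLINE_RESERVED_SIZE =
 * 3488 - (19 * 182 + 23) = 7 bytes.
 */
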
/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#ifdef CONFIG_UNICODE
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the directory is both
	 * casefolded and encrypted and its encryption key is unavailable, or if
	 * the filesystem is doing an internal operation where usr_fname is also
	 * NULL.  In all these cases we fall back to treating the name as an
	 * opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}

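/*
 * The inline dentry area handed to make_dentry_ptr_inline() above is laid
 * out as [dentry bitmap][reserved][f2fs_dir_entry array][filename slots],
 * which is why d->dentry skips bitmap_size + reserved_size bytes and
 * d->filename additionally skips SIZE_OF_DIR_ENTRY * entry_cnt bytes.
 */
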
/*
 * Xattrs are stored in one dedicated node block per file, which keeps -1
 * as its node offset to distinguish it from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			unsigned int ofs;	/* start offset of the entry */
			unsigned int len;	/* length of the entry */
		};
		unsigned long long key;		/* 64-bit key */
	} __packed;
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent nodes in rb-tree */
	bool largest_updated;		/* largest extent updated */
};

/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

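/*
 * A minimal sketch of a single-block mapping request (illustrative only;
 * "index" is a hypothetical page offset). f2fs_map_blocks(), declared
 * later in this header, fills in m_pblk, m_len and m_flags for the
 * logical range starting at m_lblk:
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = index,
 *		.m_len = 1,
 *		.m_seg_type = NO_CHECK_TYPE,
 *		.m_may_create = false,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 */
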
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

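/*
 * A minimal usage sketch (illustrative only; is_file()/set_file() are
 * defined further down in this header and test/update i_advise):
 *
 *	if (!file_is_cold(inode))
 *		file_set_cold(inode);	// steer this file's data to the cold log
 */
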
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistics */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* used below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t	last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

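/*
 * Worked example: back = {lstart = 100, len = 10} and front =
 * {lstart = 110, len = 20} are mergeable into one 30-block discard
 * provided 30 <= max_len, because back ends exactly where front begins.
 */
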
static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node ids */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for building free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of the function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t	data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}

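/*
 * A minimal usage sketch (illustrative only): callers zero the dnode with
 * set_new_dnode() and then resolve it, e.g. via f2fs_get_dnode_of_data(),
 * which is declared later in this header:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 */
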
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, the on-disk layout covers a maximum of 16 logs that consist
 * of 8 for data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent logs */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block addresses */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem logs */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;		/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written,
 *			waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
	block_t *zone_capacity_blocks;  /* Array of zone capacity in blks */
#endif
};

enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};

enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress files that have
				 * compression enabled
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

/*
 * this value is set in a page as private data, indicating that the page
 * was atomically written and is on an inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == DUMMY_WRITTEN_PAGE)

#ifdef CONFIG_F2FS_IO_TRACE
#define IS_IO_TRACED_PAGE(page)			\
		(page_private(page) > 0 &&		\
		 page_private(page) < (unsigned long)PID_MAX_LIMIT)
#else
#define IS_IO_TRACED_PAGE(page) (0)
#endif

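/*
 * A minimal sketch (illustrative only): an in-memory page is tagged by
 * storing one of the sentinels above in its private field:
 *
 *	set_page_private(page, ATOMIC_WRITTEN_PAGE);
 *	...
 *	if (IS_ATOMIC_WRITTEN_PAGE(page))
 *		;	// page sits on an inmem_pages list
 */
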
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define	COMPRESS_LEVEL_OFFSET	8

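/*
 * Worked example: COMPRESS_HEADER_SIZE is sizeof(__le32) * (2 +
 * COMPRESS_DATA_RESERVED_SIZE) = 24 bytes, since the flexible cdata[]
 * array contributes no size. A cluster is only worth keeping compressed
 * when clen plus this header still saves at least one page over the raw
 * data (the policy check itself lives in compress.c).
 */
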
/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages.  It is decremented by 1 each time a page
	 * has been read (or failed to be read).  When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0.  In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion.  This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio.  These references are necessary to prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;			/* IO error occurred before decompression? */
	bool need_verity;		/* need fs-verity verification after decompression? */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))

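/*
 * Worked example (assuming 4KB pages): the log-size bounds above allow
 * clusters of 2^2 = 4 up to 2^8 = 256 pages, i.e.
 * MAX_COMPRESS_WINDOW_SIZE(2) = 16KB up to MAX_COMPRESS_WINDOW_SIZE(8) = 1MB.
 */
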
1423 struct f2fs_sb_info {
1424 	struct super_block *sb;			/* pointer to VFS super block */
1425 	struct proc_dir_entry *s_proc;		/* proc entry */
1426 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1427 	struct rw_semaphore sb_lock;		/* lock for raw super block */
1428 	int valid_super_block;			/* valid super block no */
1429 	unsigned long s_flag;				/* flags for sbi */
1430 	struct mutex writepages;		/* mutex for writepages() */
1431 
1432 #ifdef CONFIG_BLK_DEV_ZONED
1433 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1434 	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
1435 #endif
1436 
1437 	/* for node-related operations */
1438 	struct f2fs_nm_info *nm_info;		/* node manager */
1439 	struct inode *node_inode;		/* cache node blocks */
1440 
1441 	/* for segment-related operations */
1442 	struct f2fs_sm_info *sm_info;		/* segment manager */
1443 
1444 	/* for bio operations */
1445 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1446 	/* keep migration IO order for LFS mode */
1447 	struct rw_semaphore io_order_lock;
1448 	mempool_t *write_io_dummy;		/* Dummy pages */
1449 
1450 	/* for checkpoint */
1451 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1452 	int cur_cp_pack;			/* remain current cp pack */
1453 	spinlock_t cp_lock;			/* for flag in ckpt */
1454 	struct inode *meta_inode;		/* cache meta blocks */
1455 	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
1456 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
1457 	struct rw_semaphore node_write;		/* locking node writes */
1458 	struct rw_semaphore node_change;	/* locking node change */
1459 	wait_queue_head_t cp_wait;
1460 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1461 	long interval_time[MAX_TIME];		/* to store thresholds */
1462 	struct ckpt_req_control cprc_info;	/* for checkpoint request control */
1463 
1464 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1465 
1466 	spinlock_t fsync_node_lock;		/* for node entry lock */
1467 	struct list_head fsync_node_list;	/* node list head */
1468 	unsigned int fsync_seg_id;		/* sequence id */
1469 	unsigned int fsync_node_num;		/* number of node entries */
1470 
1471 	/* for orphan inode, use 0'th array */
1472 	unsigned int max_orphans;		/* max orphan inodes */
1473 
1474 	/* for inode management */
1475 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1476 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1477 	struct mutex flush_lock;		/* for flush exclusion */
1478 
1479 	/* for extent tree cache */
1480 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
1481 	struct mutex extent_tree_lock;	/* locking extent radix tree */
1482 	struct list_head extent_list;		/* lru list for shrinker */
1483 	spinlock_t extent_lock;			/* locking extent lru list */
1484 	atomic_t total_ext_tree;		/* extent tree count */
1485 	struct list_head zombie_list;		/* extent zombie tree list */
1486 	atomic_t total_zombie_tree;		/* extent zombie tree count */
1487 	atomic_t total_ext_node;		/* extent info count */
1488 
1489 	/* basic filesystem units */
1490 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1491 	unsigned int log_blocksize;		/* log2 block size */
1492 	unsigned int blocksize;			/* block size */
1493 	unsigned int root_ino_num;		/* root inode number*/
1494 	unsigned int node_ino_num;		/* node inode number*/
1495 	unsigned int meta_ino_num;		/* meta inode number*/
1496 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1497 	unsigned int blocks_per_seg;		/* blocks per segment */
1498 	unsigned int segs_per_sec;		/* segments per section */
1499 	unsigned int secs_per_zone;		/* sections per zone */
1500 	unsigned int total_sections;		/* total section count */
1501 	unsigned int total_node_count;		/* total node block count */
1502 	unsigned int total_valid_node_count;	/* valid node block count */
1503 	int dir_level;				/* directory level */
1504 	int readdir_ra;				/* readahead inode in readdir */
1505 	u64 max_io_bytes;			/* max io bytes to merge IOs */
1506 
1507 	block_t user_block_count;		/* # of user blocks */
1508 	block_t total_valid_block_count;	/* # of valid blocks */
1509 	block_t discard_blks;			/* discard command candidates */
1510 	block_t last_valid_block_count;		/* for recovery */
1511 	block_t reserved_blocks;		/* configurable reserved blocks */
1512 	block_t current_reserved_blocks;	/* current reserved blocks */
1513 
1514 	/* Additional tracking for no checkpoint mode */
1515 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1516 
1517 	unsigned int nquota_files;		/* # of quota sysfiles */
1518 	struct rw_semaphore quota_sem;		/* blocking cp for flags */
1519 
1520 	/* # of pages, see count_type */
1521 	atomic_t nr_pages[NR_COUNT_TYPE];
1522 	/* # of allocated blocks */
1523 	struct percpu_counter alloc_valid_block_count;
1524 
1525 	/* writeback control */
1526 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1527 
1528 	/* valid inode count */
1529 	struct percpu_counter total_valid_inode_count;
1530 
1531 	struct f2fs_mount_info mount_opt;	/* mount options */
1532 
1533 	/* for cleaning operations */
1534 	struct rw_semaphore gc_lock;		/*
1535 						 * semaphore for GC; avoids races
1536 						 * between GCs and between GC and CP
1537 						 */
1538 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1539 	struct atgc_management am;		/* atgc management */
1540 	unsigned int cur_victim_sec;		/* current victim section num */
1541 	unsigned int gc_mode;			/* current GC state */
1542 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1543 
1544 	/* for skip statistic */
1545 	unsigned int atomic_files;		/* # of opened atomic files */
1546 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
1547 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1548 
1549 	/* threshold for gc trials on pinned files */
1550 	u64 gc_pin_file_threshold;
1551 	struct rw_semaphore pin_sem;
1552 
1553 	/* maximum # of trials to find a victim segment for SSR and GC */
1554 	unsigned int max_victim_search;
1555 	/* migration granularity of garbage collection, unit: segment */
1556 	unsigned int migration_granularity;
1557 
1558 	/*
1559 	 * for stat information.
1560 	 * the two-entry arrays below keep one slot for LFS and one for SSR.
1561 	 */
1562 #ifdef CONFIG_F2FS_STAT_FS
1563 	struct f2fs_stat_info *stat_info;	/* FS status information */
1564 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1565 	unsigned int segment_count[2];		/* # of allocated segments */
1566 	unsigned int block_count[2];		/* # of allocated blocks */
1567 	atomic_t inplace_count;		/* # of inplace update */
1568 	atomic64_t total_hit_ext;		/* # of lookup extent cache */
1569 	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
1570 	atomic64_t read_hit_largest;		/* # of hit largest extent node */
1571 	atomic64_t read_hit_cached;		/* # of hit cached extent node */
1572 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1573 	atomic_t inline_inode;			/* # of inline_data inodes */
1574 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1575 	atomic_t compr_inode;			/* # of compressed inodes */
1576 	atomic64_t compr_blocks;		/* # of compressed blocks */
1577 	atomic_t vw_cnt;			/* # of volatile writes */
1578 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1579 	atomic_t max_vw_cnt;			/* max # of volatile writes */
1580 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1581 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1582 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1583 #endif
1584 	spinlock_t stat_lock;			/* lock for stat operations */
1585 
1586 	/* For app/fs IO statistics */
1587 	spinlock_t iostat_lock;
1588 	unsigned long long rw_iostat[NR_IO_TYPE];
1589 	unsigned long long prev_rw_iostat[NR_IO_TYPE];
1590 	bool iostat_enable;
1591 	unsigned long iostat_next_period;
1592 	unsigned int iostat_period_ms;
1593 
1594 	/* to attach REQ_META|REQ_FUA flags */
1595 	unsigned int data_io_flag;
1596 	unsigned int node_io_flag;
1597 
1598 	/* For sysfs support */
1599 	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
1600 	struct completion s_kobj_unregister;
1601 
1602 	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
1603 	struct completion s_stat_kobj_unregister;
1604 
1605 	/* For shrinker support */
1606 	struct list_head s_list;
1607 	int s_ndevs;				/* number of devices */
1608 	struct f2fs_dev_info *devs;		/* for device list */
1609 	unsigned int dirty_device;		/* for checkpoint data flush */
1610 	spinlock_t dev_lock;			/* protect dirty_device */
1611 	struct mutex umount_mutex;
1612 	unsigned int shrinker_run_no;
1613 
1614 	/* For write statistics */
1615 	u64 sectors_written_start;
1616 	u64 kbytes_written;
1617 
1618 	/* Reference to checksum algorithm driver via cryptoapi */
1619 	struct crypto_shash *s_chksum_driver;
1620 
1621 	/* Precomputed FS UUID checksum for seeding other checksums */
1622 	__u32 s_chksum_seed;
1623 
1624 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1625 
1626 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1627 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1628 
1629 #ifdef CONFIG_F2FS_FS_COMPRESSION
1630 	struct kmem_cache *page_array_slab;	/* page array entry */
1631 	unsigned int page_array_slab_size;	/* default page array slab size */
1632 
1633 	/* For runtime compression statistics */
1634 	u64 compr_written_block;
1635 	u64 compr_saved_block;
1636 	u32 compr_new_inode;
1637 #endif
1638 };
1639 
1640 struct f2fs_private_dio {
1641 	struct inode *inode;
1642 	void *orig_private;
1643 	bio_end_io_t *orig_end_io;
1644 	bool write;
1645 };
1646 
1647 #ifdef CONFIG_F2FS_FAULT_INJECTION
1648 #define f2fs_show_injection_info(sbi, type)					\
1649 	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
1650 		KERN_INFO, sbi->sb->s_id,				\
1651 		f2fs_fault_name[type],					\
1652 		__func__, __builtin_return_address(0))
1653 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1654 {
1655 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1656 
1657 	if (!ffi->inject_rate)
1658 		return false;
1659 
1660 	if (!IS_FAULT_SET(ffi, type))
1661 		return false;
1662 
1663 	atomic_inc(&ffi->inject_ops);
1664 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1665 		atomic_set(&ffi->inject_ops, 0);
1666 		return true;
1667 	}
1668 	return false;
1669 }
1670 #else
1671 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1672 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1673 {
1674 	return false;
1675 }
1676 #endif
1677 
1678 /*
1679  * Test if the mounted volume is a multi-device volume.
1680  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1681  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1682  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1683  */
1684 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1685 {
1686 	return sbi->s_ndevs > 1;
1687 }
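
/*
 * Illustrative sketch (not from the original header; devs[].path is
 * assumed from the device table elsewhere in f2fs): callers typically
 * guard per-device work with f2fs_is_multi_device(), e.g.
 *
 *	if (f2fs_is_multi_device(sbi)) {
 *		int i;
 *
 *		for (i = 0; i < sbi->s_ndevs; i++)
 *			pr_info("dev %d: %s\n", i, sbi->devs[i].path);
 *	}
 */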
1688 
1689 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1690 {
1691 	unsigned long now = jiffies;
1692 
1693 	sbi->last_time[type] = now;
1694 
1695 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1696 	if (type == REQ_TIME) {
1697 		sbi->last_time[DISCARD_TIME] = now;
1698 		sbi->last_time[GC_TIME] = now;
1699 	}
1700 }
1701 
1702 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1703 {
1704 	unsigned long interval = sbi->interval_time[type] * HZ;
1705 
1706 	return time_after(jiffies, sbi->last_time[type] + interval);
1707 }
1708 
1709 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1710 						int type)
1711 {
1712 	unsigned long interval = sbi->interval_time[type] * HZ;
1713 	unsigned int wait_ms = 0;
1714 	long delta;
1715 
1716 	delta = (sbi->last_time[type] + interval) - jiffies;
1717 	if (delta > 0)
1718 		wait_ms = jiffies_to_msecs(delta);
1719 
1720 	return wait_ms;
1721 }
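
/*
 * Illustrative sketch of how the three time helpers above compose in a
 * polling loop; do_background_work() is a hypothetical worker:
 *
 *	if (f2fs_time_over(sbi, GC_TIME)) {
 *		do_background_work(sbi);
 *		f2fs_update_time(sbi, GC_TIME);
 *	} else {
 *		msleep(f2fs_time_to_wait(sbi, GC_TIME));
 *	}
 */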
1722 
1723 /*
1724  * Inline functions
1725  */
1726 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1727 			      const void *address, unsigned int length)
1728 {
1729 	struct {
1730 		struct shash_desc shash;
1731 		char ctx[4];
1732 	} desc;
1733 	int err;
1734 
1735 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1736 
1737 	desc.shash.tfm = sbi->s_chksum_driver;
1738 	*(u32 *)desc.ctx = crc;
1739 
1740 	err = crypto_shash_update(&desc.shash, address, length);
1741 	BUG_ON(err);
1742 
1743 	return *(u32 *)desc.ctx;
1744 }
1745 
1746 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1747 			   unsigned int length)
1748 {
1749 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1750 }
1751 
1752 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1753 				  void *buf, size_t buf_size)
1754 {
1755 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1756 }
1757 
1758 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1759 			      const void *address, unsigned int length)
1760 {
1761 	return __f2fs_crc32(sbi, crc, address, length);
1762 }
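
/*
 * Illustrative sketch: f2fs_crc32() seeds the CRC with F2FS_SUPER_MAGIC
 * and f2fs_crc_valid() compares against a stored value; blk and
 * crc_offset below are hypothetical names:
 *
 *	__u32 stored_crc = le32_to_cpu(*(__le32 *)(blk + crc_offset));
 *
 *	if (!f2fs_crc_valid(sbi, stored_crc, blk, crc_offset))
 *		return -EFSBADCRC;
 */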
1763 
1764 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1765 {
1766 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1767 }
1768 
1769 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1770 {
1771 	return sb->s_fs_info;
1772 }
1773 
1774 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1775 {
1776 	return F2FS_SB(inode->i_sb);
1777 }
1778 
1779 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1780 {
1781 	return F2FS_I_SB(mapping->host);
1782 }
1783 
1784 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1785 {
1786 	return F2FS_M_SB(page_file_mapping(page));
1787 }
1788 
1789 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1790 {
1791 	return (struct f2fs_super_block *)(sbi->raw_super);
1792 }
1793 
1794 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1795 {
1796 	return (struct f2fs_checkpoint *)(sbi->ckpt);
1797 }
1798 
1799 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1800 {
1801 	return (struct f2fs_node *)page_address(page);
1802 }
1803 
1804 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1805 {
1806 	return &((struct f2fs_node *)page_address(page))->i;
1807 }
1808 
1809 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1810 {
1811 	return (struct f2fs_nm_info *)(sbi->nm_info);
1812 }
1813 
1814 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1815 {
1816 	return (struct f2fs_sm_info *)(sbi->sm_info);
1817 }
1818 
1819 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1820 {
1821 	return (struct sit_info *)(SM_I(sbi)->sit_info);
1822 }
1823 
1824 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1825 {
1826 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1827 }
1828 
1829 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1830 {
1831 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1832 }
1833 
1834 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1835 {
1836 	return sbi->meta_inode->i_mapping;
1837 }
1838 
1839 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
1840 {
1841 	return sbi->node_inode->i_mapping;
1842 }
1843 
1844 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
1845 {
1846 	return test_bit(type, &sbi->s_flag);
1847 }
1848 
1849 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1850 {
1851 	set_bit(type, &sbi->s_flag);
1852 }
1853 
1854 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1855 {
1856 	clear_bit(type, &sbi->s_flag);
1857 }
1858 
1859 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
1860 {
1861 	return le64_to_cpu(cp->checkpoint_ver);
1862 }
1863 
1864 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
1865 {
1866 	if (type < F2FS_MAX_QUOTAS)
1867 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
1868 	return 0;
1869 }
1870 
1871 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
1872 {
1873 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
1874 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
1875 }
1876 
1877 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1878 {
1879 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1880 
1881 	return ckpt_flags & f;
1882 }
1883 
1884 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1885 {
1886 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
1887 }
1888 
1889 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1890 {
1891 	unsigned int ckpt_flags;
1892 
1893 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1894 	ckpt_flags |= f;
1895 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1896 }
1897 
1898 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1899 {
1900 	unsigned long flags;
1901 
1902 	spin_lock_irqsave(&sbi->cp_lock, flags);
1903 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
1904 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
1905 }
1906 
1907 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1908 {
1909 	unsigned int ckpt_flags;
1910 
1911 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1912 	ckpt_flags &= (~f);
1913 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1914 }
1915 
1916 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1917 {
1918 	unsigned long flags;
1919 
1920 	spin_lock_irqsave(&sbi->cp_lock, flags);
1921 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
1922 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
1923 }
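
/*
 * Illustrative usage of the wrappers above: the locked variants
 * serialize read-modify-write of cp->ckpt_flags under cp_lock, e.g.
 *
 *	set_ckpt_flags(sbi, CP_FSCK_FLAG);
 *	...
 *	if (is_set_ckpt_flags(sbi, CP_FSCK_FLAG))
 *		clear_ckpt_flags(sbi, CP_FSCK_FLAG);
 *
 * The unlocked __set/__clear forms assume the caller already holds
 * cp_lock, as disable_nat_bits() below shows.
 */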
1924 
1925 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
1926 {
1927 	unsigned long flags;
1928 	unsigned char *nat_bits;
1929 
1930 	/*
1931 	 * Re-enabling nat_bits would require forcing fsck.f2fs via
1932 	 * set_sbi_flag(sbi, SBI_NEED_FSCK), which is very costly, so
1933 	 * rely on a regular fsck or an unclean shutdown instead.
1934 	 */
1935 
1936 	if (lock)
1937 		spin_lock_irqsave(&sbi->cp_lock, flags);
1938 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
1939 	nat_bits = NM_I(sbi)->nat_bits;
1940 	NM_I(sbi)->nat_bits = NULL;
1941 	if (lock)
1942 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
1943 
1944 	kvfree(nat_bits);
1945 }
1946 
1947 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
1948 					struct cp_control *cpc)
1949 {
1950 	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
1951 
1952 	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
1953 }
1954 
1955 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
1956 {
1957 	down_read(&sbi->cp_rwsem);
1958 }
1959 
1960 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
1961 {
1962 	return down_read_trylock(&sbi->cp_rwsem);
1963 }
1964 
1965 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
1966 {
1967 	up_read(&sbi->cp_rwsem);
1968 }
1969 
1970 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
1971 {
1972 	down_write(&sbi->cp_rwsem);
1973 }
1974 
1975 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
1976 {
1977 	up_write(&sbi->cp_rwsem);
1978 }
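
/*
 * Illustrative pairing (assumed from common f2fs call sites): filesystem
 * operations take cp_rwsem for read around block modifications, while
 * the checkpoint path takes it for write to drain them:
 *
 *	f2fs_lock_op(sbi);
 *	... allocate and modify blocks ...
 *	f2fs_unlock_op(sbi);
 *
 * versus f2fs_lock_all()/f2fs_unlock_all() around checkpoint writing.
 */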
1979 
1980 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
1981 {
1982 	int reason = CP_SYNC;
1983 
1984 	if (test_opt(sbi, FASTBOOT))
1985 		reason = CP_FASTBOOT;
1986 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
1987 		reason = CP_UMOUNT;
1988 	return reason;
1989 }
1990 
1991 static inline bool __remain_node_summaries(int reason)
1992 {
1993 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
1994 }
1995 
1996 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
1997 {
1998 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
1999 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2000 }
2001 
2002 /*
2003  * Check whether the inode has blocks or not
2004  */
2005 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2006 {
2007 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2008 
2009 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2010 }
2011 
2012 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2013 {
2014 	return ofs == XATTR_NODE_OFFSET;
2015 }
2016 
2017 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2018 					struct inode *inode, bool cap)
2019 {
2020 	if (!inode)
2021 		return true;
2022 	if (!test_opt(sbi, RESERVE_ROOT))
2023 		return false;
2024 	if (IS_NOQUOTA(inode))
2025 		return true;
2026 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2027 		return true;
2028 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2029 					in_group_p(F2FS_OPTION(sbi).s_resgid))
2030 		return true;
2031 	if (cap && capable(CAP_SYS_RESOURCE))
2032 		return true;
2033 	return false;
2034 }
2035 
2036 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2037 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2038 				 struct inode *inode, blkcnt_t *count)
2039 {
2040 	blkcnt_t diff = 0, release = 0;
2041 	block_t avail_user_block_count;
2042 	int ret;
2043 
2044 	ret = dquot_reserve_block(inode, *count);
2045 	if (ret)
2046 		return ret;
2047 
2048 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2049 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2050 		release = *count;
2051 		goto release_quota;
2052 	}
2053 
2054 	/*
2055 	 * increase this counter before the actual block count changes so that
2056 	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
2057 	 */
2058 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2059 
2060 	spin_lock(&sbi->stat_lock);
2061 	sbi->total_valid_block_count += (block_t)(*count);
2062 	avail_user_block_count = sbi->user_block_count -
2063 					sbi->current_reserved_blocks;
2064 
2065 	if (!__allow_reserved_blocks(sbi, inode, true))
2066 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2067 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2068 		if (avail_user_block_count > sbi->unusable_block_count)
2069 			avail_user_block_count -= sbi->unusable_block_count;
2070 		else
2071 			avail_user_block_count = 0;
2072 	}
2073 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2074 		diff = sbi->total_valid_block_count - avail_user_block_count;
2075 		if (diff > *count)
2076 			diff = *count;
2077 		*count -= diff;
2078 		release = diff;
2079 		sbi->total_valid_block_count -= diff;
2080 		if (!*count) {
2081 			spin_unlock(&sbi->stat_lock);
2082 			goto enospc;
2083 		}
2084 	}
2085 	spin_unlock(&sbi->stat_lock);
2086 
2087 	if (unlikely(release)) {
2088 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2089 		dquot_release_reservation_block(inode, release);
2090 	}
2091 	f2fs_i_blocks_write(inode, *count, true, true);
2092 	return 0;
2093 
2094 enospc:
2095 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2096 release_quota:
2097 	dquot_release_reservation_block(inode, release);
2098 	return -ENOSPC;
2099 }
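
/*
 * Illustrative caller sketch for inc_valid_block_count(): *count is
 * in/out, so on success the caller must honor a partial grant; want is
 * a hypothetical name:
 *
 *	blkcnt_t count = want;
 *
 *	if (inc_valid_block_count(sbi, inode, &count))
 *		return -ENOSPC;
 *	... count may now be smaller than want ...
 */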
2100 
2101 __printf(2, 3)
2102 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2103 
2104 #define f2fs_err(sbi, fmt, ...)						\
2105 	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2106 #define f2fs_warn(sbi, fmt, ...)					\
2107 	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2108 #define f2fs_notice(sbi, fmt, ...)					\
2109 	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2110 #define f2fs_info(sbi, fmt, ...)					\
2111 	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2112 #define f2fs_debug(sbi, fmt, ...)					\
2113 	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2114 
2115 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2116 						struct inode *inode,
2117 						block_t count)
2118 {
2119 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2120 
2121 	spin_lock(&sbi->stat_lock);
2122 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2123 	sbi->total_valid_block_count -= (block_t)count;
2124 	if (sbi->reserved_blocks &&
2125 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2126 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2127 					sbi->current_reserved_blocks + count);
2128 	spin_unlock(&sbi->stat_lock);
2129 	if (unlikely(inode->i_blocks < sectors)) {
2130 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2131 			  inode->i_ino,
2132 			  (unsigned long long)inode->i_blocks,
2133 			  (unsigned long long)sectors);
2134 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2135 		return;
2136 	}
2137 	f2fs_i_blocks_write(inode, count, false, true);
2138 }
2139 
2140 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2141 {
2142 	atomic_inc(&sbi->nr_pages[count_type]);
2143 
2144 	if (count_type == F2FS_DIRTY_DENTS ||
2145 			count_type == F2FS_DIRTY_NODES ||
2146 			count_type == F2FS_DIRTY_META ||
2147 			count_type == F2FS_DIRTY_QDATA ||
2148 			count_type == F2FS_DIRTY_IMETA)
2149 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2150 }
2151 
2152 static inline void inode_inc_dirty_pages(struct inode *inode)
2153 {
2154 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2155 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2156 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2157 	if (IS_NOQUOTA(inode))
2158 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2159 }
2160 
2161 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2162 {
2163 	atomic_dec(&sbi->nr_pages[count_type]);
2164 }
2165 
2166 static inline void inode_dec_dirty_pages(struct inode *inode)
2167 {
2168 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2169 			!S_ISLNK(inode->i_mode))
2170 		return;
2171 
2172 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2173 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2174 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2175 	if (IS_NOQUOTA(inode))
2176 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2177 }
2178 
2179 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2180 {
2181 	return atomic_read(&sbi->nr_pages[count_type]);
2182 }
2183 
2184 static inline int get_dirty_pages(struct inode *inode)
2185 {
2186 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2187 }
2188 
2189 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2190 {
2191 	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2192 	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2193 						sbi->log_blocks_per_seg;
2194 
2195 	return segs / sbi->segs_per_sec;
2196 }
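
/*
 * Worked example for get_blocktype_secs() with illustrative geometry:
 * blocks_per_seg == 512 (log_blocks_per_seg == 9) and segs_per_sec == 1,
 * so 1000 dirty pages give segs = (1000 + 512 - 1) >> 9 = 2, i.e. 2
 * sections.
 */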
2197 
2198 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2199 {
2200 	return sbi->total_valid_block_count;
2201 }
2202 
2203 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2204 {
2205 	return sbi->discard_blks;
2206 }
2207 
2208 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2209 {
2210 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2211 
2212 	/* return the NAT or SIT bitmap size in bytes */
2213 	if (flag == NAT_BITMAP)
2214 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2215 	else if (flag == SIT_BITMAP)
2216 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2217 
2218 	return 0;
2219 }
2220 
2221 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2222 {
2223 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2224 }
2225 
2226 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2227 {
2228 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2229 	void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2230 	int offset;
2231 
2232 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2233 		offset = (flag == SIT_BITMAP) ?
2234 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2235 		/*
2236 		 * if the large_nat_bitmap feature is enabled, skip the leading
2237 		 * checksum word; all nat/sit bitmaps stay checksum-protected.
2238 		 */
2239 		return tmp_ptr + offset + sizeof(__le32);
2240 	}
2241 
2242 	if (__cp_payload(sbi) > 0) {
2243 		if (flag == NAT_BITMAP)
2244 			return &ckpt->sit_nat_version_bitmap;
2245 		else
2246 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2247 	} else {
2248 		offset = (flag == NAT_BITMAP) ?
2249 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2250 		return tmp_ptr + offset;
2251 	}
2252 }
2253 
2254 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2255 {
2256 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2257 
2258 	if (sbi->cur_cp_pack == 2)
2259 		start_addr += sbi->blocks_per_seg;
2260 	return start_addr;
2261 }
2262 
2263 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2264 {
2265 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2266 
2267 	if (sbi->cur_cp_pack == 1)
2268 		start_addr += sbi->blocks_per_seg;
2269 	return start_addr;
2270 }
2271 
2272 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2273 {
2274 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2275 }
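
/*
 * Worked example (illustrative): if cp_blkaddr is B and blocks_per_seg
 * is 512, checkpoint pack #1 starts at B and pack #2 at B + 512; a
 * successful checkpoint writes to __start_cp_next_addr() and then flips
 * cur_cp_pack via __set_cp_next_pack(), so the two packs alternate.
 */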
2276 
2277 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2278 {
2279 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2280 }
2281 
2282 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2283 					struct inode *inode, bool is_inode)
2284 {
2285 	block_t	valid_block_count;
2286 	unsigned int valid_node_count, user_block_count;
2287 	int err;
2288 
2289 	if (is_inode) {
2290 		if (inode) {
2291 			err = dquot_alloc_inode(inode);
2292 			if (err)
2293 				return err;
2294 		}
2295 	} else {
2296 		err = dquot_reserve_block(inode, 1);
2297 		if (err)
2298 			return err;
2299 	}
2300 
2301 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2302 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2303 		goto enospc;
2304 	}
2305 
2306 	spin_lock(&sbi->stat_lock);
2307 
2308 	valid_block_count = sbi->total_valid_block_count +
2309 					sbi->current_reserved_blocks + 1;
2310 
2311 	if (!__allow_reserved_blocks(sbi, inode, false))
2312 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2313 	user_block_count = sbi->user_block_count;
2314 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2315 		user_block_count -= sbi->unusable_block_count;
2316 
2317 	if (unlikely(valid_block_count > user_block_count)) {
2318 		spin_unlock(&sbi->stat_lock);
2319 		goto enospc;
2320 	}
2321 
2322 	valid_node_count = sbi->total_valid_node_count + 1;
2323 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2324 		spin_unlock(&sbi->stat_lock);
2325 		goto enospc;
2326 	}
2327 
2328 	sbi->total_valid_node_count++;
2329 	sbi->total_valid_block_count++;
2330 	spin_unlock(&sbi->stat_lock);
2331 
2332 	if (inode) {
2333 		if (is_inode)
2334 			f2fs_mark_inode_dirty_sync(inode, true);
2335 		else
2336 			f2fs_i_blocks_write(inode, 1, true, true);
2337 	}
2338 
2339 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2340 	return 0;
2341 
2342 enospc:
2343 	if (is_inode) {
2344 		if (inode)
2345 			dquot_free_inode(inode);
2346 	} else {
2347 		dquot_release_reservation_block(inode, 1);
2348 	}
2349 	return -ENOSPC;
2350 }
2351 
2352 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2353 					struct inode *inode, bool is_inode)
2354 {
2355 	spin_lock(&sbi->stat_lock);
2356 
2357 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2358 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2359 
2360 	sbi->total_valid_node_count--;
2361 	sbi->total_valid_block_count--;
2362 	if (sbi->reserved_blocks &&
2363 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2364 		sbi->current_reserved_blocks++;
2365 
2366 	spin_unlock(&sbi->stat_lock);
2367 
2368 	if (is_inode) {
2369 		dquot_free_inode(inode);
2370 	} else {
2371 		if (unlikely(inode->i_blocks == 0)) {
2372 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2373 				  inode->i_ino,
2374 				  (unsigned long long)inode->i_blocks);
2375 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2376 			return;
2377 		}
2378 		f2fs_i_blocks_write(inode, 1, false, true);
2379 	}
2380 }
2381 
2382 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2383 {
2384 	return sbi->total_valid_node_count;
2385 }
2386 
2387 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2388 {
2389 	percpu_counter_inc(&sbi->total_valid_inode_count);
2390 }
2391 
2392 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2393 {
2394 	percpu_counter_dec(&sbi->total_valid_inode_count);
2395 }
2396 
2397 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2398 {
2399 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2400 }
2401 
2402 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2403 						pgoff_t index, bool for_write)
2404 {
2405 	struct page *page;
2406 
2407 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2408 		if (!for_write)
2409 			page = find_get_page_flags(mapping, index,
2410 							FGP_LOCK | FGP_ACCESSED);
2411 		else
2412 			page = find_lock_page(mapping, index);
2413 		if (page)
2414 			return page;
2415 
2416 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2417 			f2fs_show_injection_info(F2FS_M_SB(mapping),
2418 							FAULT_PAGE_ALLOC);
2419 			return NULL;
2420 		}
2421 	}
2422 
2423 	if (!for_write)
2424 		return grab_cache_page(mapping, index);
2425 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2426 }
2427 
2428 static inline struct page *f2fs_pagecache_get_page(
2429 				struct address_space *mapping, pgoff_t index,
2430 				int fgp_flags, gfp_t gfp_mask)
2431 {
2432 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2433 		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2434 		return NULL;
2435 	}
2436 
2437 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2438 }
2439 
2440 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2441 {
2442 	char *src_kaddr = kmap(src);
2443 	char *dst_kaddr = kmap(dst);
2444 
2445 	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2446 	kunmap(dst);
2447 	kunmap(src);
2448 }
2449 
2450 static inline void f2fs_put_page(struct page *page, int unlock)
2451 {
2452 	if (!page)
2453 		return;
2454 
2455 	if (unlock) {
2456 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2457 		unlock_page(page);
2458 	}
2459 	put_page(page);
2460 }
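
/*
 * Illustrative pairing: pages returned by f2fs_grab_cache_page() come
 * locked, so a typical caller releases them through f2fs_put_page():
 *
 *	page = f2fs_grab_cache_page(mapping, index, true);
 *	if (!page)
 *		return -ENOMEM;
 *	... modify the page ...
 *	f2fs_put_page(page, 1);
 */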
2461 
2462 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2463 {
2464 	if (dn->node_page)
2465 		f2fs_put_page(dn->node_page, 1);
2466 	if (dn->inode_page && dn->node_page != dn->inode_page)
2467 		f2fs_put_page(dn->inode_page, 0);
2468 	dn->node_page = NULL;
2469 	dn->inode_page = NULL;
2470 }
2471 
2472 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2473 					size_t size)
2474 {
2475 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2476 }
2477 
2478 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2479 						gfp_t flags)
2480 {
2481 	void *entry;
2482 
2483 	entry = kmem_cache_alloc(cachep, flags);
2484 	if (!entry)
2485 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2486 	return entry;
2487 }
2488 
2489 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2490 {
2491 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2492 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2493 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2494 		get_pages(sbi, F2FS_DIO_READ) ||
2495 		get_pages(sbi, F2FS_DIO_WRITE))
2496 		return true;
2497 
2498 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2499 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2500 		return true;
2501 
2502 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2503 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2504 		return true;
2505 	return false;
2506 }
2507 
2508 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2509 {
2510 	if (sbi->gc_mode == GC_URGENT_HIGH)
2511 		return true;
2512 
2513 	if (is_inflight_io(sbi, type))
2514 		return false;
2515 
2516 	if (sbi->gc_mode == GC_URGENT_LOW &&
2517 			(type == DISCARD_TIME || type == GC_TIME))
2518 		return true;
2519 
2520 	return f2fs_time_over(sbi, type);
2521 }
2522 
2523 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2524 				unsigned long index, void *item)
2525 {
2526 	while (radix_tree_insert(root, index, item))
2527 		cond_resched();
2528 }
2529 
2530 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2531 
2532 static inline bool IS_INODE(struct page *page)
2533 {
2534 	struct f2fs_node *p = F2FS_NODE(page);
2535 
2536 	return RAW_IS_INODE(p);
2537 }
2538 
2539 static inline int offset_in_addr(struct f2fs_inode *i)
2540 {
2541 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2542 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2543 }
2544 
2545 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2546 {
2547 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2548 }
2549 
2550 static inline int f2fs_has_extra_attr(struct inode *inode);
2551 static inline block_t data_blkaddr(struct inode *inode,
2552 			struct page *node_page, unsigned int offset)
2553 {
2554 	struct f2fs_node *raw_node;
2555 	__le32 *addr_array;
2556 	int base = 0;
2557 	bool is_inode = IS_INODE(node_page);
2558 
2559 	raw_node = F2FS_NODE(node_page);
2560 
2561 	if (is_inode) {
2562 		if (!inode)
2563 			/* from GC path only */
2564 			base = offset_in_addr(&raw_node->i);
2565 		else if (f2fs_has_extra_attr(inode))
2566 			base = get_extra_isize(inode);
2567 	}
2568 
2569 	addr_array = blkaddr_in_node(raw_node);
2570 	return le32_to_cpu(addr_array[base + offset]);
2571 }
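
/*
 * Worked example for data_blkaddr() (illustrative): on an inode page
 * with the extra-attribute area enabled and i_extra_isize == 36, base
 * becomes 36 / sizeof(__le32) == 9, so offset 0 resolves to i_addr[9];
 * on direct node pages base stays 0.
 */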
2572 
2573 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2574 {
2575 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2576 }
2577 
2578 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2579 {
2580 	int mask;
2581 
2582 	addr += (nr >> 3);
2583 	mask = 1 << (7 - (nr & 0x07));
2584 	return mask & *addr;
2585 }
2586 
2587 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2588 {
2589 	int mask;
2590 
2591 	addr += (nr >> 3);
2592 	mask = 1 << (7 - (nr & 0x07));
2593 	*addr |= mask;
2594 }
2595 
2596 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2597 {
2598 	int mask;
2599 
2600 	addr += (nr >> 3);
2601 	mask = 1 << (7 - (nr & 0x07));
2602 	*addr &= ~mask;
2603 }
2604 
2605 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2606 {
2607 	int mask;
2608 	int ret;
2609 
2610 	addr += (nr >> 3);
2611 	mask = 1 << (7 - (nr & 0x07));
2612 	ret = mask & *addr;
2613 	*addr |= mask;
2614 	return ret;
2615 }
2616 
2617 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2618 {
2619 	int mask;
2620 	int ret;
2621 
2622 	addr += (nr >> 3);
2623 	mask = 1 << (7 - (nr & 0x07));
2624 	ret = mask & *addr;
2625 	*addr &= ~mask;
2626 	return ret;
2627 }
2628 
2629 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2630 {
2631 	int mask;
2632 
2633 	addr += (nr >> 3);
2634 	mask = 1 << (7 - (nr & 0x07));
2635 	*addr ^= mask;
2636 }
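
/*
 * Worked example (illustrative): the helpers above address bits
 * MSB-first within each byte, so nr == 10 targets addr[1] with mask
 * 1 << (7 - 2) == 0x20; f2fs_set_bit(10, addr) thus sets addr[1] |= 0x20.
 */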
2637 
2638 /*
2639  * On-disk inode flags (f2fs_inode::i_flags)
2640  */
2641 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
2642 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
2643 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
2644 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
2645 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
2646 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
2647 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
2648 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
2649 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
2650 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
2651 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
2652 
2653 /* Flags that should be inherited by new inodes from their parent. */
2654 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2655 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2656 			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2657 
2658 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2659 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2660 				F2FS_CASEFOLD_FL))
2661 
2662 /* Flags that are appropriate for non-directories/regular files. */
2663 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2664 
2665 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2666 {
2667 	if (S_ISDIR(mode))
2668 		return flags;
2669 	else if (S_ISREG(mode))
2670 		return flags & F2FS_REG_FLMASK;
2671 	else
2672 		return flags & F2FS_OTHER_FLMASK;
2673 }
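
/*
 * Illustrative example: f2fs_mask_flags(S_IFREG,
 * F2FS_DIRSYNC_FL | F2FS_NODUMP_FL) keeps only F2FS_NODUMP_FL, since
 * DIRSYNC is directory-only and filtered out by F2FS_REG_FLMASK.
 */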
2674 
2675 static inline void __mark_inode_dirty_flag(struct inode *inode,
2676 						int flag, bool set)
2677 {
2678 	switch (flag) {
2679 	case FI_INLINE_XATTR:
2680 	case FI_INLINE_DATA:
2681 	case FI_INLINE_DENTRY:
2682 	case FI_NEW_INODE:
2683 		if (set)
2684 			return;
2685 		fallthrough;
2686 	case FI_DATA_EXIST:
2687 	case FI_INLINE_DOTS:
2688 	case FI_PIN_FILE:
2689 		f2fs_mark_inode_dirty_sync(inode, true);
2690 	}
2691 }
2692 
2693 static inline void set_inode_flag(struct inode *inode, int flag)
2694 {
2695 	set_bit(flag, F2FS_I(inode)->flags);
2696 	__mark_inode_dirty_flag(inode, flag, true);
2697 }
2698 
2699 static inline int is_inode_flag_set(struct inode *inode, int flag)
2700 {
2701 	return test_bit(flag, F2FS_I(inode)->flags);
2702 }
2703 
2704 static inline void clear_inode_flag(struct inode *inode, int flag)
2705 {
2706 	clear_bit(flag, F2FS_I(inode)->flags);
2707 	__mark_inode_dirty_flag(inode, flag, false);
2708 }
2709 
2710 static inline bool f2fs_verity_in_progress(struct inode *inode)
2711 {
2712 	return IS_ENABLED(CONFIG_FS_VERITY) &&
2713 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2714 }
2715 
2716 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2717 {
2718 	F2FS_I(inode)->i_acl_mode = mode;
2719 	set_inode_flag(inode, FI_ACL_MODE);
2720 	f2fs_mark_inode_dirty_sync(inode, false);
2721 }
2722 
2723 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2724 {
2725 	if (inc)
2726 		inc_nlink(inode);
2727 	else
2728 		drop_nlink(inode);
2729 	f2fs_mark_inode_dirty_sync(inode, true);
2730 }
2731 
2732 static inline void f2fs_i_blocks_write(struct inode *inode,
2733 					block_t diff, bool add, bool claim)
2734 {
2735 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2736 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2737 
2738 	/* add && claim must pair with a prior dquot_reserve_block() */
2739 	if (add) {
2740 		if (claim)
2741 			dquot_claim_block(inode, diff);
2742 		else
2743 			dquot_alloc_block_nofail(inode, diff);
2744 	} else {
2745 		dquot_free_block(inode, diff);
2746 	}
2747 
2748 	f2fs_mark_inode_dirty_sync(inode, true);
2749 	if (clean || recover)
2750 		set_inode_flag(inode, FI_AUTO_RECOVER);
2751 }
2752 
2753 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
2754 {
2755 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2756 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2757 
2758 	if (i_size_read(inode) == i_size)
2759 		return;
2760 
2761 	i_size_write(inode, i_size);
2762 	f2fs_mark_inode_dirty_sync(inode, true);
2763 	if (clean || recover)
2764 		set_inode_flag(inode, FI_AUTO_RECOVER);
2765 }
2766 
2767 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
2768 {
2769 	F2FS_I(inode)->i_current_depth = depth;
2770 	f2fs_mark_inode_dirty_sync(inode, true);
2771 }
2772 
2773 static inline void f2fs_i_gc_failures_write(struct inode *inode,
2774 					unsigned int count)
2775 {
2776 	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
2777 	f2fs_mark_inode_dirty_sync(inode, true);
2778 }
2779 
2780 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
2781 {
2782 	F2FS_I(inode)->i_xattr_nid = xnid;
2783 	f2fs_mark_inode_dirty_sync(inode, true);
2784 }
2785 
2786 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
2787 {
2788 	F2FS_I(inode)->i_pino = pino;
2789 	f2fs_mark_inode_dirty_sync(inode, true);
2790 }
2791 
2792 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
2793 {
2794 	struct f2fs_inode_info *fi = F2FS_I(inode);
2795 
2796 	if (ri->i_inline & F2FS_INLINE_XATTR)
2797 		set_bit(FI_INLINE_XATTR, fi->flags);
2798 	if (ri->i_inline & F2FS_INLINE_DATA)
2799 		set_bit(FI_INLINE_DATA, fi->flags);
2800 	if (ri->i_inline & F2FS_INLINE_DENTRY)
2801 		set_bit(FI_INLINE_DENTRY, fi->flags);
2802 	if (ri->i_inline & F2FS_DATA_EXIST)
2803 		set_bit(FI_DATA_EXIST, fi->flags);
2804 	if (ri->i_inline & F2FS_INLINE_DOTS)
2805 		set_bit(FI_INLINE_DOTS, fi->flags);
2806 	if (ri->i_inline & F2FS_EXTRA_ATTR)
2807 		set_bit(FI_EXTRA_ATTR, fi->flags);
2808 	if (ri->i_inline & F2FS_PIN_FILE)
2809 		set_bit(FI_PIN_FILE, fi->flags);
2810 }
2811 
2812 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
2813 {
2814 	ri->i_inline = 0;
2815 
2816 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
2817 		ri->i_inline |= F2FS_INLINE_XATTR;
2818 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
2819 		ri->i_inline |= F2FS_INLINE_DATA;
2820 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
2821 		ri->i_inline |= F2FS_INLINE_DENTRY;
2822 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
2823 		ri->i_inline |= F2FS_DATA_EXIST;
2824 	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
2825 		ri->i_inline |= F2FS_INLINE_DOTS;
2826 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
2827 		ri->i_inline |= F2FS_EXTRA_ATTR;
2828 	if (is_inode_flag_set(inode, FI_PIN_FILE))
2829 		ri->i_inline |= F2FS_PIN_FILE;
2830 }
2831 
2832 static inline int f2fs_has_extra_attr(struct inode *inode)
2833 {
2834 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
2835 }
2836 
2837 static inline int f2fs_has_inline_xattr(struct inode *inode)
2838 {
2839 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
2840 }
2841 
2842 static inline int f2fs_compressed_file(struct inode *inode)
2843 {
2844 	return S_ISREG(inode->i_mode) &&
2845 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
2846 }
2847 
2848 static inline bool f2fs_need_compress_data(struct inode *inode)
2849 {
2850 	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;
2851 
2852 	if (!f2fs_compressed_file(inode))
2853 		return false;
2854 
2855 	if (compress_mode == COMPR_MODE_FS)
2856 		return true;
2857 	else if (compress_mode == COMPR_MODE_USER &&
2858 			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
2859 		return true;
2860 
2861 	return false;
2862 }
2863 
2864 static inline unsigned int addrs_per_inode(struct inode *inode)
2865 {
2866 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
2867 				get_inline_xattr_addrs(inode);
2868 
2869 	if (!f2fs_compressed_file(inode))
2870 		return addrs;
2871 	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
2872 }
2873 
2874 static inline unsigned int addrs_per_block(struct inode *inode)
2875 {
2876 	if (!f2fs_compressed_file(inode))
2877 		return DEF_ADDRS_PER_BLOCK;
2878 	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
2879 }
2880 
2881 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
2882 {
2883 	struct f2fs_inode *ri = F2FS_INODE(page);
2884 
2885 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
2886 					get_inline_xattr_addrs(inode)]);
2887 }
2888 
2889 static inline int inline_xattr_size(struct inode *inode)
2890 {
2891 	if (f2fs_has_inline_xattr(inode))
2892 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
2893 	return 0;
2894 }
2895 
2896 static inline int f2fs_has_inline_data(struct inode *inode)
2897 {
2898 	return is_inode_flag_set(inode, FI_INLINE_DATA);
2899 }
2900 
2901 static inline int f2fs_exist_data(struct inode *inode)
2902 {
2903 	return is_inode_flag_set(inode, FI_DATA_EXIST);
2904 }
2905 
2906 static inline int f2fs_has_inline_dots(struct inode *inode)
2907 {
2908 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
2909 }
2910 
2911 static inline int f2fs_is_mmap_file(struct inode *inode)
2912 {
2913 	return is_inode_flag_set(inode, FI_MMAP_FILE);
2914 }
2915 
2916 static inline bool f2fs_is_pinned_file(struct inode *inode)
2917 {
2918 	return is_inode_flag_set(inode, FI_PIN_FILE);
2919 }
2920 
2921 static inline bool f2fs_is_atomic_file(struct inode *inode)
2922 {
2923 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
2924 }
2925 
2926 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
2927 {
2928 	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
2929 }
2930 
2931 static inline bool f2fs_is_volatile_file(struct inode *inode)
2932 {
2933 	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
2934 }
2935 
2936 static inline bool f2fs_is_first_block_written(struct inode *inode)
2937 {
2938 	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
2939 }
2940 
2941 static inline bool f2fs_is_drop_cache(struct inode *inode)
2942 {
2943 	return is_inode_flag_set(inode, FI_DROP_CACHE);
2944 }
2945 
2946 static inline void *inline_data_addr(struct inode *inode, struct page *page)
2947 {
2948 	struct f2fs_inode *ri = F2FS_INODE(page);
2949 	int extra_size = get_extra_isize(inode);
2950 
2951 	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
2952 }
2953 
2954 static inline int f2fs_has_inline_dentry(struct inode *inode)
2955 {
2956 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
2957 }
2958 
2959 static inline int is_file(struct inode *inode, int type)
2960 {
2961 	return F2FS_I(inode)->i_advise & type;
2962 }
2963 
2964 static inline void set_file(struct inode *inode, int type)
2965 {
2966 	F2FS_I(inode)->i_advise |= type;
2967 	f2fs_mark_inode_dirty_sync(inode, true);
2968 }
2969 
2970 static inline void clear_file(struct inode *inode, int type)
2971 {
2972 	F2FS_I(inode)->i_advise &= ~type;
2973 	f2fs_mark_inode_dirty_sync(inode, true);
2974 }
2975 
2976 static inline bool f2fs_is_time_consistent(struct inode *inode)
2977 {
2978 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
2979 		return false;
2980 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
2981 		return false;
2982 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
2983 		return false;
2984 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
2985 						&F2FS_I(inode)->i_crtime))
2986 		return false;
2987 	return true;
2988 }
2989 
2990 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
2991 {
2992 	bool ret;
2993 
2994 	if (dsync) {
2995 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2996 
2997 		spin_lock(&sbi->inode_lock[DIRTY_META]);
2998 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
2999 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
3000 		return ret;
3001 	}
3002 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
3003 			file_keep_isize(inode) ||
3004 			i_size_read(inode) & ~PAGE_MASK)
3005 		return false;
3006 
3007 	if (!f2fs_is_time_consistent(inode))
3008 		return false;
3009 
3010 	spin_lock(&F2FS_I(inode)->i_size_lock);
3011 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
3012 	spin_unlock(&F2FS_I(inode)->i_size_lock);
3013 
3014 	return ret;
3015 }
3016 
3017 static inline bool f2fs_readonly(struct super_block *sb)
3018 {
3019 	return sb_rdonly(sb);
3020 }
3021 
3022 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
3023 {
3024 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
3025 }
3026 
3027 static inline bool is_dot_dotdot(const u8 *name, size_t len)
3028 {
3029 	if (len == 1 && name[0] == '.')
3030 		return true;
3031 
3032 	if (len == 2 && name[0] == '.' && name[1] == '.')
3033 		return true;
3034 
3035 	return false;
3036 }
3037 
3038 static inline bool f2fs_may_extent_tree(struct inode *inode)
3039 {
3040 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3041 
3042 	if (!test_opt(sbi, EXTENT_CACHE) ||
3043 			is_inode_flag_set(inode, FI_NO_EXTENT) ||
3044 			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
3045 		return false;
3046 
3047 	/*
3048 	 * for files recovered during mount, do not create extents
3049 	 * if the shrinker is not yet registered.
3050 	 */
3051 	if (list_empty(&sbi->s_list))
3052 		return false;
3053 
3054 	return S_ISREG(inode->i_mode);
3055 }
3056 
3057 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
3058 					size_t size, gfp_t flags)
3059 {
3060 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
3061 		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
3062 		return NULL;
3063 	}
3064 
3065 	return kmalloc(size, flags);
3066 }
3067 
3068 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3069 					size_t size, gfp_t flags)
3070 {
3071 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3072 }
3073 
3074 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3075 					size_t size, gfp_t flags)
3076 {
3077 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3078 		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3079 		return NULL;
3080 	}
3081 
3082 	return kvmalloc(size, flags);
3083 }
3084 
3085 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3086 					size_t size, gfp_t flags)
3087 {
3088 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3089 }
3090 
3091 static inline int get_extra_isize(struct inode *inode)
3092 {
3093 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3094 }
3095 
3096 static inline int get_inline_xattr_addrs(struct inode *inode)
3097 {
3098 	return F2FS_I(inode)->i_inline_xattr_size;
3099 }
3100 
3101 #define f2fs_get_inode_mode(i) \
3102 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3103 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3104 
3105 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3106 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3107 	offsetof(struct f2fs_inode, i_extra_isize))
3108 
3109 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3110 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3111 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3112 		sizeof((f2fs_inode)->field))			\
3113 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))
3114 
3115 #define DEFAULT_IOSTAT_PERIOD_MS	3000
3116 #define MIN_IOSTAT_PERIOD_MS		100
3117 /* maximum period of iostat tracing is 8640000ms (2.4 hours) */
3118 #define MAX_IOSTAT_PERIOD_MS		8640000
3119 
3120 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
3121 {
3122 	int i;
3123 
3124 	spin_lock(&sbi->iostat_lock);
3125 	for (i = 0; i < NR_IO_TYPE; i++) {
3126 		sbi->rw_iostat[i] = 0;
3127 		sbi->prev_rw_iostat[i] = 0;
3128 	}
3129 	spin_unlock(&sbi->iostat_lock);
3130 }
3131 
3132 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
3133 
3134 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
3135 			enum iostat_type type, unsigned long long io_bytes)
3136 {
3137 	if (!sbi->iostat_enable)
3138 		return;
3139 	spin_lock(&sbi->iostat_lock);
3140 	sbi->rw_iostat[type] += io_bytes;
3141 
3142 	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
3143 		sbi->rw_iostat[APP_BUFFERED_IO] =
3144 			sbi->rw_iostat[APP_WRITE_IO] -
3145 			sbi->rw_iostat[APP_DIRECT_IO];
3146 
3147 	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
3148 		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
3149 			sbi->rw_iostat[APP_READ_IO] -
3150 			sbi->rw_iostat[APP_DIRECT_READ_IO];
3151 	spin_unlock(&sbi->iostat_lock);
3152 
3153 	f2fs_record_iostat(sbi);
3154 }
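
/*
 * Worked example (illustrative numbers): if rw_iostat[APP_WRITE_IO] has
 * accumulated 4096 bytes and rw_iostat[APP_DIRECT_IO] 1024, each update
 * recomputes rw_iostat[APP_BUFFERED_IO] as 4096 - 1024 = 3072, so
 * buffered counters never need their own f2fs_update_iostat() call.
 */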
3155 
3156 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
3157 
3158 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3159 
3160 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3161 					block_t blkaddr, int type);
3162 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3163 					block_t blkaddr, int type)
3164 {
3165 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3166 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3167 			 blkaddr, type);
3168 		f2fs_bug_on(sbi, 1);
3169 	}
3170 }
3171 
3172 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3173 {
3174 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3175 			blkaddr == COMPRESS_ADDR)
3176 		return false;
3177 	return true;
3178 }
3179 
3180 static inline void f2fs_set_page_private(struct page *page,
3181 						unsigned long data)
3182 {
3183 	if (PagePrivate(page))
3184 		return;
3185 
3186 	attach_page_private(page, (void *)data);
3187 }
3188 
3189 static inline void f2fs_clear_page_private(struct page *page)
3190 {
3191 	detach_page_private(page);
3192 }
3193 
3194 /*
3195  * file.c
3196  */
3197 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3198 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3199 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3200 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3201 int f2fs_truncate(struct inode *inode);
3202 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
3203 		 struct kstat *stat, u32 request_mask, unsigned int flags);
3204 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
3205 		 struct iattr *attr);
3206 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3207 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3208 int f2fs_precache_extents(struct inode *inode);
3209 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3210 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3211 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3212 int f2fs_pin_file_control(struct inode *inode, bool inc);
3213 
3214 /*
3215  * inode.c
3216  */
3217 void f2fs_set_inode_flags(struct inode *inode);
3218 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3219 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3220 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3221 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3222 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3223 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3224 void f2fs_update_inode_page(struct inode *inode);
3225 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3226 void f2fs_evict_inode(struct inode *inode);
3227 void f2fs_handle_failed_inode(struct inode *inode);
3228 
3229 /*
3230  * namei.c
3231  */
3232 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3233 							bool hot, bool set);
3234 struct dentry *f2fs_get_parent(struct dentry *child);
3235 
3236 /*
3237  * dir.c
3238  */
3239 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3240 int f2fs_init_casefolded_name(const struct inode *dir,
3241 			      struct f2fs_filename *fname);
3242 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3243 			int lookup, struct f2fs_filename *fname);
3244 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3245 			struct f2fs_filename *fname);
3246 void f2fs_free_filename(struct f2fs_filename *fname);
3247 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3248 			const struct f2fs_filename *fname, int *max_slots);
3249 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3250 			unsigned int start_pos, struct fscrypt_str *fstr);
3251 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3252 			struct f2fs_dentry_ptr *d);
3253 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3254 			const struct f2fs_filename *fname, struct page *dpage);
3255 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3256 			unsigned int current_depth);
3257 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3258 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3259 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3260 					 const struct f2fs_filename *fname,
3261 					 struct page **res_page);
3262 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3263 			const struct qstr *child, struct page **res_page);
3264 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3265 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3266 			struct page **page);
3267 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3268 			struct page *page, struct inode *inode);
3269 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3270 			  const struct f2fs_filename *fname);
3271 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3272 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3273 			unsigned int bit_pos);
3274 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3275 			struct inode *inode, nid_t ino, umode_t mode);
3276 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3277 			struct inode *inode, nid_t ino, umode_t mode);
3278 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3279 			struct inode *inode, nid_t ino, umode_t mode);
3280 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3281 			struct inode *dir, struct inode *inode);
3282 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3283 bool f2fs_empty_dir(struct inode *dir);
3284 
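/*
 * Helper used by the VFS operations in namei.c: reject "no-key" names
 * (encrypted names that cannot be presented because the key is absent)
 * up front, then hand off to f2fs_do_add_link() with the parent inode
 * taken from the dentry.
 */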
3285 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3286 {
3287 	if (fscrypt_is_nokey_name(dentry))
3288 		return -ENOKEY;
3289 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3290 				inode, inode->i_ino, inode->i_mode);
3291 }
3292 
3293 /*
3294  * super.c
3295  */
3296 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3297 void f2fs_inode_synced(struct inode *inode);
3298 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3299 int f2fs_quota_sync(struct super_block *sb, int type);
3300 loff_t max_file_blocks(struct inode *inode);
3301 void f2fs_quota_off_umount(struct super_block *sb);
3302 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3303 int f2fs_sync_fs(struct super_block *sb, int sync);
3304 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3305 
3306 /*
3307  * hash.c
3308  */
3309 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3310 
3311 /*
3312  * node.c
3313  */
3314 struct node_info;
3315 
3316 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3317 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3318 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3319 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3320 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3321 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3322 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3323 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3324 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3325 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3326 						struct node_info *ni);
3327 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3328 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3329 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3330 int f2fs_truncate_xattr_node(struct inode *inode);
3331 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3332 					unsigned int seq_id);
3333 int f2fs_remove_inode_page(struct inode *inode);
3334 struct page *f2fs_new_inode_page(struct inode *inode);
3335 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3336 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3337 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3338 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3339 int f2fs_move_node_page(struct page *node_page, int gc_type);
3340 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3341 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3342 			struct writeback_control *wbc, bool atomic,
3343 			unsigned int *seq_id);
3344 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3345 			struct writeback_control *wbc,
3346 			bool do_balance, enum iostat_type io_type);
3347 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3348 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3349 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3350 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3351 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3352 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3353 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3354 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3355 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3356 			unsigned int segno, struct f2fs_summary_block *sum);
3357 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3358 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3359 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3360 int __init f2fs_create_node_manager_caches(void);
3361 void f2fs_destroy_node_manager_caches(void);
3362 
3363 /*
3364  * segment.c
3365  */
3366 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3367 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3368 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3369 void f2fs_drop_inmem_pages(struct inode *inode);
3370 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3371 int f2fs_commit_inmem_pages(struct inode *inode);
3372 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3373 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3374 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3375 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3376 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3377 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3378 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3379 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3380 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3381 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3382 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3383 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3384 					struct cp_control *cpc);
3385 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3386 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3387 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3388 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3389 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3390 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3391 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3392 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3393 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3394 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3395 			unsigned int *newseg, bool new_sec, int dir);
3396 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3397 					unsigned int start, unsigned int end);
3398 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
3399 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3400 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3401 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3402 					struct cp_control *cpc);
3403 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3404 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3405 					block_t blk_addr);
3406 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3407 						enum iostat_type io_type);
3408 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3409 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3410 			struct f2fs_io_info *fio);
3411 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3412 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3413 			block_t old_blkaddr, block_t new_blkaddr,
3414 			bool recover_curseg, bool recover_newaddr,
3415 			bool from_gc);
3416 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3417 			block_t old_addr, block_t new_addr,
3418 			unsigned char version, bool recover_curseg,
3419 			bool recover_newaddr);
3420 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3421 			block_t old_blkaddr, block_t *new_blkaddr,
3422 			struct f2fs_summary *sum, int type,
3423 			struct f2fs_io_info *fio);
3424 void f2fs_wait_on_page_writeback(struct page *page,
3425 			enum page_type type, bool ordered, bool locked);
3426 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3427 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3428 								block_t len);
3429 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3430 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3431 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3432 			unsigned int val, int alloc);
3433 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3434 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3435 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3436 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3437 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3438 int __init f2fs_create_segment_manager_caches(void);
3439 void f2fs_destroy_segment_manager_caches(void);
3440 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3441 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3442 			enum page_type type, enum temp_type temp);
3443 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3444 			unsigned int segno);
3445 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3446 			unsigned int segno);
3447 
3448 /*
3449  * checkpoint.c
3450  */
3451 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
3452 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3453 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3454 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3455 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3456 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3457 					block_t blkaddr, int type);
3458 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3459 			int type, bool sync);
3460 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
3461 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3462 			long nr_to_write, enum iostat_type io_type);
3463 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3464 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3465 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3466 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3467 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3468 					unsigned int devidx, int type);
3469 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3470 					unsigned int devidx, int type);
3471 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3472 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3473 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3474 void f2fs_add_orphan_inode(struct inode *inode);
3475 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3476 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3477 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3478 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3479 void f2fs_remove_dirty_inode(struct inode *inode);
3480 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3481 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3482 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
3483 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3484 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3485 int __init f2fs_create_checkpoint_caches(void);
3486 void f2fs_destroy_checkpoint_caches(void);
3487 int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
3488 int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
3489 void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
3490 void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
3491 
3492 /*
3493  * data.c
3494  */
3495 int __init f2fs_init_bioset(void);
3496 void f2fs_destroy_bioset(void);
3497 int f2fs_init_bio_entry_cache(void);
3498 void f2fs_destroy_bio_entry_cache(void);
3499 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3500 				struct bio *bio, enum page_type type);
3501 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3502 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3503 				struct inode *inode, struct page *page,
3504 				nid_t ino, enum page_type type);
3505 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3506 					struct bio **bio, struct page *page);
3507 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3508 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3509 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3510 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3511 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3512 			block_t blk_addr, struct bio *bio);
3513 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3514 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3515 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3516 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3517 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3518 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3519 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
3520 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3521 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3522 			int op_flags, bool for_write);
3523 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3524 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3525 			bool for_write);
3526 struct page *f2fs_get_new_data_page(struct inode *inode,
3527 			struct page *ipage, pgoff_t index, bool new_i_size);
3528 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3529 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3530 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3531 			int create, int flag);
3532 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3533 			u64 start, u64 len);
3534 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3535 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3536 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3537 int f2fs_write_single_data_page(struct page *page, int *submitted,
3538 				struct bio **bio, sector_t *last_block,
3539 				struct writeback_control *wbc,
3540 				enum iostat_type io_type,
3541 				int compr_blocks, bool allow_balance);
3542 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3543 			unsigned int length);
3544 int f2fs_release_page(struct page *page, gfp_t wait);
3545 #ifdef CONFIG_MIGRATION
3546 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3547 			struct page *page, enum migrate_mode mode);
3548 #endif
3549 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3550 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3551 int f2fs_init_post_read_processing(void);
3552 void f2fs_destroy_post_read_processing(void);
3553 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3554 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3555 
3556 /*
3557  * gc.c
3558  */
3559 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3560 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3561 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3562 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
3563 			unsigned int segno);
3564 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3565 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3566 int __init f2fs_create_garbage_collection_cache(void);
3567 void f2fs_destroy_garbage_collection_cache(void);
3568 
3569 /*
3570  * recovery.c
3571  */
3572 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3573 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3574 
3575 /*
3576  * debug.c
3577  */
3578 #ifdef CONFIG_F2FS_STAT_FS
3579 struct f2fs_stat_info {
3580 	struct list_head stat_list;
3581 	struct f2fs_sb_info *sbi;
3582 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3583 	int main_area_segs, main_area_sections, main_area_zones;
3584 	unsigned long long hit_largest, hit_cached, hit_rbtree;
3585 	unsigned long long hit_total, total_ext;
3586 	int ext_tree, zombie_tree, ext_node;
3587 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3588 	int ndirty_data, ndirty_qdata;
3589 	int inmem_pages;
3590 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3591 	int nats, dirty_nats, sits, dirty_sits;
3592 	int free_nids, avail_nids, alloc_nids;
3593 	int total_count, utilization;
3594 	int bg_gc, nr_wb_cp_data, nr_wb_data;
3595 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3596 	int nr_dio_read, nr_dio_write;
3597 	unsigned int io_skip_bggc, other_skip_bggc;
3598 	int nr_flushing, nr_flushed, flush_list_empty;
3599 	int nr_discarding, nr_discarded;
3600 	int nr_discard_cmd;
3601 	unsigned int undiscard_blks;
3602 	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
3603 	unsigned int cur_ckpt_time, peak_ckpt_time;
3604 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3605 	int compr_inode;
3606 	unsigned long long compr_blocks;
3607 	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3608 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3609 	unsigned int bimodal, avg_vblocks;
3610 	int util_free, util_valid, util_invalid;
3611 	int rsvd_segs, overp_segs;
3612 	int dirty_count, node_pages, meta_pages;
3613 	int prefree_count, call_count, cp_count, bg_cp_count;
3614 	int tot_segs, node_segs, data_segs, free_segs, free_secs;
3615 	int bg_node_segs, bg_data_segs;
3616 	int tot_blks, data_blks, node_blks;
3617 	int bg_data_blks, bg_node_blks;
3618 	unsigned long long skipped_atomic_files[2];
3619 	int curseg[NR_CURSEG_TYPE];
3620 	int cursec[NR_CURSEG_TYPE];
3621 	int curzone[NR_CURSEG_TYPE];
3622 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3623 	unsigned int full_seg[NR_CURSEG_TYPE];
3624 	unsigned int valid_blks[NR_CURSEG_TYPE];
3625 
3626 	unsigned int meta_count[META_MAX];
3627 	unsigned int segment_count[2];
3628 	unsigned int block_count[2];
3629 	unsigned int inplace_count;
3630 	unsigned long long base_mem, cache_mem, page_mem;
3631 };
3632 
3633 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3634 {
3635 	return (struct f2fs_stat_info *)sbi->stat_info;
3636 }
3637 
3638 #define stat_inc_cp_count(si)		((si)->cp_count++)
3639 #define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
3640 #define stat_inc_call_count(si)		((si)->call_count++)
3641 #define stat_inc_bggc_count(si)		((si)->bg_gc++)
3642 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
3643 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
3644 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
3645 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
3646 #define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
3647 #define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
3648 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
3649 #define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
3650 #define stat_inc_inline_xattr(inode)					\
3651 	do {								\
3652 		if (f2fs_has_inline_xattr(inode))			\
3653 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
3654 	} while (0)
3655 #define stat_dec_inline_xattr(inode)					\
3656 	do {								\
3657 		if (f2fs_has_inline_xattr(inode))			\
3658 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
3659 	} while (0)
3660 #define stat_inc_inline_inode(inode)					\
3661 	do {								\
3662 		if (f2fs_has_inline_data(inode))			\
3663 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
3664 	} while (0)
3665 #define stat_dec_inline_inode(inode)					\
3666 	do {								\
3667 		if (f2fs_has_inline_data(inode))			\
3668 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
3669 	} while (0)
3670 #define stat_inc_inline_dir(inode)					\
3671 	do {								\
3672 		if (f2fs_has_inline_dentry(inode))			\
3673 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
3674 	} while (0)
3675 #define stat_dec_inline_dir(inode)					\
3676 	do {								\
3677 		if (f2fs_has_inline_dentry(inode))			\
3678 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
3679 	} while (0)
3680 #define stat_inc_compr_inode(inode)					\
3681 	do {								\
3682 		if (f2fs_compressed_file(inode))			\
3683 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
3684 	} while (0)
3685 #define stat_dec_compr_inode(inode)					\
3686 	do {								\
3687 		if (f2fs_compressed_file(inode))			\
3688 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
3689 	} while (0)
3690 #define stat_add_compr_blocks(inode, blocks)				\
3691 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3692 #define stat_sub_compr_blocks(inode, blocks)				\
3693 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3694 #define stat_inc_meta_count(sbi, blkaddr)				\
3695 	do {								\
3696 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
3697 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
3698 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
3699 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
3700 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
3701 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
3702 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
3703 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
3704 	} while (0)
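/*
 * The chain above relies on the fixed on-disk layout ordering of the meta
 * areas (CP < SIT < NAT < SSA < main), so a meta block address can be
 * classified with simple range checks against each area's base address.
 */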
3705 #define stat_inc_seg_type(sbi, curseg)					\
3706 		((sbi)->segment_count[(curseg)->alloc_type]++)
3707 #define stat_inc_block_count(sbi, curseg)				\
3708 		((sbi)->block_count[(curseg)->alloc_type]++)
3709 #define stat_inc_inplace_blocks(sbi)					\
3710 		(atomic_inc(&(sbi)->inplace_count))
3711 #define stat_update_max_atomic_write(inode)				\
3712 	do {								\
3713 		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files);	\
3714 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
3715 		if (cur > max)						\
3716 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
3717 	} while (0)
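/*
 * Note that the read-compare-set sequence above is not atomic as a whole;
 * a racing update may be lost, which is acceptable for a best-effort
 * statistic such as the maximum atomic-write count.
 */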
3718 #define stat_inc_volatile_write(inode)					\
3719 		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3720 #define stat_dec_volatile_write(inode)					\
3721 		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
3722 #define stat_update_max_volatile_write(inode)				\
3723 	do {								\
3724 		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
3725 		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
3726 		if (cur > max)						\
3727 			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
3728 	} while (0)
3729 #define stat_inc_seg_count(sbi, type, gc_type)				\
3730 	do {								\
3731 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3732 		si->tot_segs++;						\
3733 		if ((type) == SUM_TYPE_DATA) {				\
3734 			si->data_segs++;				\
3735 			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
3736 		} else {						\
3737 			si->node_segs++;				\
3738 			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
3739 		}							\
3740 	} while (0)
3741 
3742 #define stat_inc_tot_blk_count(si, blks)				\
3743 	((si)->tot_blks += (blks))
3744 
3745 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
3746 	do {								\
3747 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3748 		stat_inc_tot_blk_count(si, blks);			\
3749 		si->data_blks += (blks);				\
3750 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3751 	} while (0)
3752 
3753 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
3754 	do {								\
3755 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3756 		stat_inc_tot_blk_count(si, blks);			\
3757 		si->node_blks += (blks);				\
3758 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3759 	} while (0)
3760 
3761 int f2fs_build_stats(struct f2fs_sb_info *sbi);
3762 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
3763 void __init f2fs_create_root_stats(void);
3764 void f2fs_destroy_root_stats(void);
3765 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
3766 #else
3767 #define stat_inc_cp_count(si)				do { } while (0)
3768 #define stat_inc_bg_cp_count(si)			do { } while (0)
3769 #define stat_inc_call_count(si)				do { } while (0)
3770 #define stat_inc_bggc_count(si)				do { } while (0)
3771 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
3772 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
3773 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
3774 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
3775 #define stat_inc_total_hit(sbi)				do { } while (0)
3776 #define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
3777 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
3778 #define stat_inc_cached_node_hit(sbi)			do { } while (0)
3779 #define stat_inc_inline_xattr(inode)			do { } while (0)
3780 #define stat_dec_inline_xattr(inode)			do { } while (0)
3781 #define stat_inc_inline_inode(inode)			do { } while (0)
3782 #define stat_dec_inline_inode(inode)			do { } while (0)
3783 #define stat_inc_inline_dir(inode)			do { } while (0)
3784 #define stat_dec_inline_dir(inode)			do { } while (0)
3785 #define stat_inc_compr_inode(inode)			do { } while (0)
3786 #define stat_dec_compr_inode(inode)			do { } while (0)
3787 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
3788 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
3789 #define stat_update_max_atomic_write(inode)		do { } while (0)
3790 #define stat_inc_volatile_write(inode)			do { } while (0)
3791 #define stat_dec_volatile_write(inode)			do { } while (0)
3792 #define stat_update_max_volatile_write(inode)		do { } while (0)
3793 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
3794 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
3795 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
3796 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
3797 #define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
3798 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
3799 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
3800 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
3801 
3802 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
3803 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
3804 static inline void __init f2fs_create_root_stats(void) { }
3805 static inline void f2fs_destroy_root_stats(void) { }
3806 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
3807 #endif
3808 
3809 extern const struct file_operations f2fs_dir_operations;
3810 extern const struct file_operations f2fs_file_operations;
3811 extern const struct inode_operations f2fs_file_inode_operations;
3812 extern const struct address_space_operations f2fs_dblock_aops;
3813 extern const struct address_space_operations f2fs_node_aops;
3814 extern const struct address_space_operations f2fs_meta_aops;
3815 extern const struct inode_operations f2fs_dir_inode_operations;
3816 extern const struct inode_operations f2fs_symlink_inode_operations;
3817 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
3818 extern const struct inode_operations f2fs_special_inode_operations;
3819 extern struct kmem_cache *f2fs_inode_entry_slab;
3820 
3821 /*
3822  * inline.c
3823  */
3824 bool f2fs_may_inline_data(struct inode *inode);
3825 bool f2fs_may_inline_dentry(struct inode *inode);
3826 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
3827 void f2fs_truncate_inline_inode(struct inode *inode,
3828 						struct page *ipage, u64 from);
3829 int f2fs_read_inline_data(struct inode *inode, struct page *page);
3830 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
3831 int f2fs_convert_inline_inode(struct inode *inode);
3832 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
3833 int f2fs_write_inline_data(struct inode *inode, struct page *page);
3834 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
3835 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
3836 					const struct f2fs_filename *fname,
3837 					struct page **res_page);
3838 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
3839 			struct page *ipage);
3840 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
3841 			struct inode *inode, nid_t ino, umode_t mode);
3842 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
3843 				struct page *page, struct inode *dir,
3844 				struct inode *inode);
3845 bool f2fs_empty_inline_dir(struct inode *dir);
3846 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
3847 			struct fscrypt_str *fstr);
3848 int f2fs_inline_data_fiemap(struct inode *inode,
3849 			struct fiemap_extent_info *fieinfo,
3850 			__u64 start, __u64 len);
3851 
3852 /*
3853  * shrinker.c
3854  */
3855 unsigned long f2fs_shrink_count(struct shrinker *shrink,
3856 			struct shrink_control *sc);
3857 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
3858 			struct shrink_control *sc);
3859 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
3860 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
3861 
3862 /*
3863  * extent_cache.c
3864  */
3865 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
3866 				struct rb_entry *cached_re, unsigned int ofs);
3867 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
3868 				struct rb_root_cached *root,
3869 				struct rb_node **parent,
3870 				unsigned long long key, bool *left_most);
3871 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
3872 				struct rb_root_cached *root,
3873 				struct rb_node **parent,
3874 				unsigned int ofs, bool *leftmost);
3875 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
3876 		struct rb_entry *cached_re, unsigned int ofs,
3877 		struct rb_entry **prev_entry, struct rb_entry **next_entry,
3878 		struct rb_node ***insert_p, struct rb_node **insert_parent,
3879 		bool force, bool *leftmost);
3880 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
3881 				struct rb_root_cached *root, bool check_key);
3882 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
3883 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
3884 void f2fs_drop_extent_tree(struct inode *inode);
3885 unsigned int f2fs_destroy_extent_node(struct inode *inode);
3886 void f2fs_destroy_extent_tree(struct inode *inode);
3887 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
3888 			struct extent_info *ei);
3889 void f2fs_update_extent_cache(struct dnode_of_data *dn);
3890 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
3891 			pgoff_t fofs, block_t blkaddr, unsigned int len);
3892 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
3893 int __init f2fs_create_extent_cache(void);
3894 void f2fs_destroy_extent_cache(void);
3895 
3896 /*
3897  * sysfs.c
3898  */
3899 int __init f2fs_init_sysfs(void);
3900 void f2fs_exit_sysfs(void);
3901 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
3902 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
3903 
3904 /* verity.c */
3905 extern const struct fsverity_operations f2fs_verityops;
3906 
3907 /*
3908  * crypto support
3909  */
3910 static inline bool f2fs_encrypted_file(struct inode *inode)
3911 {
3912 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
3913 }
3914 
3915 static inline void f2fs_set_encrypted_inode(struct inode *inode)
3916 {
3917 #ifdef CONFIG_FS_ENCRYPTION
3918 	file_set_encrypt(inode);
3919 	f2fs_set_inode_flags(inode);
3920 #endif
3921 }
3922 
3923 /*
3924  * Returns true if reads of the inode's data need a postprocessing step,
3925  * such as decryption, fs-verity verification or decompression.
3926  */
3927 static inline bool f2fs_post_read_required(struct inode *inode)
3928 {
3929 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
3930 		f2fs_compressed_file(inode);
3931 }
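/*
 * The corresponding postprocessing (decryption, fs-verity verification,
 * decompression) happens off the read completion path via the post-read
 * infrastructure set up in data.c; see f2fs_init_post_read_processing()
 * and f2fs_init_post_read_wq() declared above.
 */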
3932 
3933 /*
3934  * compress.c
3935  */
3936 #ifdef CONFIG_F2FS_FS_COMPRESSION
3937 bool f2fs_is_compressed_page(struct page *page);
3938 struct page *f2fs_compress_control_page(struct page *page);
3939 int f2fs_prepare_compress_overwrite(struct inode *inode,
3940 			struct page **pagep, pgoff_t index, void **fsdata);
3941 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
3942 					pgoff_t index, unsigned copied);
3943 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
3944 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
3945 bool f2fs_is_compress_backend_ready(struct inode *inode);
3946 int f2fs_init_compress_mempool(void);
3947 void f2fs_destroy_compress_mempool(void);
3948 void f2fs_end_read_compressed_page(struct page *page, bool failed);
3949 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
3950 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
3951 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
3952 int f2fs_write_multi_pages(struct compress_ctx *cc,
3953 						int *submitted,
3954 						struct writeback_control *wbc,
3955 						enum iostat_type io_type);
3956 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
3957 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
3958 				unsigned nr_pages, sector_t *last_block_in_bio,
3959 				bool is_readahead, bool for_write);
3960 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
3961 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
3962 void f2fs_put_page_dic(struct page *page);
3963 int f2fs_init_compress_ctx(struct compress_ctx *cc);
3964 void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
3965 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
3966 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
3967 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
3968 int __init f2fs_init_compress_cache(void);
3969 void f2fs_destroy_compress_cache(void);
3970 #define inc_compr_inode_stat(inode)					\
3971 	do {								\
3972 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
3973 		sbi->compr_new_inode++;					\
3974 	} while (0)
3975 #define add_compr_block_stat(inode, blocks)				\
3976 	do {								\
3977 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);		\
3978 		int diff = F2FS_I(inode)->i_cluster_size - blocks;	\
3979 		sbi->compr_written_block += blocks;			\
3980 		sbi->compr_saved_block += diff;				\
3981 	} while (0)
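/*
 * Example: with the default 4-block cluster (i_log_cluster_size == 2), a
 * cluster compressed down to 1 block yields diff == 3, i.e. three blocks
 * saved, which add_compr_block_stat() folds into compr_saved_block.
 */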
3982 #else
3983 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
3984 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
3985 {
3986 	if (!f2fs_compressed_file(inode))
3987 		return true;
3988 	/* compression support is not compiled in */
3989 	return false;
3990 }
3991 static inline struct page *f2fs_compress_control_page(struct page *page)
3992 {
3993 	WARN_ON_ONCE(1);
3994 	return ERR_PTR(-EINVAL);
3995 }
3996 static inline int f2fs_init_compress_mempool(void) { return 0; }
3997 static inline void f2fs_destroy_compress_mempool(void) { }
3998 static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
3999 {
4000 	WARN_ON_ONCE(1);
4001 }
4002 static inline void f2fs_put_page_dic(struct page *page)
4003 {
4004 	WARN_ON_ONCE(1);
4005 }
4006 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
4007 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
4008 static inline int __init f2fs_init_compress_cache(void) { return 0; }
4009 static inline void f2fs_destroy_compress_cache(void) { }
4010 #define inc_compr_inode_stat(inode)		do { } while (0)
4011 #endif
4012 
4013 static inline void set_compress_context(struct inode *inode)
4014 {
4015 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4016 
4017 	F2FS_I(inode)->i_compress_algorithm =
4018 			F2FS_OPTION(sbi).compress_algorithm;
4019 	F2FS_I(inode)->i_log_cluster_size =
4020 			F2FS_OPTION(sbi).compress_log_size;
4021 	F2FS_I(inode)->i_compress_flag =
4022 			F2FS_OPTION(sbi).compress_chksum ?
4023 				1 << COMPRESS_CHKSUM : 0;
4024 	F2FS_I(inode)->i_cluster_size =
4025 			1 << F2FS_I(inode)->i_log_cluster_size;
4026 	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
4027 			F2FS_OPTION(sbi).compress_level)
4028 		F2FS_I(inode)->i_compress_flag |=
4029 				F2FS_OPTION(sbi).compress_level <<
4030 				COMPRESS_LEVEL_OFFSET;
4031 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
4032 	set_inode_flag(inode, FI_COMPRESSED_FILE);
4033 	stat_inc_compr_inode(inode);
4034 	inc_compr_inode_stat(inode);
4035 	f2fs_mark_inode_dirty_sync(inode, true);
4036 }
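/*
 * Resulting i_compress_flag layout: feature bits such as COMPRESS_CHKSUM
 * occupy the low bits, while the algorithm-specific compression level
 * (when one is configured, e.g. for LZ4HC) is packed into the bits
 * starting at COMPRESS_LEVEL_OFFSET.
 */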
4037 
4038 static inline bool f2fs_disable_compressed_file(struct inode *inode)
4039 {
4040 	struct f2fs_inode_info *fi = F2FS_I(inode);
4041 
4042 	if (!f2fs_compressed_file(inode))
4043 		return true;
4044 	if (S_ISREG(inode->i_mode) &&
4045 		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
4046 		return false;
4047 
4048 	fi->i_flags &= ~F2FS_COMPR_FL;
4049 	stat_dec_compr_inode(inode);
4050 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
4051 	f2fs_mark_inode_dirty_sync(inode, true);
4052 	return true;
4053 }
4054 
4055 #define F2FS_FEATURE_FUNCS(name, flagname) \
4056 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
4057 { \
4058 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
4059 }
4060 
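/*
 * Each invocation below generates a feature-test predicate; for instance,
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) expands to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */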
4061 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
4062 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
4063 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
4064 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
4065 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
4066 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
4067 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
4068 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
4069 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
4070 F2FS_FEATURE_FUNCS(verity, VERITY);
4071 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
4072 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
4073 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
4074 
4075 #ifdef CONFIG_BLK_DEV_ZONED
4076 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
4077 				    block_t blkaddr)
4078 {
4079 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
4080 
4081 	return test_bit(zno, FDEV(devi).blkz_seq);
4082 }
4083 #endif
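/*
 * Example: a zoned device with 4KB blocks and 256MB zones has 2^16 blocks
 * per zone, so log_blocks_per_blkz == 16 and the zone number is simply
 * blkaddr >> 16; the blkz_seq bitmap then records whether that zone must
 * be written sequentially.
 */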
4084 
4085 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4086 {
4087 	return f2fs_sb_has_blkzoned(sbi);
4088 }
4089 
4090 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4091 {
4092 	return blk_queue_discard(bdev_get_queue(bdev)) ||
4093 	       bdev_is_zoned(bdev);
4094 }
4095 
4096 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4097 {
4098 	int i;
4099 
4100 	if (!f2fs_is_multi_device(sbi))
4101 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4102 
4103 	for (i = 0; i < sbi->s_ndevs; i++)
4104 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4105 			return true;
4106 	return false;
4107 }
4108 
4109 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4110 {
4111 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4112 					f2fs_hw_should_discard(sbi);
4113 }
4114 
4115 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4116 {
4117 	int i;
4118 
4119 	if (!f2fs_is_multi_device(sbi))
4120 		return bdev_read_only(sbi->sb->s_bdev);
4121 
4122 	for (i = 0; i < sbi->s_ndevs; i++)
4123 		if (bdev_read_only(FDEV(i).bdev))
4124 			return true;
4125 	return false;
4126 }
4127 
4128 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4129 {
4130 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4131 }
4132 
4133 static inline bool f2fs_may_compress(struct inode *inode)
4134 {
4135 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4136 				f2fs_is_atomic_file(inode) ||
4137 				f2fs_is_volatile_file(inode))
4138 		return false;
4139 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4140 }
4141 
4142 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4143 						u64 blocks, bool add)
4144 {
4145 	int diff = F2FS_I(inode)->i_cluster_size - blocks;
4146 	struct f2fs_inode_info *fi = F2FS_I(inode);
4147 
4148 	/* don't update i_compr_blocks if saved blocks were released */
4149 	if (!add && !atomic_read(&fi->i_compr_blocks))
4150 		return;
4151 
4152 	if (add) {
4153 		atomic_add(diff, &fi->i_compr_blocks);
4154 		stat_add_compr_blocks(inode, diff);
4155 	} else {
4156 		atomic_sub(diff, &fi->i_compr_blocks);
4157 		stat_sub_compr_blocks(inode, diff);
4158 	}
4159 	f2fs_mark_inode_dirty_sync(inode, true);
4160 }
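/*
 * Example: a 4-block cluster compressed into a single block gives
 * diff == 3, so i_compr_blocks grows or shrinks by 3 (the number of
 * blocks saved by compression) depending on @add.
 */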
4161 
4162 static inline int block_unaligned_IO(struct inode *inode,
4163 				struct kiocb *iocb, struct iov_iter *iter)
4164 {
4165 	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
4166 	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
4167 	loff_t offset = iocb->ki_pos;
4168 	unsigned long align = offset | iov_iter_alignment(iter);
4169 
4170 	return align & blocksize_mask;
4171 }
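/*
 * Example: with 4KB blocks, blocksize_mask is 0xfff.  A direct I/O at
 * offset 8192 with 4KB-aligned iovecs gives align & 0xfff == 0 (aligned),
 * while one at offset 6144 gives 0x800 (unaligned) and forces callers
 * such as f2fs_force_buffered_io() onto the buffered path in LFS mode.
 */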
4172 
4173 static inline int allow_outplace_dio(struct inode *inode,
4174 				struct kiocb *iocb, struct iov_iter *iter)
4175 {
4176 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4177 	int rw = iov_iter_rw(iter);
4178 
4179 	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
4180 				!block_unaligned_IO(inode, iocb, iter));
4181 }
4182 
4183 static inline bool f2fs_force_buffered_io(struct inode *inode,
4184 				struct kiocb *iocb, struct iov_iter *iter)
4185 {
4186 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4187 	int rw = iov_iter_rw(iter);
4188 
4189 	if (f2fs_post_read_required(inode))
4190 		return true;
4191 	if (f2fs_is_multi_device(sbi))
4192 		return true;
4193 	/*
4194 	 * for a zoned block device, fall back from direct IO to buffered IO,
4195 	 * so that all IOs can be serialized by the log-structured write path.
4196 	 */
4197 	if (f2fs_sb_has_blkzoned(sbi))
4198 		return true;
4199 	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
4200 		if (block_unaligned_IO(inode, iocb, iter))
4201 			return true;
4202 		if (F2FS_IO_ALIGNED(sbi))
4203 			return true;
4204 	}
4205 	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
4206 		return true;
4207 
4208 	return false;
4209 }
4210 
4211 static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
4212 {
4213 	return fsverity_active(inode) &&
4214 	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
4215 }
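/*
 * Example: with PAGE_SIZE == 4096 and i_size == 10000, DIV_ROUND_UP()
 * yields 3, so only page indexes 0-2 are subject to fs-verity
 * verification; pages wholly beyond EOF need no verification.
 */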
4216 
4217 #ifdef CONFIG_F2FS_FAULT_INJECTION
4218 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4219 							unsigned int type);
4220 #else
4221 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
4222 #endif
4223 
4224 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4225 {
4226 #ifdef CONFIG_QUOTA
4227 	if (f2fs_sb_has_quota_ino(sbi))
4228 		return true;
4229 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4230 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4231 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4232 		return true;
4233 #endif
4234 	return false;
4235 }
4236 
4237 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4238 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4239 
4240 #endif /* _LINUX_F2FS_H */
4241