/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif

enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete since bio_alloc() never fails */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR,
	FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	(GENMASK(FAULT_MAX - 1, 0))

struct f2fs_fault_info {
	atomic_t inject_ops;
	int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & BIT(type))

/* maximum retry count for injected failure */
#define DEFAULT_FAILURE_RETRY_COUNT	8
#else
#define DEFAULT_FAILURE_RETRY_COUNT	1
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000001
#define F2FS_MOUNT_DISCARD		0x00000002
#define F2FS_MOUNT_NOHEAP		0x00000004
#define F2FS_MOUNT_XATTR_USER		0x00000008
#define F2FS_MOUNT_POSIX_ACL		0x00000010
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000020
#define F2FS_MOUNT_INLINE_XATTR		0x00000040
#define F2FS_MOUNT_INLINE_DATA		0x00000080
#define F2FS_MOUNT_INLINE_DENTRY	0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
#define F2FS_MOUNT_NOBARRIER		0x00000400
#define F2FS_MOUNT_FASTBOOT		0x00000800
#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00001000
#define F2FS_MOUNT_DATA_FLUSH		0x00002000
#define F2FS_MOUNT_FAULT_INJECTION	0x00004000
#define F2FS_MOUNT_USRQUOTA		0x00008000
#define F2FS_MOUNT_GRPQUOTA		0x00010000
#define F2FS_MOUNT_PRJQUOTA		0x00020000
#define F2FS_MOUNT_QUOTA		0x00040000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00080000
#define F2FS_MOUNT_RESERVE_ROOT		0x00100000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x00200000
#define F2FS_MOUNT_NORECOVERY		0x00400000
#define F2FS_MOUNT_ATGC			0x00800000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x01000000
#define F2FS_MOUNT_GC_MERGE		0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x08000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
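/*
 * Example: the comparison is wraparound-safe because the subtraction is
 * evaluated in unsigned arithmetic before the signed cast, so e.g.
 * ver_after(1, ULLONG_MAX) is true: (long long)(1 - ULLONG_MAX) == 2 > 0.
 */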

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read
 * lock while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};
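/*
 * In the unfair configuration, readers acquire internal_rwsem only via
 * trylock while parked on read_waiters, so they never queue ahead of a
 * pending writer; the writer wakes read_waiters on release (see
 * f2fs_down_read()/f2fs_up_write()).
 */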

struct f2fs_mount_info {
	unsigned int opt;
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int errors;			/* errors parameter */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};

#define F2FS_FEATURE_ENCRYPT		0x00000001
#define F2FS_FEATURE_BLKZONED		0x00000002
#define F2FS_FEATURE_ATOMIC_WRITE	0x00000004
#define F2FS_FEATURE_EXTRA_ATTR		0x00000008
#define F2FS_FEATURE_PRJQUOTA		0x00000010
#define F2FS_FEATURE_INODE_CHKSUM	0x00000020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x00000040
#define F2FS_FEATURE_QUOTA_INO		0x00000080
#define F2FS_FEATURE_INODE_CRTIME	0x00000100
#define F2FS_FEATURE_LOST_FOUND		0x00000200
#define F2FS_FEATURE_VERITY		0x00000400
#define F2FS_FEATURE_SB_CHKSUM		0x00000800
#define F2FS_FEATURE_CASEFOLD		0x00001000
#define F2FS_FEATURE_COMPRESSION	0x00002000
#define F2FS_FEATURE_RO			0x00004000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};

/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};

/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* minimum discard granularity, unit: block count */
#define MIN_DISCARD_GRANULARITY		1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
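/*
 * plist_idx() maps a discard length to its pending list: 1 block -> list 0,
 * 2 blocks -> list 1, ..., and any length >= MAX_PLIST_NUM shares the last
 * list. Callers are expected to pass blk_num >= 1; 0 would underflow.
 */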

enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct discard_info di;		/* discard info */
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};

enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used when discard candidates exist */
	unsigned int mid_interval;	/* used when the device is busy */
	unsigned int max_interval;	/* used when no candidates exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum discard granularity not subject to I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};

struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_io_aware_gran;	/* minimum discard granularity not subject to I/O awareness */
	unsigned int discard_urgent_util;	/* utilization above which discard is issued proactively */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
	bool discard_wake;			/* to wake up discard thread */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))

static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}
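/*
 * Both helpers above behave like a fetch-and-add: they bump the in-journal
 * entry count by i and return the previous count, which callers can use as
 * the slot index for the entries being added.
 */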

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}

/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
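/*
 * Worked example, assuming a 4KB block with 923 i_addr slots, the default
 * 50-slot inline xattr area and no extra attributes: MAX_INLINE_DATA =
 * 4 * (923 - 50 - 1) = 3488 bytes, so NR_INLINE_DENTRY = 3488 * 8 / 153 =
 * 182 entries, INLINE_DENTRY_BITMAP_SIZE = 23 bytes, and
 * INLINE_RESERVED_SIZE = 3488 - (19 * 182 + 23) = 7 bytes.
 */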

/*
 * For INODE and NODE manager
 */
/* for directory operations */

struct f2fs_filename {
	/*
	 * The filename the user specified. This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename. For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL. In all these cases
	 * we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};

struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
			SIZE_OF_DIR_ENTRY * entry_cnt;
}

/*
 * XATTR_NODE_OFFSET is the node offset of the single xattr node block kept
 * per file; it stays -1 to distinguish that block from index node blocks.
 * Since some bits of the offset are used to mark the node block, the -1 is
 * truncated to fit the offset bit-field.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

/* number of age extent info in extent cache we try to shrink */
#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
#define LAST_AGE_WEIGHT			30
#define SAME_AGE_REGION			1024

/*
 * Define data block with age less than 1GB as hot data
 * define data block with age less than 10GB but more than 1GB as warm data
 */
#define DEF_HOT_DATA_AGE_THRESHOLD	262144
#define DEF_WARM_DATA_AGE_THRESHOLD	2621440
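/* with 4KB blocks: 262144 blocks = 1GB and 2621440 blocks = 10GB */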

/* extent cache type */
enum extent_type {
	EX_READ,
	EX_BLOCK_AGE,
	NR_EXTENT_CACHES,
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	union {
		/* read extent_cache */
		struct {
			/* start block address of the extent */
			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			/* physical extent length of compressed blocks */
			unsigned int c_len;
#endif
		};
		/* block age extent_cache */
		struct {
			/* block age of the extent */
			unsigned long long age;
			/* last total blocks allocated */
			unsigned long long last_blocks;
		};
	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
	struct extent_info largest;	/* largest cached extent for EX_READ */
};

struct extent_tree_info {
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;	/* lru list for shrinker */
	spinlock_t extent_lock;		/* locking extent lru list */
	atomic_t total_ext_tree;	/* extent tree count */
	struct list_head zombie_list;	/* extent zombie tree list */
	atomic_t total_zombie_tree;	/* extent zombie tree count */
	atomic_t total_ext_node;	/* extent info count */
};

/*
 * State of block returned by f2fs_map_blocks.
 */
#define F2FS_MAP_NEW		(1U << 0)
#define F2FS_MAP_MAPPED		(1U << 1)
#define F2FS_MAP_DELALLOC	(1U << 2)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |	\
				F2FS_MAP_DELALLOC)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};

/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)

#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	MAX_GC_FAILURE
};

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_OPENED_FILE,		/* indicate file has been opened */
	FI_MAX,			/* max flag, never be used */
};

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot __rcu *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
					/* cached extent_tree entry */
	union {
		struct inode *cow_inode;	/* copy-on-write inode for atomic write */
		struct inode *atomic_inode;
					/* point to atomic_inode, available only for cow_inode */
	};

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[3];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned char i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
};

static inline void get_read_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_read_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}
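/*
 * e.g. back = {lstart = 100, len = 4} and front = {lstart = 104, ...} are
 * mergeable into a single {lstart = 100, len = 4 + front->len} command as
 * long as the combined length does not exceed max_len.
 */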

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information are dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
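/* i.e. 3 + 3 = 6 persistent logs plus 2 in-memory logs, 8 cursegs in all */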

enum {
	CURSEG_HOT_DATA = 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_PERSISTENT_LOG,	/* number of persistent log */
	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
				/* pinned file that needs consecutive block address */
	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
	NO_CHECK_TYPE,		/* number of persistent & inmem log */
};

struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p, f)			\
	(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_DATA,
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
	F2FS_RD_DATA,
	F2FS_RD_NODE,
	F2FS_RD_META,
	F2FS_DIO_WRITE,
	F2FS_DIO_READ,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written by
 *			waiting for the bio's completion.
 *			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
#define PAGE_TYPE_ON_MAIN(type)	((type) == DATA || (type) == NODE)
enum page_type {
	DATA = 0,
	NODE = 1,	/* should not change this */
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};

enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};

enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	/* WRITE IO */
	APP_DIRECT_IO,			/* app direct write IOs */
	APP_BUFFERED_IO,		/* app buffered write IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	APP_BUFFERED_CDATA_IO,		/* app buffered write IOs on compressed file */
	APP_MAPPED_CDATA_IO,		/* app mapped write IOs on compressed file */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_CDATA_IO,			/* data IOs from kworker/fsync/reclaimer on compressed file */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */

	/* READ IO */
	APP_DIRECT_READ_IO,		/* app direct read IOs */
	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
	APP_READ_IO,			/* app read IOs */
	APP_MAPPED_READ_IO,		/* app mapped read IOs */
	APP_BUFFERED_CDATA_READ_IO,	/* app buffered read IOs on compressed file */
	APP_MAPPED_CDATA_READ_IO,	/* app mapped read IOs on compressed file */
	FS_DATA_READ_IO,		/* data read IOs */
	FS_GDATA_READ_IO,		/* data read IOs from background gc */
	FS_CDATA_READ_IO,		/* compressed data read IOs */
	FS_NODE_READ_IO,		/* node read IOs */
	FS_META_READ_IO,		/* meta read IOs */

	/* other */
	FS_DISCARD_IO,			/* discard */
	FS_FLUSH_IO,			/* flush */
	FS_ZONE_RESET_IO,		/* zone reset */
	NR_IO_TYPE,
};

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	enum req_op op;		/* contains REQ_OP_ */
	blk_opf_t op_flags;	/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	unsigned int compr_blocks;	/* # of compressed block addresses */
	unsigned int need_lock:8;	/* indicate we need to lock cp_rwsem */
	unsigned int version:8;		/* version of the node */
	unsigned int submitted:1;	/* indicate IO submission */
	unsigned int in_list:1;		/* indicate fio is in io_list */
	unsigned int is_por:1;		/* indicate IO is from recovery or not */
	unsigned int encrypted:1;	/* indicate file is encrypted */
	unsigned int meta_gc:1;		/* require meta inode GC */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
};

struct bio_entry {
	struct bio *bio;
	struct list_head list;
};

#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
#ifdef CONFIG_BLK_DEV_ZONED
	struct completion zone_wait;	/* condition value for the previous open zone to close */
	struct bio *zone_pending_bio;	/* pending bio for the previous zone */
	void *bi_private;		/* previous bi_private for pending bio */
#endif
	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
};

#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};

enum inode_type {
	DIR_INODE,		/* for dirty dir inode */
	FILE_INODE,		/* for dirty regular/symlink inode */
	DIRTY_META,		/* for all dirtied inode metadata */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};

/* for GC_AT */
struct atgc_management {
	bool atgc_enabled;			/* ATGC is enabled or not */
	struct rb_root_cached root;		/* root of victim rb-tree */
	struct list_head victim_list;		/* linked with all victim entries */
	unsigned int victim_count;		/* victim count in rb-tree */
	unsigned int candidate_ratio;		/* candidate ratio */
	unsigned int max_candidate_count;	/* max candidate count */
	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
	unsigned long long age_threshold;	/* age threshold */
};

struct f2fs_gc_control {
	unsigned int victim_segno;	/* target victim segment number */
	int init_gc_type;		/* FG_GC or BG_GC */
	bool no_bg_gc;			/* check the space and stop bg_gc */
	bool should_migrate_blocks;	/* should migrate blocks */
	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
	unsigned int nr_free_secs;	/* # of free sections to do GC */
};

/*
 * For s_flag in struct f2fs_sb_info
 * Modification on enum should be synchronized with s_flag array
 */
enum {
	SBI_IS_DIRTY,			/* dirty flag for checkpoint */
	SBI_IS_CLOSE,			/* specify unmounting */
	SBI_NEED_FSCK,			/* need fsck.f2fs to fix */
	SBI_POR_DOING,			/* recovery is doing or not */
	SBI_NEED_SB_WRITE,		/* need to recover superblock */
	SBI_NEED_CP,			/* need to checkpoint */
	SBI_IS_SHUTDOWN,		/* shutdown by ioctl */
	SBI_IS_RECOVERED,		/* recovered orphan/data */
	SBI_CP_DISABLED,		/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,		/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,		/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,		/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,		/* quota file may be corrupted */
	SBI_IS_RESIZEFS,		/* resizefs is in process */
	SBI_IS_FREEZING,		/* freezefs is in process */
	SBI_IS_WRITABLE,		/* remove ro mountoption transiently */
	MAX_SBI_FLAG,
};

enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};

/* Note that you need to keep synchronization with this gc_mode_names array */
enum {
	GC_NORMAL,
	GC_IDLE_CB,
	GC_IDLE_GREEDY,
	GC_IDLE_AT,
	GC_URGENT_HIGH,
	GC_URGENT_LOW,
	GC_URGENT_MID,
	MAX_GC_MODE,
};

enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};

enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
	FS_MODE_FRAGMENT_SEG,	/* segment fragmentation mode */
	FS_MODE_FRAGMENT_BLK,	/* block fragmentation mode */
};

enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};

enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
};

enum {
	COMPR_MODE_FS,		/*
				 * automatically compress compression
				 * enabled files
				 */
	COMPR_MODE_USER,	/*
				 * automatic compression is disabled.
				 * user can control the file compression
				 * using ioctls
				 */
};

enum {
	DISCARD_UNIT_BLOCK,	/* basic discard unit is block */
	DISCARD_UNIT_SEGMENT,	/* basic discard unit is segment */
	DISCARD_UNIT_SECTION,	/* basic discard unit is section */
};

enum {
	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
};

enum errors_option {
	MOUNT_ERRORS_READONLY,	/* remount fs ro on errors */
	MOUNT_ERRORS_CONTINUE,	/* continue on errors */
	MOUNT_ERRORS_PANIC,	/* panic on errors */
};

enum {
	BACKGROUND,
	FOREGROUND,
	MAX_CALL_TYPE,
	TOTAL_CALL = FOREGROUND,
};

static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);

/*
 * Layout of f2fs page.private:
 *
 * Layout A: lowest bit should be 1
 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
 * bit 0	PAGE_PRIVATE_NOT_POINTER
 * bit 1	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 2	PAGE_PRIVATE_INLINE_INODE
 * bit 3	PAGE_PRIVATE_REF_RESOURCE
 * bit 4-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
 */
enum {
	PAGE_PRIVATE_NOT_POINTER,		/* private contains non-pointer data */
	PAGE_PRIVATE_ONGOING_MIGRATION,		/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,		/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,		/* dirty page has referenced resources */
	PAGE_PRIVATE_MAX
};
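/*
 * e.g. an inode page carrying inline data under Layout A has
 * page.private = BIT(PAGE_PRIVATE_NOT_POINTER) | BIT(PAGE_PRIVATE_INLINE_INODE);
 * since bit 0 is set, the value is interpreted as a flag word rather than
 * as a wrapped pointer (Layout B).
 */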

/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_LZORLE,
	COMPRESS_MAX,
};

enum compress_flag {
	COMPRESS_CHKSUM,
	COMPRESS_MAX_FLAG,
};

#define COMPRESS_WATERMARK			20
#define COMPRESS_PERCENT			20

#define COMPRESS_DATA_RESERVED_SIZE		4
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 chksum;			/* compressed data chksum */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define F2FS_ZSTD_DEFAULT_CLEVEL	1

#define COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	unsigned int valid_nr_cpages;	/* valid page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};

/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	atomic_t pending_pages;		/* in-flight compressed page count */
};

/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages. It is decremented by 1 each time a page
	 * has been read (or failed to be read). When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0. In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion. This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
1525 * bio. These references are necessary to prevent compressed pages from
1526 * being freed while they are still in a bio.
1527 */
1528 refcount_t refcnt;
1529
1530 bool failed; /* IO error occurred before decompression? */
1531 bool need_verity; /* need fs-verity verification after decompression? */
1532 void *private; /* payload buffer for specified decompression algorithm */
1533 void *private2; /* extra payload buffer */
1534 struct work_struct verity_work; /* work to verify the decompressed pages */
1535 struct work_struct free_work; /* work to free this structure itself later */
1536 };
1537
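/*
 * Usage sketch (illustrative, following the comments above): the read
 * endio path drops remaining_pages once per compressed page and starts
 * decompression on the last drop, roughly:
 *
 *	if (atomic_dec_and_test(&dic->remaining_pages))
 *		f2fs_decompress_cluster(dic, ...);
 *
 * while refcnt keeps the context itself alive until the final put.
 */
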
1538 #define NULL_CLUSTER ((unsigned int)(~0))
1539 #define MIN_COMPRESS_LOG_SIZE 2
1540 #define MAX_COMPRESS_LOG_SIZE 8
1541 #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size))
1542
1543 struct f2fs_sb_info {
1544 struct super_block *sb; /* pointer to VFS super block */
1545 struct proc_dir_entry *s_proc; /* proc entry */
1546 struct f2fs_super_block *raw_super; /* raw super block pointer */
1547 struct f2fs_rwsem sb_lock; /* lock for raw super block */
1548 int valid_super_block; /* valid super block no */
1549 unsigned long s_flag; /* flags for sbi */
1550 struct mutex writepages; /* mutex for writepages() */
1551
1552 #ifdef CONFIG_BLK_DEV_ZONED
1553 unsigned int blocks_per_blkz; /* F2FS blocks per zone */
1554 #endif
1555
1556 /* for node-related operations */
1557 struct f2fs_nm_info *nm_info; /* node manager */
1558 struct inode *node_inode; /* cache node blocks */
1559
1560 /* for segment-related operations */
1561 struct f2fs_sm_info *sm_info; /* segment manager */
1562
1563 /* for bio operations */
1564 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
1565 /* keep migration IO order for LFS mode */
1566 struct f2fs_rwsem io_order_lock;
1567 pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
1568 int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
1569
1570 /* for checkpoint */
1571 struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
1572 int cur_cp_pack; /* current cp pack number (1 or 2) */
1573 spinlock_t cp_lock; /* for flag in ckpt */
1574 struct inode *meta_inode; /* cache meta blocks */
1575 struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
1576 struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
1577 struct f2fs_rwsem node_write; /* locking node writes */
1578 struct f2fs_rwsem node_change; /* locking node change */
1579 wait_queue_head_t cp_wait;
1580 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
1581 long interval_time[MAX_TIME]; /* to store thresholds */
1582 struct ckpt_req_control cprc_info; /* for checkpoint request control */
1583
1584 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
1585
1586 spinlock_t fsync_node_lock; /* for node entry lock */
1587 struct list_head fsync_node_list; /* node list head */
1588 unsigned int fsync_seg_id; /* sequence id */
1589 unsigned int fsync_node_num; /* number of node entries */
1590
1591 /* for orphan inode, use 0'th array */
1592 unsigned int max_orphans; /* max orphan inodes */
1593
1594 /* for inode management */
1595 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
1596 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
1597 struct mutex flush_lock; /* for flush exclusion */
1598
1599 /* for extent tree cache */
1600 struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
1601 atomic64_t allocated_data_blocks; /* for block age extent_cache */
1602
1603 /* The thresholds used for hot and warm data separation */
1604 unsigned int hot_data_age_threshold;
1605 unsigned int warm_data_age_threshold;
1606 unsigned int last_age_weight;
1607
1608 /* basic filesystem units */
1609 unsigned int log_sectors_per_block; /* log2 sectors per block */
1610 unsigned int log_blocksize; /* log2 block size */
1611 unsigned int blocksize; /* block size */
1612 unsigned int root_ino_num; /* root inode number*/
1613 unsigned int node_ino_num; /* node inode number*/
1614 unsigned int meta_ino_num; /* meta inode number*/
1615 unsigned int log_blocks_per_seg; /* log2 blocks per segment */
1616 unsigned int blocks_per_seg; /* blocks per segment */
1617 unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
1618 unsigned int segs_per_sec; /* segments per section */
1619 unsigned int secs_per_zone; /* sections per zone */
1620 unsigned int total_sections; /* total section count */
1621 unsigned int total_node_count; /* total node block count */
1622 unsigned int total_valid_node_count; /* valid node block count */
1623 int dir_level; /* directory level */
1624 bool readdir_ra; /* readahead inode in readdir */
1625 u64 max_io_bytes; /* max io bytes to merge IOs */
1626
1627 block_t user_block_count; /* # of user blocks */
1628 block_t total_valid_block_count; /* # of valid blocks */
1629 block_t discard_blks; /* discard command candidates */
1630 block_t last_valid_block_count; /* for recovery */
1631 block_t reserved_blocks; /* configurable reserved blocks */
1632 block_t current_reserved_blocks; /* current reserved blocks */
1633
1634 /* Additional tracking for no checkpoint mode */
1635 block_t unusable_block_count; /* # of blocks saved by last cp */
1636
1637 unsigned int nquota_files; /* # of quota sysfile */
1638 struct f2fs_rwsem quota_sem; /* blocking cp for flags */
1639
1640 /* # of pages, see count_type */
1641 atomic_t nr_pages[NR_COUNT_TYPE];
1642 /* # of allocated blocks */
1643 struct percpu_counter alloc_valid_block_count;
1644 /* # of node block writes as roll forward recovery */
1645 struct percpu_counter rf_node_block_count;
1646
1647 /* writeback control */
1648 atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */
1649
1650 /* valid inode count */
1651 struct percpu_counter total_valid_inode_count;
1652
1653 struct f2fs_mount_info mount_opt; /* mount options */
1654
1655 /* for cleaning operations */
1656 struct f2fs_rwsem gc_lock; /*
1657 * semaphore for GC, avoid
1658 * race between GC and GC or CP
1659 */
1660 struct f2fs_gc_kthread *gc_thread; /* GC thread */
1661 struct atgc_management am; /* atgc management */
1662 unsigned int cur_victim_sec; /* current victim section num */
1663 unsigned int gc_mode; /* current GC state */
1664 unsigned int next_victim_seg[2]; /* next segment in victim section */
1665 spinlock_t gc_remaining_trials_lock;
1666 /* remaining trial count for GC_URGENT_* and GC_IDLE_* */
1667 unsigned int gc_remaining_trials;
1668
1669 /* for skip statistic */
1670 unsigned long long skipped_gc_rwsem; /* FG_GC only */
1671
1672 /* threshold for gc trials on pinned files */
1673 u64 gc_pin_file_threshold;
1674 struct f2fs_rwsem pin_sem;
1675
1676 /* maximum # of trials to find a victim segment for SSR and GC */
1677 unsigned int max_victim_search;
1678 /* migration granularity of garbage collection, unit: segment */
1679 unsigned int migration_granularity;
1680
1681 /*
1682 * for stat information.
1683 * in the two-element arrays below, index 0 is for the LFS mode and index 1 for the SSR mode.
1684 */
1685 #ifdef CONFIG_F2FS_STAT_FS
1686 struct f2fs_stat_info *stat_info; /* FS status information */
1687 atomic_t meta_count[META_MAX]; /* # of meta blocks */
1688 unsigned int segment_count[2]; /* # of allocated segments */
1689 unsigned int block_count[2]; /* # of allocated blocks */
1690 atomic_t inplace_count; /* # of inplace update */
1691 /* # of lookup extent cache */
1692 atomic64_t total_hit_ext[NR_EXTENT_CACHES];
1693 /* # of hit rbtree extent node */
1694 atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
1695 /* # of hit cached extent node */
1696 atomic64_t read_hit_cached[NR_EXTENT_CACHES];
1697 /* # of hit largest extent node in read extent cache */
1698 atomic64_t read_hit_largest;
1699 atomic_t inline_xattr; /* # of inline_xattr inodes */
1700 atomic_t inline_inode; /* # of inline_data inodes */
1701 atomic_t inline_dir; /* # of inline_dentry inodes */
1702 atomic_t compr_inode; /* # of compressed inodes */
1703 atomic64_t compr_blocks; /* # of compressed blocks */
1704 atomic_t swapfile_inode; /* # of swapfile inodes */
1705 atomic_t atomic_files; /* # of opened atomic file */
1706 atomic_t max_aw_cnt; /* max # of atomic writes */
1707 unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
1708 unsigned int other_skip_bggc; /* skip background gc for other reasons */
1709 unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
1710 atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp call */
1711 #endif
1712 spinlock_t stat_lock; /* lock for stat operations */
1713
1714 /* to attach REQ_META|REQ_FUA flags */
1715 unsigned int data_io_flag;
1716 unsigned int node_io_flag;
1717
1718 /* For sysfs support */
1719 struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
1720 struct completion s_kobj_unregister;
1721
1722 struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
1723 struct completion s_stat_kobj_unregister;
1724
1725 struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */
1726 struct completion s_feature_list_kobj_unregister;
1727
1728 /* For shrinker support */
1729 struct list_head s_list;
1730 struct mutex umount_mutex;
1731 unsigned int shrinker_run_no;
1732
1733 /* For multi devices */
1734 int s_ndevs; /* number of devices */
1735 struct f2fs_dev_info *devs; /* for device list */
1736 unsigned int dirty_device; /* for checkpoint data flush */
1737 spinlock_t dev_lock; /* protect dirty_device */
1738 bool aligned_blksize; /* all devices have the same logical blksize */
1739
1740 /* For write statistics */
1741 u64 sectors_written_start;
1742 u64 kbytes_written;
1743
1744 /* Reference to checksum algorithm driver via cryptoapi */
1745 struct crypto_shash *s_chksum_driver;
1746
1747 /* Precomputed FS UUID checksum for seeding other checksums */
1748 __u32 s_chksum_seed;
1749
1750 struct workqueue_struct *post_read_wq; /* post read workqueue */
1751
1752 /*
1753 * If we are in irq context, defer updating the error information
1754 * in the on-disk superblock to this work item.
1755 */
1756 struct work_struct s_error_work;
1757 unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
1758 unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */
1759 spinlock_t error_lock; /* protect errors/stop_reason array */
1760 bool error_dirty; /* recorded errors not yet flushed to on-disk sb */
1761
1762 struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
1763 unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
1764
1765 /* For reclaimed segs statistics per each GC mode */
1766 unsigned int gc_segment_mode; /* GC state for reclaimed segments */
1767 unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */
1768
1769 unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */
1770
1771 int max_fragment_chunk; /* max chunk size for block fragmentation mode */
1772 int max_fragment_hole; /* max hole size for block fragmentation mode */
1773
1774 /* For atomic write statistics */
1775 atomic64_t current_atomic_write;
1776 s64 peak_atomic_write;
1777 u64 committed_atomic_block;
1778 u64 revoked_atomic_block;
1779
1780 #ifdef CONFIG_F2FS_FS_COMPRESSION
1781 struct kmem_cache *page_array_slab; /* page array entry */
1782 unsigned int page_array_slab_size; /* default page array slab size */
1783
1784 /* For runtime compression statistics */
1785 u64 compr_written_block;
1786 u64 compr_saved_block;
1787 u32 compr_new_inode;
1788
1789 /* For compressed block cache */
1790 struct inode *compress_inode; /* cache compressed blocks */
1791 unsigned int compress_percent; /* cache page percentage */
1792 unsigned int compress_watermark; /* cache page watermark */
1793 atomic_t compress_page_hit; /* cache hit count */
1794 #endif
1795
1796 #ifdef CONFIG_F2FS_IOSTAT
1797 /* For app/fs IO statistics */
1798 spinlock_t iostat_lock;
1799 unsigned long long iostat_count[NR_IO_TYPE];
1800 unsigned long long iostat_bytes[NR_IO_TYPE];
1801 unsigned long long prev_iostat_bytes[NR_IO_TYPE];
1802 bool iostat_enable;
1803 unsigned long iostat_next_period;
1804 unsigned int iostat_period_ms;
1805
1806 /* For io latency related statistics info in one iostat period */
1807 spinlock_t iostat_lat_lock;
1808 struct iostat_lat_info *iostat_io_lat;
1809 #endif
1810 };
1811
1812 /* Definitions to access f2fs_sb_info */
1813 #define BLKS_PER_SEG(sbi) \
1814 ((sbi)->blocks_per_seg)
1815 #define BLKS_PER_SEC(sbi) \
1816 ((sbi)->segs_per_sec << (sbi)->log_blocks_per_seg)
1817 #define SEGS_PER_SEC(sbi) \
1818 ((sbi)->segs_per_sec)
1819
1820 __printf(3, 4)
1821 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
1822
1823 #define f2fs_err(sbi, fmt, ...) \
1824 f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
1825 #define f2fs_warn(sbi, fmt, ...) \
1826 f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
1827 #define f2fs_notice(sbi, fmt, ...) \
1828 f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
1829 #define f2fs_info(sbi, fmt, ...) \
1830 f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
1831 #define f2fs_debug(sbi, fmt, ...) \
1832 f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
1833
1834 #define f2fs_err_ratelimited(sbi, fmt, ...) \
1835 f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
1836 #define f2fs_warn_ratelimited(sbi, fmt, ...) \
1837 f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
1838 #define f2fs_info_ratelimited(sbi, fmt, ...) \
1839 f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
1840
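/*
 * Example: f2fs_warn(sbi, "invalid blkaddr %u", blkaddr) logs at
 * KERN_WARNING with the filesystem instance identified in the prefix;
 * the *_ratelimited variants additionally suppress message floods.
 */
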
1841 #ifdef CONFIG_F2FS_FAULT_INJECTION
1842 #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \
1843 __builtin_return_address(0))
1844 static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
1845 const char *func, const char *parent_func)
1846 {
1847 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1848
1849 if (!ffi->inject_rate)
1850 return false;
1851
1852 if (!IS_FAULT_SET(ffi, type))
1853 return false;
1854
1855 atomic_inc(&ffi->inject_ops);
1856 if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1857 atomic_set(&ffi->inject_ops, 0);
1858 f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
1859 f2fs_fault_name[type], func, parent_func);
1860 return true;
1861 }
1862 return false;
1863 }
1864 #else
1865 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1866 {
1867 return false;
1868 }
1869 #endif
1870
1871 /*
1872 * Test if the mounted volume is a multi-device volume.
1873 * - For a single regular disk volume, sbi->s_ndevs is 0.
1874 * - For a single zoned disk volume, sbi->s_ndevs is 1.
1875 * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1876 */
1877 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1878 {
1879 return sbi->s_ndevs > 1;
1880 }
1881
1882 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1883 {
1884 unsigned long now = jiffies;
1885
1886 sbi->last_time[type] = now;
1887
1888 /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1889 if (type == REQ_TIME) {
1890 sbi->last_time[DISCARD_TIME] = now;
1891 sbi->last_time[GC_TIME] = now;
1892 }
1893 }
1894
1895 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1896 {
1897 unsigned long interval = sbi->interval_time[type] * HZ;
1898
1899 return time_after(jiffies, sbi->last_time[type] + interval);
1900 }
1901
1902 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1903 int type)
1904 {
1905 unsigned long interval = sbi->interval_time[type] * HZ;
1906 unsigned int wait_ms = 0;
1907 long delta;
1908
1909 delta = (sbi->last_time[type] + interval) - jiffies;
1910 if (delta > 0)
1911 wait_ms = jiffies_to_msecs(delta);
1912
1913 return wait_ms;
1914 }
1915
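/*
 * Pacing sketch (illustrative): a background thread can combine the two
 * helpers above to sleep out the remainder of an interval:
 *
 *	if (!f2fs_time_over(sbi, GC_TIME))
 *		msleep(f2fs_time_to_wait(sbi, GC_TIME));
 */
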
1916 /*
1917 * Inline functions
1918 */
1919 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1920 const void *address, unsigned int length)
1921 {
1922 struct {
1923 struct shash_desc shash;
1924 char ctx[4];
1925 } desc;
1926 int err;
1927
1928 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1929
1930 desc.shash.tfm = sbi->s_chksum_driver;
1931 *(u32 *)desc.ctx = crc;
1932
1933 err = crypto_shash_update(&desc.shash, address, length);
1934 BUG_ON(err);
1935
1936 return *(u32 *)desc.ctx;
1937 }
1938
1939 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1940 unsigned int length)
1941 {
1942 return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1943 }
1944
1945 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1946 void *buf, size_t buf_size)
1947 {
1948 return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1949 }
1950
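/*
 * Example (illustrative): verifying a checkpoint block whose CRC is
 * stored at checksum_offset, as cur_cp_crc() below reads it:
 *
 *	crc_offset = le32_to_cpu(cp->checksum_offset);
 *	valid = f2fs_crc_valid(sbi, cur_cp_crc(cp), cp, crc_offset);
 */
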
1951 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1952 const void *address, unsigned int length)
1953 {
1954 return __f2fs_crc32(sbi, crc, address, length);
1955 }
1956
1957 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1958 {
1959 return container_of(inode, struct f2fs_inode_info, vfs_inode);
1960 }
1961
1962 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1963 {
1964 return sb->s_fs_info;
1965 }
1966
1967 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1968 {
1969 return F2FS_SB(inode->i_sb);
1970 }
1971
1972 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1973 {
1974 return F2FS_I_SB(mapping->host);
1975 }
1976
1977 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1978 {
1979 return F2FS_M_SB(page_file_mapping(page));
1980 }
1981
1982 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1983 {
1984 return (struct f2fs_super_block *)(sbi->raw_super);
1985 }
1986
1987 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1988 {
1989 return (struct f2fs_checkpoint *)(sbi->ckpt);
1990 }
1991
1992 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1993 {
1994 return (struct f2fs_node *)page_address(page);
1995 }
1996
1997 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1998 {
1999 return &((struct f2fs_node *)page_address(page))->i;
2000 }
2001
2002 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
2003 {
2004 return (struct f2fs_nm_info *)(sbi->nm_info);
2005 }
2006
2007 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
2008 {
2009 return (struct f2fs_sm_info *)(sbi->sm_info);
2010 }
2011
2012 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
2013 {
2014 return (struct sit_info *)(SM_I(sbi)->sit_info);
2015 }
2016
2017 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
2018 {
2019 return (struct free_segmap_info *)(SM_I(sbi)->free_info);
2020 }
2021
2022 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
2023 {
2024 return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
2025 }
2026
2027 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
2028 {
2029 return sbi->meta_inode->i_mapping;
2030 }
2031
2032 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
2033 {
2034 return sbi->node_inode->i_mapping;
2035 }
2036
2037 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
2038 {
2039 return test_bit(type, &sbi->s_flag);
2040 }
2041
2042 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2043 {
2044 set_bit(type, &sbi->s_flag);
2045 }
2046
2047 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
2048 {
2049 clear_bit(type, &sbi->s_flag);
2050 }
2051
2052 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
2053 {
2054 return le64_to_cpu(cp->checkpoint_ver);
2055 }
2056
2057 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
2058 {
2059 if (type < F2FS_MAX_QUOTAS)
2060 return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
2061 return 0;
2062 }
2063
2064 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
2065 {
2066 size_t crc_offset = le32_to_cpu(cp->checksum_offset);
2067 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
2068 }
2069
2070 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2071 {
2072 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2073
2074 return ckpt_flags & f;
2075 }
2076
2077 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2078 {
2079 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
2080 }
2081
2082 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2083 {
2084 unsigned int ckpt_flags;
2085
2086 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2087 ckpt_flags |= f;
2088 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2089 }
2090
2091 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2092 {
2093 unsigned long flags;
2094
2095 spin_lock_irqsave(&sbi->cp_lock, flags);
2096 __set_ckpt_flags(F2FS_CKPT(sbi), f);
2097 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2098 }
2099
2100 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
2101 {
2102 unsigned int ckpt_flags;
2103
2104 ckpt_flags = le32_to_cpu(cp->ckpt_flags);
2105 ckpt_flags &= (~f);
2106 cp->ckpt_flags = cpu_to_le32(ckpt_flags);
2107 }
2108
2109 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
2110 {
2111 unsigned long flags;
2112
2113 spin_lock_irqsave(&sbi->cp_lock, flags);
2114 __clear_ckpt_flags(F2FS_CKPT(sbi), f);
2115 spin_unlock_irqrestore(&sbi->cp_lock, flags);
2116 }
2117
2118 #define init_f2fs_rwsem(sem) \
2119 do { \
2120 static struct lock_class_key __key; \
2121 \
2122 __init_f2fs_rwsem((sem), #sem, &__key); \
2123 } while (0)
2124
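/*
 * Note: the static lock_class_key above gives each init_f2fs_rwsem()
 * call site its own lockdep class, so independently used f2fs_rwsems
 * are not conflated in lock dependency tracking.
 */
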
2125 static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
2126 const char *sem_name, struct lock_class_key *key)
2127 {
2128 __init_rwsem(&sem->internal_rwsem, sem_name, key);
2129 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2130 init_waitqueue_head(&sem->read_waiters);
2131 #endif
2132 }
2133
2134 static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
2135 {
2136 return rwsem_is_locked(&sem->internal_rwsem);
2137 }
2138
2139 static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
2140 {
2141 return rwsem_is_contended(&sem->internal_rwsem);
2142 }
2143
2144 static inline void f2fs_down_read(struct f2fs_rwsem *sem)
2145 {
2146 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2147 wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
2148 #else
2149 down_read(&sem->internal_rwsem);
2150 #endif
2151 }
2152
2153 static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
2154 {
2155 return down_read_trylock(&sem->internal_rwsem);
2156 }
2157
2158 static inline void f2fs_up_read(struct f2fs_rwsem *sem)
2159 {
2160 up_read(&sem->internal_rwsem);
2161 }
2162
2163 static inline void f2fs_down_write(struct f2fs_rwsem *sem)
2164 {
2165 down_write(&sem->internal_rwsem);
2166 }
2167
2168 #ifdef CONFIG_DEBUG_LOCK_ALLOC
2169 static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
2170 {
2171 down_read_nested(&sem->internal_rwsem, subclass);
2172 }
2173
2174 static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
2175 {
2176 down_write_nested(&sem->internal_rwsem, subclass);
2177 }
2178 #else
2179 #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
2180 #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
2181 #endif
2182
2183 static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
2184 {
2185 return down_write_trylock(&sem->internal_rwsem);
2186 }
2187
2188 static inline void f2fs_up_write(struct f2fs_rwsem *sem)
2189 {
2190 up_write(&sem->internal_rwsem);
2191 #ifdef CONFIG_F2FS_UNFAIR_RWSEM
2192 wake_up_all(&sem->read_waiters);
2193 #endif
2194 }
2195
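/*
 * Design note: with CONFIG_F2FS_UNFAIR_RWSEM, f2fs_down_read() spins on
 * down_read_trylock() via wait_event(), so readers are not queued behind
 * a waiting writer; f2fs_up_write() then wakes all blocked readers.
 */
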
2196 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
2197 {
2198 f2fs_down_read(&sbi->cp_rwsem);
2199 }
2200
2201 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
2202 {
2203 if (time_to_inject(sbi, FAULT_LOCK_OP))
2204 return 0;
2205 return f2fs_down_read_trylock(&sbi->cp_rwsem);
2206 }
2207
2208 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
2209 {
2210 f2fs_up_read(&sbi->cp_rwsem);
2211 }
2212
2213 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
2214 {
2215 f2fs_down_write(&sbi->cp_rwsem);
2216 }
2217
2218 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
2219 {
2220 f2fs_up_write(&sbi->cp_rwsem);
2221 }
2222
2223 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
2224 {
2225 int reason = CP_SYNC;
2226
2227 if (test_opt(sbi, FASTBOOT))
2228 reason = CP_FASTBOOT;
2229 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2230 reason = CP_UMOUNT;
2231 return reason;
2232 }
2233
2234 static inline bool __remain_node_summaries(int reason)
2235 {
2236 return (reason & (CP_UMOUNT | CP_FASTBOOT));
2237 }
2238
2239 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
2240 {
2241 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
2242 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
2243 }
2244
2245 /*
2246 * Check whether the inode has blocks or not
2247 */
2248 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
2249 {
2250 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
2251
2252 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
2253 }
2254
2255 static inline bool f2fs_has_xattr_block(unsigned int ofs)
2256 {
2257 return ofs == XATTR_NODE_OFFSET;
2258 }
2259
2260 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
2261 struct inode *inode, bool cap)
2262 {
2263 if (!inode)
2264 return true;
2265 if (!test_opt(sbi, RESERVE_ROOT))
2266 return false;
2267 if (IS_NOQUOTA(inode))
2268 return true;
2269 if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
2270 return true;
2271 if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
2272 in_group_p(F2FS_OPTION(sbi).s_resgid))
2273 return true;
2274 if (cap && capable(CAP_SYS_RESOURCE))
2275 return true;
2276 return false;
2277 }
2278
2279 static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
2280 struct inode *inode, bool cap)
2281 {
2282 block_t avail_user_block_count;
2283
2284 avail_user_block_count = sbi->user_block_count -
2285 sbi->current_reserved_blocks;
2286
2287 if (!__allow_reserved_blocks(sbi, inode, cap))
2288 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2289
2290 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2291 if (avail_user_block_count > sbi->unusable_block_count)
2292 avail_user_block_count -= sbi->unusable_block_count;
2293 else
2294 avail_user_block_count = 0;
2295 }
2296
2297 return avail_user_block_count;
2298 }
2299
2300 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
2301 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
2302 struct inode *inode, blkcnt_t *count, bool partial)
2303 {
2304 long long diff = 0, release = 0;
2305 block_t avail_user_block_count;
2306 int ret;
2307
2308 ret = dquot_reserve_block(inode, *count);
2309 if (ret)
2310 return ret;
2311
2312 if (time_to_inject(sbi, FAULT_BLOCK)) {
2313 release = *count;
2314 goto release_quota;
2315 }
2316
2317 /*
2318 * let's increase this prior to the actual block count change in order
2319 * for f2fs_sync_file to avoid data races when deciding on a checkpoint.
2320 */
2321 percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2322
2323 spin_lock(&sbi->stat_lock);
2324
2325 avail_user_block_count = get_available_block_count(sbi, inode, true);
2326 diff = (long long)sbi->total_valid_block_count + *count -
2327 avail_user_block_count;
2328 if (unlikely(diff > 0)) {
2329 if (!partial) {
2330 spin_unlock(&sbi->stat_lock);
2331 release = *count;
2332 goto enospc;
2333 }
2334 if (diff > *count)
2335 diff = *count;
2336 *count -= diff;
2337 release = diff;
2338 if (!*count) {
2339 spin_unlock(&sbi->stat_lock);
2340 goto enospc;
2341 }
2342 }
2343 sbi->total_valid_block_count += (block_t)(*count);
2344
2345 spin_unlock(&sbi->stat_lock);
2346
2347 if (unlikely(release)) {
2348 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2349 dquot_release_reservation_block(inode, release);
2350 }
2351 f2fs_i_blocks_write(inode, *count, true, true);
2352 return 0;
2353
2354 enospc:
2355 percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2356 release_quota:
2357 dquot_release_reservation_block(inode, release);
2358 return -ENOSPC;
2359 }
2360
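/*
 * Usage note: when partial is true, a caller may be granted fewer blocks
 * than requested; *count is trimmed to what actually fit and the excess
 * reservation is released, so callers must re-check *count on return.
 */
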
2361 #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
2362 static inline bool page_private_##name(struct page *page) \
2363 { \
2364 return PagePrivate(page) && \
2365 test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
2366 test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2367 }
2368
2369 #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
2370 static inline void set_page_private_##name(struct page *page) \
2371 { \
2372 if (!PagePrivate(page)) \
2373 attach_page_private(page, (void *)0); \
2374 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
2375 set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2376 }
2377
2378 #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
2379 static inline void clear_page_private_##name(struct page *page) \
2380 { \
2381 clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
2382 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
2383 detach_page_private(page); \
2384 }
2385
2386 PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
2387 PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
2388 PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
2389
2390 PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
2391 PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
2392 PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
2393
2394 PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
2395 PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
2396 PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
2397
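/*
 * The macros above expand to helpers such as page_private_gcing(),
 * set_page_private_gcing() and clear_page_private_gcing(), used like:
 *
 *	set_page_private_gcing(page);
 *	if (page_private_gcing(page))
 *		...
 *	clear_page_private_gcing(page);
 */
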
2398 static inline unsigned long get_page_private_data(struct page *page)
2399 {
2400 unsigned long data = page_private(page);
2401
2402 if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
2403 return 0;
2404 return data >> PAGE_PRIVATE_MAX;
2405 }
2406
2407 static inline void set_page_private_data(struct page *page, unsigned long data)
2408 {
2409 if (!PagePrivate(page))
2410 attach_page_private(page, (void *)0);
2411 set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
2412 page_private(page) |= data << PAGE_PRIVATE_MAX;
2413 }
2414
2415 static inline void clear_page_private_data(struct page *page)
2416 {
2417 page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
2418 if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
2419 detach_page_private(page);
2420 }
2421
2422 static inline void clear_page_private_all(struct page *page)
2423 {
2424 clear_page_private_data(page);
2425 clear_page_private_reference(page);
2426 clear_page_private_gcing(page);
2427 clear_page_private_inline(page);
2428
2429 f2fs_bug_on(F2FS_P_SB(page), page_private(page));
2430 }
2431
2432 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2433 struct inode *inode,
2434 block_t count)
2435 {
2436 blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2437
2438 spin_lock(&sbi->stat_lock);
2439 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2440 sbi->total_valid_block_count -= (block_t)count;
2441 if (sbi->reserved_blocks &&
2442 sbi->current_reserved_blocks < sbi->reserved_blocks)
2443 sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2444 sbi->current_reserved_blocks + count);
2445 spin_unlock(&sbi->stat_lock);
2446 if (unlikely(inode->i_blocks < sectors)) {
2447 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2448 inode->i_ino,
2449 (unsigned long long)inode->i_blocks,
2450 (unsigned long long)sectors);
2451 set_sbi_flag(sbi, SBI_NEED_FSCK);
2452 return;
2453 }
2454 f2fs_i_blocks_write(inode, count, false, true);
2455 }
2456
2457 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2458 {
2459 atomic_inc(&sbi->nr_pages[count_type]);
2460
2461 if (count_type == F2FS_DIRTY_DENTS ||
2462 count_type == F2FS_DIRTY_NODES ||
2463 count_type == F2FS_DIRTY_META ||
2464 count_type == F2FS_DIRTY_QDATA ||
2465 count_type == F2FS_DIRTY_IMETA)
2466 set_sbi_flag(sbi, SBI_IS_DIRTY);
2467 }
2468
2469 static inline void inode_inc_dirty_pages(struct inode *inode)
2470 {
2471 atomic_inc(&F2FS_I(inode)->dirty_pages);
2472 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2473 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2474 if (IS_NOQUOTA(inode))
2475 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2476 }
2477
2478 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2479 {
2480 atomic_dec(&sbi->nr_pages[count_type]);
2481 }
2482
2483 static inline void inode_dec_dirty_pages(struct inode *inode)
2484 {
2485 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2486 !S_ISLNK(inode->i_mode))
2487 return;
2488
2489 atomic_dec(&F2FS_I(inode)->dirty_pages);
2490 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2491 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2492 if (IS_NOQUOTA(inode))
2493 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2494 }
2495
2496 static inline void inc_atomic_write_cnt(struct inode *inode)
2497 {
2498 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2499 struct f2fs_inode_info *fi = F2FS_I(inode);
2500 u64 current_write;
2501
2502 fi->atomic_write_cnt++;
2503 atomic64_inc(&sbi->current_atomic_write);
2504 current_write = atomic64_read(&sbi->current_atomic_write);
2505 if (current_write > sbi->peak_atomic_write)
2506 sbi->peak_atomic_write = current_write;
2507 }
2508
2509 static inline void release_atomic_write_cnt(struct inode *inode)
2510 {
2511 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2512 struct f2fs_inode_info *fi = F2FS_I(inode);
2513
2514 atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
2515 fi->atomic_write_cnt = 0;
2516 }
2517
2518 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2519 {
2520 return atomic_read(&sbi->nr_pages[count_type]);
2521 }
2522
2523 static inline int get_dirty_pages(struct inode *inode)
2524 {
2525 return atomic_read(&F2FS_I(inode)->dirty_pages);
2526 }
2527
2528 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2529 {
2530 return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
2531 BLKS_PER_SEC(sbi));
2532 }
2533
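/*
 * Example: div_u64(x + BLKS_PER_SEC(sbi) - 1, BLKS_PER_SEC(sbi)) rounds
 * up, so even a single dirty page in a 512-block section costs a whole
 * section in this estimate.
 */
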
2534 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2535 {
2536 return sbi->total_valid_block_count;
2537 }
2538
2539 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2540 {
2541 return sbi->discard_blks;
2542 }
2543
2544 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2545 {
2546 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2547
2548 /* return NAT or SIT bitmap */
2549 if (flag == NAT_BITMAP)
2550 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2551 else if (flag == SIT_BITMAP)
2552 return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2553
2554 return 0;
2555 }
2556
2557 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2558 {
2559 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2560 }
2561
2562 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2563 {
2564 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2565 void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
2566 int offset;
2567
2568 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2569 offset = (flag == SIT_BITMAP) ?
2570 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2571 /*
2572 * if the large_nat_bitmap feature is enabled, skip the leading __le32
2573 * checksum that protects all nat/sit bitmaps.
2574 */
2575 return tmp_ptr + offset + sizeof(__le32);
2576 }
2577
2578 if (__cp_payload(sbi) > 0) {
2579 if (flag == NAT_BITMAP)
2580 return tmp_ptr;
2581 else
2582 return (unsigned char *)ckpt + F2FS_BLKSIZE;
2583 } else {
2584 offset = (flag == NAT_BITMAP) ?
2585 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2586 return tmp_ptr + offset;
2587 }
2588 }
2589
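/*
 * Bitmap layout as decoded above (illustrative summary):
 * - large_nat_bitmap:	[__le32 checksum][NAT bitmap][SIT bitmap]
 * - cp_payload > 0:	NAT bitmap at sit_nat_version_bitmap, SIT bitmap
 *			in the following payload block(s)
 * - otherwise:		[SIT bitmap][NAT bitmap] back to back
 */
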
2590 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2591 {
2592 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2593
2594 if (sbi->cur_cp_pack == 2)
2595 start_addr += BLKS_PER_SEG(sbi);
2596 return start_addr;
2597 }
2598
2599 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2600 {
2601 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2602
2603 if (sbi->cur_cp_pack == 1)
2604 start_addr += BLKS_PER_SEG(sbi);
2605 return start_addr;
2606 }
2607
2608 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2609 {
2610 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2611 }
2612
2613 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2614 {
2615 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2616 }
2617
2618 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
2619 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2620 struct inode *inode, bool is_inode)
2621 {
2622 block_t valid_block_count;
2623 unsigned int valid_node_count;
2624 unsigned int avail_user_block_count;
2625 int err;
2626
2627 if (is_inode) {
2628 if (inode) {
2629 err = dquot_alloc_inode(inode);
2630 if (err)
2631 return err;
2632 }
2633 } else {
2634 err = dquot_reserve_block(inode, 1);
2635 if (err)
2636 return err;
2637 }
2638
2639 if (time_to_inject(sbi, FAULT_BLOCK))
2640 goto enospc;
2641
2642 spin_lock(&sbi->stat_lock);
2643
2644 valid_block_count = sbi->total_valid_block_count + 1;
2645 avail_user_block_count = get_available_block_count(sbi, inode, false);
2646
2647 if (unlikely(valid_block_count > avail_user_block_count)) {
2648 spin_unlock(&sbi->stat_lock);
2649 goto enospc;
2650 }
2651
2652 valid_node_count = sbi->total_valid_node_count + 1;
2653 if (unlikely(valid_node_count > sbi->total_node_count)) {
2654 spin_unlock(&sbi->stat_lock);
2655 goto enospc;
2656 }
2657
2658 sbi->total_valid_node_count++;
2659 sbi->total_valid_block_count++;
2660 spin_unlock(&sbi->stat_lock);
2661
2662 if (inode) {
2663 if (is_inode)
2664 f2fs_mark_inode_dirty_sync(inode, true);
2665 else
2666 f2fs_i_blocks_write(inode, 1, true, true);
2667 }
2668
2669 percpu_counter_inc(&sbi->alloc_valid_block_count);
2670 return 0;
2671
2672 enospc:
2673 if (is_inode) {
2674 if (inode)
2675 dquot_free_inode(inode);
2676 } else {
2677 dquot_release_reservation_block(inode, 1);
2678 }
2679 return -ENOSPC;
2680 }
2681
2682 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2683 struct inode *inode, bool is_inode)
2684 {
2685 spin_lock(&sbi->stat_lock);
2686
2687 if (unlikely(!sbi->total_valid_block_count ||
2688 !sbi->total_valid_node_count)) {
2689 f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
2690 sbi->total_valid_block_count,
2691 sbi->total_valid_node_count);
2692 set_sbi_flag(sbi, SBI_NEED_FSCK);
2693 } else {
2694 sbi->total_valid_block_count--;
2695 sbi->total_valid_node_count--;
2696 }
2697
2698 if (sbi->reserved_blocks &&
2699 sbi->current_reserved_blocks < sbi->reserved_blocks)
2700 sbi->current_reserved_blocks++;
2701
2702 spin_unlock(&sbi->stat_lock);
2703
2704 if (is_inode) {
2705 dquot_free_inode(inode);
2706 } else {
2707 if (unlikely(inode->i_blocks == 0)) {
2708 f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2709 inode->i_ino,
2710 (unsigned long long)inode->i_blocks);
2711 set_sbi_flag(sbi, SBI_NEED_FSCK);
2712 return;
2713 }
2714 f2fs_i_blocks_write(inode, 1, false, true);
2715 }
2716 }
2717
2718 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2719 {
2720 return sbi->total_valid_node_count;
2721 }
2722
2723 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2724 {
2725 percpu_counter_inc(&sbi->total_valid_inode_count);
2726 }
2727
2728 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2729 {
2730 percpu_counter_dec(&sbi->total_valid_inode_count);
2731 }
2732
2733 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2734 {
2735 return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2736 }
2737
2738 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2739 pgoff_t index, bool for_write)
2740 {
2741 struct page *page;
2742 unsigned int flags;
2743
2744 if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2745 if (!for_write)
2746 page = find_get_page_flags(mapping, index,
2747 FGP_LOCK | FGP_ACCESSED);
2748 else
2749 page = find_lock_page(mapping, index);
2750 if (page)
2751 return page;
2752
2753 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
2754 return NULL;
2755 }
2756
2757 if (!for_write)
2758 return grab_cache_page(mapping, index);
2759
2760 flags = memalloc_nofs_save();
2761 page = grab_cache_page_write_begin(mapping, index);
2762 memalloc_nofs_restore(flags);
2763
2764 return page;
2765 }
2766
2767 static inline struct page *f2fs_pagecache_get_page(
2768 struct address_space *mapping, pgoff_t index,
2769 fgf_t fgp_flags, gfp_t gfp_mask)
2770 {
2771 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
2772 return NULL;
2773
2774 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2775 }
2776
2777 static inline void f2fs_put_page(struct page *page, int unlock)
2778 {
2779 if (!page)
2780 return;
2781
2782 if (unlock) {
2783 f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2784 unlock_page(page);
2785 }
2786 put_page(page);
2787 }
2788
2789 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2790 {
2791 if (dn->node_page)
2792 f2fs_put_page(dn->node_page, 1);
2793 if (dn->inode_page && dn->node_page != dn->inode_page)
2794 f2fs_put_page(dn->inode_page, 0);
2795 dn->node_page = NULL;
2796 dn->inode_page = NULL;
2797 }
2798
2799 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2800 size_t size)
2801 {
2802 return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2803 }
2804
2805 static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
2806 gfp_t flags)
2807 {
2808 void *entry;
2809
2810 entry = kmem_cache_alloc(cachep, flags);
2811 if (!entry)
2812 entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2813 return entry;
2814 }
2815
2816 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2817 gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
2818 {
2819 if (nofail)
2820 return f2fs_kmem_cache_alloc_nofail(cachep, flags);
2821
2822 if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
2823 return NULL;
2824
2825 return kmem_cache_alloc(cachep, flags);
2826 }
2827
2828 static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
2829 {
2830 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2831 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2832 get_pages(sbi, F2FS_WB_CP_DATA) ||
2833 get_pages(sbi, F2FS_DIO_READ) ||
2834 get_pages(sbi, F2FS_DIO_WRITE))
2835 return true;
2836
2837 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2838 atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2839 return true;
2840
2841 if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2842 atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2843 return true;
2844 return false;
2845 }
2846
2847 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2848 {
2849 if (sbi->gc_mode == GC_URGENT_HIGH)
2850 return true;
2851
2852 if (is_inflight_io(sbi, type))
2853 return false;
2854
2855 if (sbi->gc_mode == GC_URGENT_MID)
2856 return true;
2857
2858 if (sbi->gc_mode == GC_URGENT_LOW &&
2859 (type == DISCARD_TIME || type == GC_TIME))
2860 return true;
2861
2862 return f2fs_time_over(sbi, type);
2863 }
2864
2865 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2866 unsigned long index, void *item)
2867 {
2868 while (radix_tree_insert(root, index, item))
2869 cond_resched();
2870 }
2871
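/*
 * Note: this retries until radix_tree_insert() succeeds, which assumes
 * failures are transient (-ENOMEM); callers must ensure the index is not
 * already present, since -EEXIST would loop forever.
 */
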
2872 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
2873
2874 static inline bool IS_INODE(struct page *page)
2875 {
2876 struct f2fs_node *p = F2FS_NODE(page);
2877
2878 return RAW_IS_INODE(p);
2879 }
2880
2881 static inline int offset_in_addr(struct f2fs_inode *i)
2882 {
2883 return (i->i_inline & F2FS_EXTRA_ATTR) ?
2884 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2885 }
2886
2887 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2888 {
2889 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2890 }
2891
2892 static inline int f2fs_has_extra_attr(struct inode *inode);
2893 static inline block_t data_blkaddr(struct inode *inode,
2894 struct page *node_page, unsigned int offset)
2895 {
2896 struct f2fs_node *raw_node;
2897 __le32 *addr_array;
2898 int base = 0;
2899 bool is_inode = IS_INODE(node_page);
2900
2901 raw_node = F2FS_NODE(node_page);
2902
2903 if (is_inode) {
2904 if (!inode)
2905 /* from GC path only */
2906 base = offset_in_addr(&raw_node->i);
2907 else if (f2fs_has_extra_attr(inode))
2908 base = get_extra_isize(inode);
2909 }
2910
2911 addr_array = blkaddr_in_node(raw_node);
2912 return le32_to_cpu(addr_array[base + offset]);
2913 }
2914
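/*
 * Worked example (illustrative): with i_extra_isize == 32, base becomes
 * 32 / sizeof(__le32) == 8, so data_blkaddr(inode, node_page, 0) reads
 * i_addr[8], the first address slot after the extra attribute area.
 */
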
2915 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2916 {
2917 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2918 }
2919
2920 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2921 {
2922 int mask;
2923
2924 addr += (nr >> 3);
2925 mask = BIT(7 - (nr & 0x07));
2926 return mask & *addr;
2927 }
2928
2929 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2930 {
2931 int mask;
2932
2933 addr += (nr >> 3);
2934 mask = BIT(7 - (nr & 0x07));
2935 *addr |= mask;
2936 }
2937
2938 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2939 {
2940 int mask;
2941
2942 addr += (nr >> 3);
2943 mask = BIT(7 - (nr & 0x07));
2944 *addr &= ~mask;
2945 }
2946
2947 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2948 {
2949 int mask;
2950 int ret;
2951
2952 addr += (nr >> 3);
2953 mask = BIT(7 - (nr & 0x07));
2954 ret = mask & *addr;
2955 *addr |= mask;
2956 return ret;
2957 }
2958
2959 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2960 {
2961 int mask;
2962 int ret;
2963
2964 addr += (nr >> 3);
2965 mask = BIT(7 - (nr & 0x07));
2966 ret = mask & *addr;
2967 *addr &= ~mask;
2968 return ret;
2969 }
2970
2971 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2972 {
2973 int mask;
2974
2975 addr += (nr >> 3);
2976 mask = BIT(7 - (nr & 0x07));
2977 *addr ^= mask;
2978 }
2979
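/*
 * Note: unlike the generic <linux/bitops.h> helpers, the bit helpers
 * above use MSB-first numbering within each byte to match the on-disk
 * bitmap format: nr = 0 maps to mask 0x80 of addr[0], nr = 9 to mask
 * 0x40 of addr[1].
 */
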
2980 /*
2981 * On-disk inode flags (f2fs_inode::i_flags)
2982 */
2983 #define F2FS_COMPR_FL 0x00000004 /* Compress file */
2984 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
2985 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
2986 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
2987 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
2988 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
2989 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */
2990 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
2991 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2992 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
2993 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
2994
2995 #define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL)
2996
2997 /* Flags that should be inherited by new inodes from their parent. */
2998 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2999 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3000 F2FS_CASEFOLD_FL)
3001
3002 /* Flags that are appropriate for regular files (all but dir-specific ones). */
3003 #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
3004 F2FS_CASEFOLD_FL))
3005
3006 /* Flags that are appropriate for non-directories/regular files. */
3007 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
3008
3009 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
3010 {
3011 if (S_ISDIR(mode))
3012 return flags;
3013 else if (S_ISREG(mode))
3014 return flags & F2FS_REG_FLMASK;
3015 else
3016 return flags & F2FS_OTHER_FLMASK;
3017 }
3018
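/*
 * Example: a symlink inheriting F2FS_NOATIME_FL | F2FS_DIRSYNC_FL keeps
 * only F2FS_NOATIME_FL, since F2FS_OTHER_FLMASK drops directory-only
 * flags for non-regular, non-directory inodes.
 */
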
3019 static inline void __mark_inode_dirty_flag(struct inode *inode,
3020 int flag, bool set)
3021 {
3022 switch (flag) {
3023 case FI_INLINE_XATTR:
3024 case FI_INLINE_DATA:
3025 case FI_INLINE_DENTRY:
3026 case FI_NEW_INODE:
3027 if (set)
3028 return;
3029 fallthrough;
3030 case FI_DATA_EXIST:
3031 case FI_INLINE_DOTS:
3032 case FI_PIN_FILE:
3033 case FI_COMPRESS_RELEASED:
3034 case FI_ATOMIC_COMMITTED:
3035 f2fs_mark_inode_dirty_sync(inode, true);
3036 }
3037 }
3038
3039 static inline void set_inode_flag(struct inode *inode, int flag)
3040 {
3041 set_bit(flag, F2FS_I(inode)->flags);
3042 __mark_inode_dirty_flag(inode, flag, true);
3043 }
3044
3045 static inline int is_inode_flag_set(struct inode *inode, int flag)
3046 {
3047 return test_bit(flag, F2FS_I(inode)->flags);
3048 }
3049
3050 static inline void clear_inode_flag(struct inode *inode, int flag)
3051 {
3052 clear_bit(flag, F2FS_I(inode)->flags);
3053 __mark_inode_dirty_flag(inode, flag, false);
3054 }
3055
3056 static inline bool f2fs_verity_in_progress(struct inode *inode)
3057 {
3058 return IS_ENABLED(CONFIG_FS_VERITY) &&
3059 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
3060 }
3061
3062 static inline void set_acl_inode(struct inode *inode, umode_t mode)
3063 {
3064 F2FS_I(inode)->i_acl_mode = mode;
3065 set_inode_flag(inode, FI_ACL_MODE);
3066 f2fs_mark_inode_dirty_sync(inode, false);
3067 }
3068
3069 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
3070 {
3071 if (inc)
3072 inc_nlink(inode);
3073 else
3074 drop_nlink(inode);
3075 f2fs_mark_inode_dirty_sync(inode, true);
3076 }
3077
3078 static inline void f2fs_i_blocks_write(struct inode *inode,
3079 block_t diff, bool add, bool claim)
3080 {
3081 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
3082 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
3083
3084 /* add == true && claim == true must be paired with a prior dquot_reserve_block() */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}
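
/*
 * Pairing sketch (illustrative, not part of the original header): a caller
 * that reserved quota up front claims that reservation on commit, e.g.
 *
 *	dquot_reserve_block(inode, count);		// reserve quota
 *	...allocate the blocks...
 *	f2fs_i_blocks_write(inode, count, true, true);	// claim reservation
 *
 * while an unreserved allocation passes claim == false, and a truncation
 * passes add == false to release the blocks back to quota.
 */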

static inline bool f2fs_is_atomic_file(struct inode *inode);

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);

	if (f2fs_is_atomic_file(inode))
		return;

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline bool f2fs_need_compress_data(struct inode *inode)
{
	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;

	if (!f2fs_compressed_file(inode))
		return false;

	if (compress_mode == COMPR_MODE_FS)
		return true;
	else if (compress_mode == COMPR_MODE_USER &&
			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
		return true;

	return false;
}
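
/*
 * Decision sketch (illustrative, not part of the original header), assuming
 * the inode is a compressed regular file in every row: with compress_mode=fs
 * data is compressed transparently, while compress_mode=user compresses only
 * after the user opted in.
 *
 *	compress_mode	FI_ENABLE_COMPRESS	f2fs_need_compress_data()
 *	COMPR_MODE_FS	(ignored)		true
 *	COMPR_MODE_USER	set			true
 *	COMPR_MODE_USER	clear			false
 */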

static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}
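
/*
 * Worked example (illustrative, not part of the original header): with the
 * default 4KB block, DEF_ADDRS_PER_BLOCK is 1018; for a compressed file
 * whose cluster size is 16 blocks this rounds down to
 * ALIGN_DOWN(1018, 16) == 1008, so the last 10 address slots of a direct
 * node block stay unused and no cluster straddles a node block boundary.
 */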

static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}

/*
 * Note: checking the inline_data flag without holding the inode page lock
 * is unsafe; it can be cleared at any time by f2fs_convert_inline_page().
 */
static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_cow_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_COW_FILE);
}

static inline __le32 *get_dnode_addr(struct inode *inode,
					struct page *node_page);
static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	__le32 *addr = get_dnode_addr(inode, page);

	return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	if (is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	if (!is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	struct timespec64 ctime = inode_get_ctime(inode);

	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	return true;
}
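
/*
 * Layout note (an inference from the indexing above, stated as an
 * assumption rather than taken from the original header): the
 * F2FS_I(inode)->i_disk_time[] array caches the timestamps last written to
 * disk in the order [0] = atime, [1] = ctime, [2] = mtime, which is exactly
 * the order the three comparisons in f2fs_is_time_consistent() walk.
 */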

static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}
	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}
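
/*
 * Example (illustrative): the name "." with len 1 and ".." with len 2
 * return true; any other name, including ".hidden" or "...", returns false
 * because only exact one- and two-dot names match.
 */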

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC))
		return NULL;

	return kmalloc(size, flags);
}

static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
{
	if (time_to_inject(sbi, FAULT_KMALLOC))
		return NULL;

	return __getname();
}

static inline void f2fs_putname(char *buf)
{
	__putname(buf);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC))
		return NULL;

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}
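
/*
 * Usage sketch (hypothetical, not part of the original header): callers
 * treat these wrappers exactly like the raw allocators, so an injected
 * failure exercises the same error path as a genuine allocation failure:
 *
 *	struct foo *p = f2fs_kzalloc(sbi, sizeof(*p), GFP_NOFS);
 *
 *	if (!p)
 *		return -ENOMEM;
 *
 * ('struct foo' is purely illustrative.)
 */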

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}

static inline __le32 *get_dnode_addr(struct inode *inode,
					struct page *node_page)
{
	int base = 0;

	if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
		base = get_extra_isize(inode);

	return blkaddr_in_node(F2FS_NODE(node_page)) + base;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_MIN_EXTRA_ATTR_SIZE	(sizeof(__le32))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE \
	(offsetof(struct f2fs_inode, i_extra_end) - \
	offsetof(struct f2fs_inode, i_extra_isize)) \

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
	((offsetof(typeof(*(f2fs_inode)), field) + \
	sizeof((f2fs_inode)->field)) \
	<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \

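/*
 * Usage sketch (illustrative; the field name is only an example): before
 * touching an extra-attribute field, check that the on-disk inode was
 * formatted with enough extra space to actually contain it:
 *
 *	if (F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid))
 *		...i_projid is valid in this inode...
 */
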
#define __is_large_section(sbi)		(SEGS_PER_SEC(sbi) > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			blkaddr, type);
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
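
/*
 * Validation sketch (illustrative, not part of the original header): a
 * typical read path filters out the special markers first and only then
 * checks the address against the device layout, e.g.
 *
 *	if (__is_valid_data_blkaddr(blkaddr))
 *		verify_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE);
 *
 * so NEW_ADDR, NULL_ADDR and COMPRESS_ADDR never reach the block layer.
 */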

/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
		struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct mnt_idmap *idmap,
		struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
		bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
		struct inode **new_inode);

/*
 * dir.c
 */
int f2fs_init_casefolded_name(const struct inode *dir,
		struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
		int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
		struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
		const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
		unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
		struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
		const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
		unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
		const struct f2fs_filename *fname,
		struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
		const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
		struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
		struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
		const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
		const struct fscrypt_str *name, f2fs_hash_t name_hash,
		unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
		struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
		struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
		struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
		struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
		struct f2fs_filename *fname);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}

/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
		bool irq_context);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
		struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
		unsigned int seq_id);
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
		struct writeback_control *wbc, bool atomic,
		unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
		struct writeback_control *wbc,
		bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
		unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);

/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
int f2fs_commit_atomic_write(struct inode *inode);
void f2fs_abort_atomic_write(struct inode *inode, bool clean);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
		struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
		unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
		unsigned int start, unsigned int end);
int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
		struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
		block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
		enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
		struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		block_t old_blkaddr, block_t new_blkaddr,
		bool recover_curseg, bool recover_newaddr,
		bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
		block_t old_addr, block_t new_addr,
		unsigned char version, bool recover_curseg,
		bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type,
		struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
		block_t blkaddr, unsigned int blkcnt);
void f2fs_wait_on_page_writeback(struct page *page,
		enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
		block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
		unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
		unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
		unsigned int segno);

#define DEF_FRAGMENT_SIZE	4
#define MIN_FRAGMENT_SIZE	1
#define MAX_FRAGMENT_SIZE	512

static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
}

/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
		unsigned char reason);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
		block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
		int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
		unsigned int ra_blocks);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
		long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
		unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
		unsigned int devidx, int type);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
		bool from_cp);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
bool f2fs_is_cp_guaranteed(struct page *page);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
		enum page_type type);
int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
		struct inode *inode, struct page *page,
		nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
		struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
		block_t blk_addr, sector_t *sector);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
		blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
		pgoff_t *next_pgofs);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
		bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
		struct bio **bio, sector_t *last_block,
		struct writeback_control *wbc,
		enum iostat_type io_type,
		int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
extern const struct iomap_ops f2fs_iomap_ops;

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections);
int f2fs_resize_fs(struct file *filp, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
		int gc_type, int type, char alloc_mode,
		unsigned long long age);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_cached[NR_EXTENT_CACHES];
	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
	unsigned long long total_ext[NR_EXTENT_CACHES];
	unsigned long long hit_total[NR_EXTENT_CACHES];
	int ext_tree[NR_EXTENT_CACHES];
	int zombie_tree[NR_EXTENT_CACHES];
	int ext_node[NR_EXTENT_CACHES];
	/* to count memory footprint */
	unsigned long long ext_mem[NR_EXTENT_CACHES];
	/* for read extent cache */
	unsigned long long hit_largest;
	/* for block age extent cache */
	unsigned long long allocated_data_blocks;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode, swapfile_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, free_segs, free_secs;
	int cp_call_count[MAX_CALL_TYPE], cp_count;
	int gc_call_count[MAX_CALL_TYPE];
	int gc_segs[2][2];
	int gc_secs[2][2];
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];

	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_call_count(sbi, foreground) \
		atomic_inc(&sbi->cp_call_count[(foreground)])
#define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi, type)	(atomic64_inc(&(sbi)->total_hit_ext[type]))
#define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
#define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
#define stat_inc_inline_xattr(inode) \
	do { \
		if (f2fs_has_inline_xattr(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
	} while (0)
#define stat_dec_inline_xattr(inode) \
	do { \
		if (f2fs_has_inline_xattr(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
	} while (0)
#define stat_inc_inline_inode(inode) \
	do { \
		if (f2fs_has_inline_data(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
	} while (0)
#define stat_dec_inline_inode(inode) \
	do { \
		if (f2fs_has_inline_data(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
	} while (0)
#define stat_inc_inline_dir(inode) \
	do { \
		if (f2fs_has_inline_dentry(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
	} while (0)
#define stat_dec_inline_dir(inode) \
	do { \
		if (f2fs_has_inline_dentry(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
	} while (0)
#define stat_inc_compr_inode(inode) \
	do { \
		if (f2fs_compressed_file(inode)) \
			(atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
	} while (0)
#define stat_dec_compr_inode(inode) \
	do { \
		if (f2fs_compressed_file(inode)) \
			(atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
	} while (0)
#define stat_add_compr_blocks(inode, blocks) \
		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_sub_compr_blocks(inode, blocks) \
		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_swapfile_inode(inode) \
		(atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
#define stat_dec_swapfile_inode(inode) \
		(atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
#define stat_inc_atomic_inode(inode) \
		(atomic_inc(&F2FS_I_SB(inode)->atomic_files))
#define stat_dec_atomic_inode(inode) \
		(atomic_dec(&F2FS_I_SB(inode)->atomic_files))
#define stat_inc_meta_count(sbi, blkaddr) \
	do { \
		if (blkaddr < SIT_I(sbi)->sit_base_addr) \
			atomic_inc(&(sbi)->meta_count[META_CP]); \
		else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_SIT]); \
		else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_NAT]); \
		else if (blkaddr < SM_I(sbi)->main_blkaddr) \
			atomic_inc(&(sbi)->meta_count[META_SSA]); \
	} while (0)
#define stat_inc_seg_type(sbi, curseg) \
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg) \
		((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
		(atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode) \
	do { \
		int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
		if (cur > max) \
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
	} while (0)
#define stat_inc_gc_call_count(sbi, foreground) \
		(F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
#define stat_inc_gc_sec_count(sbi, type, gc_type) \
		(F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
#define stat_inc_gc_seg_count(sbi, type, gc_type) \
		(F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)

#define stat_inc_tot_blk_count(si, blks) \
		((si)->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		stat_inc_tot_blk_count(si, blks); \
		si->data_blks += (blks); \
		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
	} while (0)

#define stat_inc_node_blk_count(sbi, blks, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		stat_inc_tot_blk_count(si, blks); \
		si->node_blks += (blks); \
		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_call_count(sbi, foreground)		do { } while (0)
#define stat_inc_cp_count(sbi)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi, type)			do { } while (0)
#define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_swapfile_inode(inode)			do { } while (0)
#define stat_dec_swapfile_inode(inode)			do { } while (0)
#define stat_inc_atomic_inode(inode)			do { } while (0)
#define stat_dec_atomic_inode(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_gc_call_count(sbi, foreground)		do { } while (0)
#define stat_inc_gc_sec_count(sbi, type, gc_type)	do { } while (0)
#define stat_inc_gc_seg_count(sbi, type, gc_type)	do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
		struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
		const struct f2fs_filename *fname,
		struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
		struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
		struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
		struct page *page, struct inode *dir,
		struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
		struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
		struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
		struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
void f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/* read extent cache ops */
void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
		struct extent_info *ei);
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
		block_t *blkaddr);
void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
		pgoff_t fofs, block_t blkaddr, unsigned int len);
unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
		int nr_shrink);

/* block age extent cache ops */
void f2fs_init_age_extent_tree(struct inode *inode);
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
		struct extent_info *ei);
void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
		pgoff_t fofs, unsigned int len);
unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
		int nr_shrink);

/*
 * sysfs.c
 */
#define MIN_RA_MUL	2
#define MAX_RA_MUL	256

int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}
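
/*
 * Gating sketch (illustrative, not part of the original header): the read
 * path only routes bios through the post-read processing machinery when
 * one of the conditions above holds, roughly
 *
 *	if (f2fs_post_read_required(inode))
 *		...submit via the post-read workqueue path...
 *	else
 *		...plain read; the page can be marked uptodate directly...
 */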

static inline bool f2fs_used_in_atomic_write(struct inode *inode)
{
	return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
}

static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
{
	return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
}

/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
		pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
bool f2fs_is_compress_level_valid(int alg, int lvl);
int __init f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
		block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
		int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
		int *submitted,
		struct writeback_control *wbc,
		enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
		pgoff_t fofs, block_t blkaddr,
		unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
		unsigned nr_pages, sector_t *last_block_in_bio,
		bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
		bool in_task);
void f2fs_put_page_dic(struct page *page, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
		unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
		nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
		block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
	do { \
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
		sbi->compr_new_inode++; \
	} while (0)
#define add_compr_block_stat(inode, blocks) \
	do { \
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
		int diff = F2FS_I(inode)->i_cluster_size - blocks; \
		sbi->compr_written_block += blocks; \
		sbi->compr_saved_block += diff; \
	} while (0)
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression support is not compiled in */
4350 return false;
4351 }
f2fs_is_compress_level_valid(int alg,int lvl)4352 static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
f2fs_compress_control_page(struct page * page)4353 static inline struct page *f2fs_compress_control_page(struct page *page)
4354 {
4355 WARN_ON_ONCE(1);
4356 return ERR_PTR(-EINVAL);
4357 }
f2fs_init_compress_mempool(void)4358 static inline int __init f2fs_init_compress_mempool(void) { return 0; }
f2fs_destroy_compress_mempool(void)4359 static inline void f2fs_destroy_compress_mempool(void) { }
f2fs_decompress_cluster(struct decompress_io_ctx * dic,bool in_task)4360 static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
4361 bool in_task) { }
f2fs_end_read_compressed_page(struct page * page,bool failed,block_t blkaddr,bool in_task)4362 static inline void f2fs_end_read_compressed_page(struct page *page,
4363 bool failed, block_t blkaddr, bool in_task)
4364 {
4365 WARN_ON_ONCE(1);
4366 }
f2fs_put_page_dic(struct page * page,bool in_task)4367 static inline void f2fs_put_page_dic(struct page *page, bool in_task)
4368 {
4369 WARN_ON_ONCE(1);
4370 }
f2fs_cluster_blocks_are_contiguous(struct dnode_of_data * dn,unsigned int ofs_in_node)4371 static inline unsigned int f2fs_cluster_blocks_are_contiguous(
4372 struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
f2fs_sanity_check_cluster(struct dnode_of_data * dn)4373 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
f2fs_init_compress_inode(struct f2fs_sb_info * sbi)4374 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
f2fs_destroy_compress_inode(struct f2fs_sb_info * sbi)4375 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
f2fs_init_page_array_cache(struct f2fs_sb_info * sbi)4376 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
f2fs_destroy_page_array_cache(struct f2fs_sb_info * sbi)4377 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
f2fs_init_compress_cache(void)4378 static inline int __init f2fs_init_compress_cache(void) { return 0; }
f2fs_destroy_compress_cache(void)4379 static inline void f2fs_destroy_compress_cache(void) { }
f2fs_invalidate_compress_page(struct f2fs_sb_info * sbi,block_t blkaddr)4380 static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
4381 block_t blkaddr) { }
f2fs_cache_compressed_page(struct f2fs_sb_info * sbi,struct page * page,nid_t ino,block_t blkaddr)4382 static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
4383 struct page *page, nid_t ino, block_t blkaddr) { }
f2fs_load_compressed_page(struct f2fs_sb_info * sbi,struct page * page,block_t blkaddr)4384 static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
4385 struct page *page, block_t blkaddr) { return false; }
f2fs_invalidate_compress_pages(struct f2fs_sb_info * sbi,nid_t ino)4386 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
4387 nid_t ino) { }
4388 #define inc_compr_inode_stat(inode) do { } while (0)
f2fs_update_read_extent_tree_range_compressed(struct inode * inode,pgoff_t fofs,block_t blkaddr,unsigned int llen,unsigned int c_len)4389 static inline void f2fs_update_read_extent_tree_range_compressed(
4390 struct inode *inode,
4391 pgoff_t fofs, block_t blkaddr,
4392 unsigned int llen, unsigned int c_len) { }
4393 #endif
4394
static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_compress_flag =
			F2FS_OPTION(sbi).compress_chksum ?
				BIT(COMPRESS_CHKSUM) : 0;
	F2FS_I(inode)->i_cluster_size =
			BIT(F2FS_I(inode)->i_log_cluster_size);
	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_level =
				F2FS_OPTION(sbi).compress_level;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	inc_compr_inode_stat(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
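/*
 * Minimal usage sketch (illustrative only; the real call sites live in
 * the .c files, e.g. inode creation under a compression-enabled
 * directory):
 *
 *	if (f2fs_sb_has_compression(sbi) && f2fs_may_compress(inode))
 *		err = set_compress_context(inode);
 *
 * Without CONFIG_F2FS_FS_COMPRESSION this always returns -EOPNOTSUPP.
 */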

static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	f2fs_down_write(&fi->i_sem);

	if (!f2fs_compressed_file(inode)) {
		f2fs_up_write(&fi->i_sem);
		return true;
	}
	if (f2fs_is_mmap_file(inode) ||
		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
		f2fs_up_write(&fi->i_sem);
		return false;
	}

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);

	f2fs_up_write(&fi->i_sem);
	return true;
}
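/*
 * Illustrative caller sketch: conversion back to a normal file is
 * refused while the inode is mmapped or (for regular files) already has
 * blocks, so a caller would typically bail out on failure, e.g.:
 *
 *	if (!f2fs_disable_compressed_file(inode))
 *		return -EINVAL;
 */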

#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}
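/*
 * For example, F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) below expands to:
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */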

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr / sbi->blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
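/*
 * Example (hypothetical zone geometry): with blocks_per_blkz == 65536,
 * block address 0x30000 maps to zone number 3, so the zone is treated
 * as sequential iff bit 3 is set in that device's blkz_seq bitmap.
 */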
#endif

static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
				  struct block_device *bdev)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).bdev == bdev)
			return i;

	WARN_ON(1);
	return -1;
}

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}
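/*
 * In other words, runtime discard is enabled either because the user
 * asked for it (the "discard" mount option) on hardware that supports
 * it, or because the hardware mandates it (zoned block devices),
 * regardless of mount options.
 */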

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
					  block_t blkaddr)
{
	if (f2fs_sb_has_blkzoned(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkaddr);

		return !bdev_is_zoned(FDEV(devi).bdev);
	}
	return true;
}

static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
		f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
		f2fs_is_mmap_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int diff = fi->i_cluster_size - blocks;

	/* don't update i_compr_blocks if saved blocks were released */
	if (!add && !atomic_read(&fi->i_compr_blocks))
		return;

	if (add) {
		atomic_add(diff, &fi->i_compr_blocks);
		stat_add_compr_blocks(inode, diff);
	} else {
		atomic_sub(diff, &fi->i_compr_blocks);
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}
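/*
 * Worked example (hypothetical numbers): for i_cluster_size == 4 and a
 * cluster written out as blocks == 1 compressed block, diff == 3, so
 * i_compr_blocks (and the matching per-sb stat) moves by 3 in the
 * direction selected by @add.
 */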

static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
								int flag)
{
	if (!f2fs_is_multi_device(sbi))
		return false;
	if (flag != F2FS_GET_BLOCK_DIO)
		return false;
	return sbi->aligned_blksize;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
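/*
 * Example: for a verity file with i_size == 10000 and PAGE_SIZE == 4096,
 * DIV_ROUND_UP() yields 3, so pages 0..2 need verification while pages
 * past EOF (idx >= 3) do not.
 */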

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
							unsigned long type);
#else
static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
					unsigned long rate, unsigned long type)
{
	return 0;
}
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}

static inline void f2fs_io_schedule_timeout(long timeout)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	io_schedule_timeout(timeout);
}

static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
					enum page_type type)
{
	if (unlikely(f2fs_cp_error(sbi)))
		return;

	if (ofs == sbi->page_eio_ofs[type]) {
		if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
			set_ckpt_flags(sbi, CP_ERROR_FLAG);
	} else {
		sbi->page_eio_ofs[type] = ofs;
		sbi->page_eio_cnt[type] = 0;
	}
}
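/*
 * I.e., consecutive -EIO completions at the same offset are counted,
 * and once the counter reaches MAX_RETRY_PAGE_EIO the filesystem gives
 * up and latches CP_ERROR_FLAG; an error at a different offset restarts
 * the count.
 */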

static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
}

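/*
 * Drop cached meta-inode pages for a range of block addresses; any page
 * in the range still under writeback is submitted first so the
 * truncation below cannot race with in-flight IO.
 */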
static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
		block_t blkaddr, unsigned int cnt)
{
	bool need_submit = false;
	int i = 0;

	do {
		struct page *page;

		page = find_get_page(META_MAPPING(sbi), blkaddr + i);
		if (page) {
			if (PageWriteback(page))
				need_submit = true;
			f2fs_put_page(page, 0);
		}
	} while (++i < cnt && !need_submit);

	if (need_submit)
		f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
							NULL, 0, DATA);

	truncate_inode_pages_range(META_MAPPING(sbi),
			F2FS_BLK_TO_BYTES((loff_t)blkaddr),
			F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
}

static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
								block_t blkaddr)
{
	f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
	f2fs_invalidate_compress_page(sbi, blkaddr);
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */