/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
/* start node id of a node block dedicated to the given node id */
#define	START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define	NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* size of free nid batch when shrinking */
#define SHRINK_NID_BATCH_SIZE	8

#define DEF_RA_NID_PAGES	0	/* # of nid pages to be readaheaded */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD		10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD			100000

/* control total # of node writes used for roll-forward recovery */
#define DEF_RF_NODE_BLOCKS			0

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NAT_VEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* check pinned file's alignment status of physical blocks */
#define FILE_NOT_ALIGNED	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
	IS_PREALLOC,		/* nat entry is preallocated */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	if (set)
		ne->ni.flag |= BIT(type);
	else
		ne->ni.flag &= ~BIT(type);
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	return ne->ni.flag & BIT(type);
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	READ_EXTENT_CACHE,	/* indicates read extent cache */
	AGE_EXTENT_CACHE,	/* indicates age extent cache */
	DISCARD_CACHE,	/* indicates memory of cached discard cmds */
	COMPRESS_PAGE,	/* indicates memory of cached compressed pages */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: FREE_NID or PREALLOC_NID */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
						nm_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;

	/*
	 * block_off = segment_off * 512 + off_in_segment
	 * OLD = (segment_off * 512) * 2 + off_in_segment
	 * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
	 */
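	/*
	 * Worked example (a sketch only, assuming the common layout of
	 * 512 blocks per segment, so BLKS_PER_SEG(sbi) == 512):
	 *   block_off = 515 (segment_off = 1, off_in_segment = 3)
	 *   block_addr = nat_blkaddr + (515 << 1) - (515 & 511)
	 *              = nat_blkaddr + 1030 - 3
	 *              = nat_blkaddr + 1027 (segment 2, offset 3)
	 * i.e. each NAT segment occupies a pair of on-disk segments, and
	 * the nat_bitmap test below picks the valid copy of the pair.
	 */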
	block_off = NAT_BLOCK_OFFSET(start);

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(block_off << 1) -
		(block_off & (BLKS_PER_SEG(sbi) - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += BLKS_PER_SEG(sbi);

	return block_addr;
}

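/*
 * Minimal illustration (assuming the default 512 blocks per segment, so
 * BIT(sbi->log_blocks_per_seg) == 512): XORing the NAT-relative address
 * with 512 toggles between the two on-disk copies of a NAT block, e.g.
 * relative offset 3 <-> 515, or 1027 <-> 1539 in the example above.
 */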
static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	block_addr ^= BIT(sbi->log_blocks_per_seg);
	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should remain old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline bool is_recoverable_dnode(struct page *page)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	__u64 cp_ver = cur_cp_version(ckpt);

	/* Don't care crc part, if fsck.f2fs sets it. */
	if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
		return (cp_ver << 32) == (cpver_of_node(page) << 32);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	return cp_ver == cpver_of_node(page);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 *  Inode block (0)
 *    |- direct node (1)
 *    |- direct node (2)
 *    |- indirect node (3)
 *    |            `- direct node (4 => 4 + N - 1)
 *    |- indirect node (4 + N)
 *    |            `- direct node (5 + N => 5 + 2N - 1)
 *    `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
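/*
 * A concrete sketch of the numbering above, assuming N == 1018 (the usual
 * NIDS_PER_BLOCK with 4KB blocks):
 *   offset 0                  inode block
 *   offsets 1, 2              direct nodes
 *   offset 3                  indirect node (direct children at 4..1021)
 *   offset 1022 (4 + N)       indirect node (direct children at 1023..2040)
 *   offset 2041 (5 + 2N)      double indirect node
 *   offsets 2042, 3061, ...   its indirect children (6 + 2N + x(N + 1))
 * IS_DNODE() below returns false exactly for the indirect/double indirect
 * offsets (3, 4 + N, 5 + 2N and 6 + 2N + x(N + 1)) and true for inode,
 * direct node and xattr block offsets.
 */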
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return true;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	return le32_to_cpu(rn->footer.flag) & BIT(type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline void set_cold_node(struct page *page, bool is_dir)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (is_dir)
		flag &= ~BIT(COLD_BIT_SHIFT);
	else
		flag |= BIT(COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= BIT(type);
	else
		flag &= ~BIT(type);
	rn->footer.flag = cpu_to_le32(flag);

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)