xref: /openbmc/linux/fs/f2fs/segment.h (revision f7d84fa7)
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec))

#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))
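
/*
 * Illustrative geometry note (not enforced here): with the common layout of
 * 4KB blocks (log_blocksize = 12) and 512 blocks per segment
 * (log_blocks_per_seg = 9), SEGMENT_SIZE() evaluates to 1ULL << 21 = 2MB.
 */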

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((((blk_addr) == NULL_ADDR) || ((blk_addr) == NEW_ADDR)) ?	\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)					\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)				\
	((segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)				\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)				\
	((secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)				\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
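
/*
 * Worked example of the segment/section/zone arithmetic above (illustrative
 * geometry, assuming segs_per_sec = 4 and secs_per_zone = 2): segment #13
 * maps to section #3 (13 / 4), section #3 starts at segment #12 (3 * 4) and
 * lies in zone #1 (3 / 2), and BLKS_PER_SEC() is 4 * blocks_per_seg.
 */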

#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)					\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)		\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)			\
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)			\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};
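
/*
 * Note (informational): ALLOC_NEXT and FLUSH_DEVICE above are not selectable
 * victim_sel_policy->gc_mode values; together with GC_CB and GC_GREEDY they
 * size and index the last_victim[] array in struct sit_info below
 * (MAX_GC_POLICY entries).
 */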

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job that is triggered from the
 * background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * This value is set in a page's private data to indicate that the page has
 * been written atomically and is on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
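
/*
 * Illustrative sketch (modelled on register_inmem_page()-style code, not a
 * new API): the private field is tagged so that later commit and write-back
 * paths can recognize an atomically written page.
 *
 *	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
 *	SetPagePrivate(page);
 *	...
 *	if (IS_ATOMIC_WRITTEN_PAGE(page))
 *		...;	(handled through the inmem_pages list below)
 */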

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when a commit fails */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: the order of dirty types is the same as that of CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operations */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && sbi->segs_per_sec > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}
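
/*
 * Worked example for get_valid_blocks() (illustrative, segs_per_sec = 4):
 * get_valid_blocks(sbi, 13, false) reads sentries[13].valid_blocks for that
 * one segment, while get_valid_blocks(sbi, 13, true) reads
 * sec_entries[3].valid_blocks, i.e. the total for segments 12..15, without
 * walking the individual segments.
 */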

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}
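
/*
 * On-disk packing note (assuming SIT_VBLOCKS_SHIFT is 10, as defined in
 * f2fs_fs.h): vblocks keeps the segment type in the bits above the shift and
 * the valid block count in the low bits, so e.g. type CURSEG_WARM_DATA (1)
 * with 300 valid blocks is stored as (1 << 10) | 300 = 0x52c.
 */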

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, (unsigned int)overprovision_segments(sbi));
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (test_opt(sbi, LFS))
		return false;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
						2 * reserved_sections(sbi));
}
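
/*
 * Example with illustrative numbers: if reserved_sections(sbi) = 50 and there
 * are two sections worth of dirty node pages, one of dirty dentries and none
 * of dirty inode meta, SSR allocation is requested once free_sections(sbi)
 * drops to 2 + 2 * 1 + 0 + 2 * 50 = 104 or fewer.
 */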

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	return (free_sections(sbi) + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		reserved_sections(sbi) + needed);
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy and
 * write a block in place. Users can control the policy through sysfs entries.
 * The policies and their triggering conditions are as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                     storages. IPU will be triggered only if the # of dirty
 *                     pages is over min_fsync_blocks.
 * F2FS_IPU_ASYNC - if the write is an asynchronous (non-sync) write request
 *                     (see need_inplace_update_policy() below),
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
};
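
/*
 * The ipu_policy value is interpreted as a bitmask of the enum above; e.g. a
 * value of 0x04 (1 << F2FS_IPU_UTIL) allows in-place updates only while
 * utilization() exceeds min_ipu_util, and 0x0c additionally enables the
 * SSR_UTIL trigger. (The knob is typically exposed as the ipu_policy sysfs
 * entry; a value of 0 disables IPU entirely.)
 */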

static inline bool need_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (test_opt(sbi, LFS))
		return false;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrites issued by asynchronous (non-sync) write requests
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
			|| blk_addr >= MAX_BLKADDR(sbi));
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
	bool is_valid = test_bit_le(0, raw_sit->valid_map);
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
#endif
	/* check segment usage, and check boundary of a given segment number */
	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1);
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}
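
/*
 * Note on the two SIT copies: the SIT area keeps two copies of every SIT
 * block, and the per-block bit in sit_bitmap selects which copy is currently
 * valid. current_sit_addr() maps a segno to the valid copy, next_sit_addr()
 * returns the address of the other copy, and set_to_next_sit() flips the bit
 * so that the other copy becomes the live one for the next checkpoint.
 */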

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t now = ktime_get_real_seconds();

	return sit_i->elapsed_time + now - sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
						unsigned int secno)
{
	if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >=
						sbi->fggc_threshold)
		return true;
	return false;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 * (An illustrative caller sketch follows the helper below.)
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_PAGES;
	else
		return 0;
}
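
/*
 * Illustrative caller pattern (a sketch of what the write-back paths do when
 * deciding whether to batch, assuming the get_dirty_pages() helper from
 * f2fs.h): a small background writeback request is skipped so that dirty
 * pages can accumulate first.
 *
 *	if (wbc->sync_mode != WB_SYNC_ALL &&
 *			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA))
 *		goto skip_write;
 */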

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_PAGES;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}
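
/*
 * Example (assuming BIO_MAX_PAGES = 256 and 4KB pages): for a background
 * (WB_SYNC_NONE) NODE writeback, nr_to_write is raised to 512 pages (2MB,
 * one full segment), and the returned difference lets the caller subtract
 * the extra budget again once the pages have been submitted.
 */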
798