// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it one try upon an explicit wakeup */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions.  So we wait some time to
		 * collect more dirty segments.
		 */
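		/*
		 * gc_mode is normally GC_NORMAL here; userspace can request
		 * urgent mode (via the "gc_urgent" sysfs knob in mainline),
		 * which skips the idle checks below and re-arms the thread
		 * with the much shorter urgent_sleep_time.
		 */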
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

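/*
 * The thread above is managed by the two helpers below:
 * f2fs_start_gc_thread() is invoked from the mount/remount paths
 * (super.c in mainline) and f2fs_stop_gc_thread() tears the thread
 * down on unmount or when background GC is disabled.
 */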
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kvfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kvfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
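/*
 * The values above seed p->min_cost before the victim scan.  For
 * GC_GREEDY a candidate's cost is its valid block count, which cannot
 * exceed blocks_per_seg * ofs_unit, so seeding with twice that bound
 * guarantees the first candidate examined is always accepted.
 */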

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim segments
	 * that were selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
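/*
 * Worked example with illustrative numbers: a section that is 10%
 * utilized (u = 10) and near-maximally aged (age = 90) costs
 * UINT_MAX - (100 * 90 * 90) / 110 = UINT_MAX - 7363, while a 90%
 * utilized, young section (u = 90, age = 10) costs UINT_MAX - 52.
 * Since the caller minimizes cost, the old, mostly-invalid section
 * is preferred, which is the classic cost-benefit policy.
 */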

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from
 * the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
					get_ckpt_valid_blocks(sbi, segno) &&
					p.alloc_mode != SSR))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If the node is valid, it is migrated with cold
 * status; otherwise (an invalid node) it is ignored.
 */
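/*
 * The scan below runs in three phases: phase 0 reads ahead the NAT
 * blocks covering the nids in the summary, phase 1 reads ahead the
 * node pages themselves, and phase 2 re-validates each entry and
 * migrates the node page via f2fs_move_node_page().
 */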
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index corresponding to the given node
 * offset.  Be careful: the caller must pass only node offsets that
 * indicate direct node blocks.  Passing an offset that points to any
 * other type of node block, such as an indirect or double indirect
 * node block, is a caller bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
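/*
 * Worked example: node offsets 1 and 2 are the two direct node blocks
 * right after the inode (offset 0), giving bidx 0 and 1.  The first
 * indirect node occupies offset 3 and its first direct child sits at
 * offset 4, so dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0 and
 * bidx = 4 - 2 - 0 = 2, i.e. the third direct node block overall.
 * The returned block index then accounts for the ADDRS_PER_INODE()
 * addresses held in the inode itself plus ADDRS_PER_BLOCK() addresses
 * per preceding direct node block.
 */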

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);
	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
	if (mpage) {
		bool updated = false;

		if (PageUptodate(mpage)) {
			memcpy(page_address(fio.encrypted_page),
					page_address(mpage), PAGE_SIZE);
			updated = true;
		}
		f2fs_put_page(mpage, 1);
		invalidate_mapping_pages(META_MAPPING(fio.sbi),
					fio.old_blkaddr, fio.old_blkaddr);
		if (updated)
			goto write_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

write_page:
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
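/*
 * gc_data_segment() below chooses between the two helpers above:
 * move_data_block() migrates through META_MAPPING for inodes that
 * require post-read processing (e.g. encryption), while
 * move_data_page() rewrites an ordinary data page through the regular
 * write path.
 */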

/*
 * This function tries to get the parent node of a victim data block and
 * checks the data block's validity.  If the block is valid, it is copied
 * with cold status and the parent node is modified.
 * If the parent node is not valid or the data block address is
 * different, the victim data block is ignored.
 */
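/*
 * The scan runs in five phases: phases 0 and 1 read ahead the NAT
 * blocks and dnode pages, phase 2 reads ahead the owning inodes' node
 * pages, phase 3 grabs each inode and pre-reads the victim data page,
 * and phase 4 performs the actual move.
 */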
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* get an inode by ino, checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				/*
				 * put twice: once for the reference taken by
				 * find_get_page() above and once for the one
				 * taken by f2fs_get_sum_page() earlier.
				 */
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (__is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
		migrated++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

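/*
 * Note on locking: callers of f2fs_gc() acquire sbi->gc_mutex before
 * the call (see gc_thread_func() above), and the function releases it
 * at the "stop" label on every path, successful or not.
 */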
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below a
		 * given threshold, we can make them free by writing a
		 * checkpoint.  Then we secure free segments, which no
		 * longer need FG_GC.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
						sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* serve the warm/cold data area from the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}

static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
							unsigned int end)
{
	int type;
	unsigned int segno, next_inuse;
	int err = 0;

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
		allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		mutex_lock(&sbi->gc_mutex);
		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
		mutex_unlock(&sbi->gc_mutex);
		put_gc_inode(&gc_list);

		if (get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
	}

	err = f2fs_sync_fs(sbi->sb, 1);
	if (err)
		return err;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
	return err;
}

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count = le32_to_cpu(raw_sb->section_count);
	int segment_count = le32_to_cpu(raw_sb->segment_count);
	int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	long long block_count = le64_to_cpu(raw_sb->block_count);
	int segs = secs * sbi->segs_per_sec;

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
}
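/*
 * Note that "secs" may be negative in both helpers: f2fs_resize_fs()
 * below passes -secs when shrinking the section count and the positive
 * value again to roll the change back on failure.
 */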

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
					(long long)segs * sbi->blocks_per_seg);
}

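/*
 * In mainline this entry point is reached from userspace through the
 * F2FS_IOC_RESIZE_FS ioctl (f2fs_ioc_resize_fs() in file.c), which
 * passes the requested new size in blocks.
 */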
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	unsigned int secs;
	int gc_mode, gc_type;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	freeze_bdev(sbi->sb->s_bdev);

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err) {
		thaw_bdev(sbi->sb->s_bdev, sbi->sb);
		return err;
	}

	mutex_lock(&sbi->resize_mutex);
	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);

	MAIN_SECS(sbi) -= secs;

	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >=
					MAIN_SECS(sbi) * sbi->segs_per_sec)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >=
					MAIN_SECS(sbi) * sbi->segs_per_sec)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;

	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
			MAIN_SEGS(sbi) - 1);
	if (err)
		goto out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = f2fs_sync_fs(sbi->sb, 1);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
out:
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		MAIN_SECS(sbi) += secs;
		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	mutex_unlock(&sbi->resize_mutex);
	thaw_bdev(sbi->sb->s_bdev, sbi->sb);
	return err;
}