xref: /openbmc/linux/fs/f2fs/gc.c (revision ca79522c)
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not currently in progress.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the # of writeback pages.
		 * 3. The IO subsystem is idle, judged by the # of requests in
		 *    the bdev's request list.
		 *
		 * Note) We have to avoid triggering GC too frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions.  So we wait a while to let more dirty
		 * segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

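		/*
		 * Adapt the polling interval: clean more eagerly while plenty
		 * of invalid blocks have accumulated, and back off otherwise.
		 */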
		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

		sbi->bg_gc++;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	if (!test_opt(sbi, BG_GC))
		return 0;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		kfree(gc_th);
		sbi->gc_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(int gc_type)
{
	return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim segments that were
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);
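
	/*
	 * Cost-benefit policy: a section is a cheaper victim when it is older
	 * (larger age) and emptier (smaller utilization u).  The benefit
	 * age * (100 - u) / (100 + u) is subtracted from UINT_MAX so that the
	 * most beneficial section ends up with the minimum cost, which is
	 * what get_victim_by_default() selects.
	 */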
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
					struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == get_max_cost(sbi, &p))
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
got_it:
	if (p.min_segno != NULL_SEGNO) {
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode->i_ino == ino)
			return ie->inode;
	}
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *new_ie, *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode == inode) {
			iput(inode);
			return;
		}
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT.  If they match (the node is valid), the node is copied
 * with cold status; otherwise the invalid node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

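	/*
	 * Two passes over the segment summary: the first pass (initial) only
	 * issues readahead for the valid node pages; the second pass redirties
	 * them so that node writeback migrates them out of the victim segment
	 * (FG_GC then syncs them via sync_node_pages() below).
	 */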
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_submit_bio(sbi, NODE, true);
			wait_on_page_writeback(node_page);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index that the given node offset covers.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block only.  Passing an offset that points to any other type of
 * node block, such as an indirect or double indirect node block, is a
 * caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

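	/*
	 * bidx counts the direct node blocks that precede the given one in
	 * the file; indirect and double indirect node blocks carry no data
	 * block addresses, so 'dec' subtracts them from the offset.
	 */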
	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		if (PageWriteback(page)) {
			f2fs_submit_bio(sbi, DATA, true);
			wait_on_page_writeback(page);
		}

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of the victim data block and
 * checks whether the data block is still valid.  If it is valid, the block
 * is copied with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

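	/*
	 * Four phases over the segment summary:
	 * phase 0 - readahead the direct node pages,
	 * phase 1 - check dnode validity and readahead the owner inode pages,
	 * phase 2 - iget the inodes and readahead their data pages,
	 * phase 3 - move the valid data pages out of the victim segment.
	 */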
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Check dnode validity and get the inode number it belongs to */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

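/*
 * Read the victim segment's summary block and dispatch every block in it to
 * node or data GC according to the summary footer type.
 */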
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

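/*
 * Note: the caller must hold sbi->gc_mutex; it is released at the "stop"
 * label before returning.
 */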
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}