xref: /openbmc/linux/fs/f2fs/gc.c (revision c4ee0af3)
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

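/*
 * Background GC kthread: wake up periodically and, if the filesystem is
 * idle, run one garbage collection pass.  The sleep interval adapts to
 * the observed load (see the comment on the triggering condition below).
 */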
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not running currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently,
		 * because segments can be invalidated soon afterwards by
		 * user updates or deletions.  So it is better to wait a
		 * while and let dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if f2fs_gc() returns nonzero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balance f2fs metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}

out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

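/*
 * Pick the victim selection mode.  By default BG_GC uses cost-benefit
 * (GC_CB) and FG_GC uses greedy (GC_GREEDY); the gc_idle tunable can
 * override this (1 forces GC_CB, 2 forces GC_GREEDY, 0 keeps the
 * per-gc_type default).
 */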
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

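/*
 * SSR victims are searched greedily, per segment, within the dirty list
 * of the requested log type; LFS (GC) victims are searched per section
 * across all dirty segments, using the mode chosen by select_gc_type().
 */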
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > MAX_VICTIM_SEARCH)
		p->max_search = MAX_VICTIM_SEARCH;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC; those sections are guaranteed
	 * to hold only a small number of valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

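/*
 * Cost-benefit victim cost, following the classic LFS heuristic: prefer
 * sections that are both old (cold) and mostly invalid.  With u the
 * utilization in percent and age scaled to 0..100, the cost below is
 * UINT_MAX - (100 * (100 - u) * age) / (100 + u), so a smaller value
 * means a better victim.  Illustrative example (numbers are made up):
 * u = 40, age = 80 gives UINT_MAX - (100 * 60 * 80) / 140, i.e. a cost
 * 3428 below UINT_MAX; an emptier or older section scores even lower.
 */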
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just picks a victim segment and does
 * not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
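/*
 * Note on cost semantics: a lower cost is a better victim.  min_cost is
 * seeded with get_max_cost(), the worst possible cost for the policy,
 * so any candidate that still scores max_cost is skipped outright.
 */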
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

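/*
 * During data GC every victim inode is kept on a per-GC-run inode list
 * (ilist) so that later phases can reuse it without a fresh f2fs_iget();
 * put_gc_inode() drops the references once f2fs_gc() finishes.
 */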
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If they match, the node is still valid and is
 * migrated with cold status; otherwise the stale node is ignored.
 */
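/*
 * The segment is scanned twice: while initial is true only readahead is
 * issued for every valid node page, and the second pass then dirties the
 * pages (FG_GC finally flushes them via sync_node_pages() below).
 */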
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE, true);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: callers must pass node offsets that refer to direct node
 * blocks only.  Passing an offset that points to any other node type,
 * such as an indirect or double-indirect node block, is a caller bug.
 */
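/*
 * Worked example (illustrative; assumes 4 KB blocks, where at this
 * revision NIDS_PER_BLOCK is 1018): node offset 0 is the inode, offsets
 * 1 and 2 are the two direct node blocks, and offset 3 is the first
 * indirect node, so its first child sits at offset 4.  For node_ofs = 4
 * the middle branch gives dec = 0 and bidx = 2, i.e. the function
 * returns 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi).
 */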
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

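/*
 * Check whether the data block at blkaddr is still owned by the dnode
 * named in the summary entry: look the node up, compare versions against
 * the NAT, and verify that the dnode still points at blkaddr.  Returns 1
 * if the block is live (also handing back its node info and node offset),
 * and 0 otherwise.
 */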
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

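/*
 * For BG_GC the page is only marked dirty and cold, leaving the actual
 * write to ordinary writeback; FG_GC writes the page out right away via
 * do_write_data_page() so the victim section can be freed immediately.
 */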
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		f2fs_wait_on_page_writeback(page, DATA, true);

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks whether the block is still valid.  If it is, the block is
 * copied with cold status and the parent node is updated accordingly.
 * If the parent node is no longer valid or the recorded data block
 * address differs, the victim data block is ignored.
 */
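/*
 * The segment is scanned in four phases:
 *   phase 0: readahead the dnode pages named in the summary entries;
 *   phase 1: readahead the owning inodes' node pages;
 *   phase 2: iget the inodes, readahead their data pages, and stash the
 *            inodes on ilist;
 *   phase 3: lock each data page and actually move it.
 */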
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino after checking the dnode's validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

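/*
 * Collect one segment: read its on-disk summary block and migrate either
 * node or data blocks depending on the summary footer type, batching the
 * resulting IO under a single block plug.
 */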
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

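/*
 * Main GC entry point.  The caller must hold sbi->gc_mutex, which is
 * released before returning.  GC starts in background mode and escalates
 * to foreground mode when free sections run short, looping until enough
 * sections have been reclaimed.  Returns 0 if a victim was selected (and
 * collected), -1 if no victim could be found.
 */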
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}