/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering conditions]
		 * 0. GC is not running already.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we should avoid triggering GCs too frequently,
		 * because segments may be invalidated soon afterwards by
		 * user updates or deletions, so it is better to wait a
		 * while and let dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

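/*
 * The thread starts from the sleep-time defaults assigned below.  For
 * reference, the stock values in gc.h at this revision (verify against
 * your own tree) give a 30s base sleep, backing off toward 60s while
 * the system is busy and up to 300s when no victim could be found:
 *
 *	DEF_GC_THREAD_MIN_SLEEP_TIME	30000	(ms)
 *	DEF_GC_THREAD_MAX_SLEEP_TIME	60000
 *	DEF_GC_THREAD_NOGC_SLEEP_TIME	300000
 */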
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

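/*
 * select_gc_type() honors the gc_idle tunable (exposed through sysfs as
 * /sys/fs/f2fs/<dev>/gc_idle): 0 keeps the per-type default below
 * (cost-benefit for background GC, greedy otherwise), 1 forces
 * cost-benefit, and 2 forces greedy.
 */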
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

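/*
 * get_max_cost() returns the worst possible cost under the chosen
 * policy; the victim search seeds min_cost with it and skips any
 * candidate that still costs this much.
 */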
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * When called for FG_GC, we can reuse victim sections that were
	 * selected by background GC earlier.  Those sections are
	 * guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

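	/*
	 * Cost-benefit: the subtracted term grows with free space
	 * (100 - u) and age, so older, emptier sections get a lower
	 * cost.  For example, a half-valid section of maximum age
	 * (u = 50, age = 100) costs UINT_MAX - 100*50*100/150 =
	 * UINT_MAX - 3333, while a fully valid section (u = 100)
	 * costs UINT_MAX regardless of age.
	 */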
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the fewest valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	unsigned int last_segment = MAIN_SEGS(sbi);
	int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

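	/*
	 * Scan the dirty bitmap from where the last search stopped; on
	 * reaching the end of the main area, wrap around once and rescan
	 * from segment 0 up to that stopping point.
	 */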
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If they match, the node is valid and is written
 * out with cold status; otherwise the stale entry is ignored.
 */
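/*
 * The segment is walked twice: the first pass only issues readahead for
 * the node pages; the second pass redirties every node block that is
 * still valid, and FG_GC then syncs them all via sync_node_pages().
 */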
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}

/*
 * Calculate the starting block index for the given node offset.
 * Be careful: the caller must pass only node offsets that refer to
 * direct node blocks.  Passing an offset that points to any other node
 * type, such as an indirect or double indirect node block, is a
 * caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
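	/*
	 * Worked example, assuming the usual 4KB block geometry where
	 * NIDS_PER_BLOCK == ADDRS_PER_BLOCK == 1018: node_ofs 1 (the
	 * first direct node) gives bidx 0, so its data starts right
	 * after the inode's own ADDRS_PER_INODE(fi) addresses; node_ofs
	 * 4 (the first direct node under the first indirect node) gives
	 * dec 0 and bidx 2, i.e. a start index of 2 * 1018 +
	 * ADDRS_PER_INODE(fi).
	 */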
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.blk_addr = dn.data_blkaddr;

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
					fio.blk_addr,
					FGP_LOCK|FGP_CREAT,
					GFP_NOFS);
	if (!fio.encrypted_page)
		goto put_out;

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(!PageUptodate(fio.encrypted_page)))
		goto put_page_out;
	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
		goto put_page_out;

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE);
	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
	fio.rw = WRITE_SYNC;
	f2fs_submit_page_mbio(&fio);

	dn.data_blkaddr = fio.blk_addr;
	set_data_blkaddr(&dn);
	f2fs_update_extent_cache(&dn);
	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);
		do_write_data_page(&fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity.  If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
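/*
 * The scan runs in four phases over the same summary entries:
 *   0: readahead the node page referenced by each entry;
 *   1: readahead the inode page of each entry that is still alive;
 *   2: iget() the inode and readahead the victim data page (encrypted
 *      regular files skip the readahead and go straight to phase 3);
 *   3: move the data page, in place for encrypted blocks.
 */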
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by its ino after checking the block is alive */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* for an encrypted inode, defer the move to phase 3 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA, true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 3 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);
			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	int nfree = 0;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	/*
	 * this is to avoid deadlock:
	 * - lock_page(sum_page)         - f2fs_replace_block
	 *  - check_valid_map()            - mutex_lock(sentry_lock)
	 *   - mutex_lock(sentry_lock)     - change_curseg()
	 *                                  - lock_page(sum_page)
	 */
	unlock_page(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		nfree = gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return nfree;
}

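/*
 * Entry point for both background and foreground GC.  With sync == true
 * a foreground pass must reclaim at least one section or -EAGAIN is
 * returned; otherwise the loop keeps collecting, escalating BG_GC to
 * FG_GC (with a checkpoint) whenever free sections run short.
 */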
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno, i;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
		gc_type = FG_GC;
		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
			write_checkpoint(sbi, &cpc);
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
							META_SSA, true);

	for (i = 0; i < sbi->segs_per_sec; i++) {
		/*
		 * in the FG_GC case, stop collecting the remaining segments
		 * once one segment in the selected section has failed, to
		 * avoid long latency.
		 */
		if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
				gc_type == FG_GC)
			break;
	}

	if (i == sbi->segs_per_sec && gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed))
			goto gc_more;

		if (gc_type == FG_GC)
			write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}