xref: /openbmc/linux/fs/f2fs/gc.c (revision bcb84fb4)
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently,
		 * because segments may be invalidated soon afterwards by
		 * user updates or deletions, so we wait a while to let
		 * dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
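
/*
 * The wait time above forms a simple feedback loop: each pass steps
 * wait_ms up or down between gc_th->min_sleep_time and
 * gc_th->max_sleep_time (the increase/decrease helpers live in gc.h),
 * and a pass that finds no victim parks the thread for
 * gc_th->no_gc_sleep_time.  Assuming the DEF_* tunables in gc.h at
 * this revision (30s min, 60s max, 5min no-GC), an idle and mostly
 * clean filesystem quickly backs off to one GC attempt every few
 * minutes.
 */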

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}
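
/*
 * gc_th->gc_idle is the "gc_idle" sysfs tunable: 0 keeps the default
 * mapping above (cost-benefit for background GC, greedy otherwise),
 * 1 forces cost-benefit and 2 forces greedy selection.
 */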

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can pick victim sections that were
	 * already selected by background GC.  Those sections are
	 * guaranteed to hold only a few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the user changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
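
/*
 * Worked example for the cost-benefit formula above (illustrative
 * numbers): a cold section that is 20% valid (u = 20) with age = 80
 * costs UINT_MAX - (100 * 80 * 80) / 120 = UINT_MAX - 5333, while a
 * hot section that is 80% valid (u = 80) with age = 10 costs
 * UINT_MAX - (100 * 20 * 10) / 180 = UINT_MAX - 111.  Since victim
 * selection keeps the minimum cost, the cold, mostly-invalid section
 * wins, which is the classic LFS cost-benefit trade-off.
 */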

static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	unsigned int valid_blocks =
			get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
				valid_blocks * 2 : valid_blocks;
}
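
/*
 * Doubling the cost of data segments biases greedy selection toward
 * node segments when valid-block counts are comparable; migrating a
 * node block is cheaper since it does not require looking up the
 * owning inode and rewriting a data page.
 */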

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_greedy_cost(sbi, segno);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment and does
 * not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
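
/*
 * Victim selection is pluggable through struct victim_selection.  A
 * minimal sketch of an alternative policy (hypothetical, not part of
 * this file) only needs to honor the same contract as
 * get_victim_by_default(): store the victim segment number in *result
 * and return 1, or return 0 when nothing is found:
 *
 *	static int get_victim_first_dirty(struct f2fs_sb_info *sbi,
 *			unsigned int *result, int gc_type, int type,
 *			char alloc_mode)
 *	{
 *		struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 *		unsigned int segno;
 *
 *		mutex_lock(&dirty_i->seglist_lock);
 *		segno = find_next_bit(dirty_i->dirty_segmap[DIRTY],
 *					MAIN_SEGS(sbi), 0);
 *		mutex_unlock(&dirty_i->seglist_lock);
 *		if (segno >= MAIN_SEGS(sbi))
 *			return 0;
 *		*result = segno;
 *		return 1;
 *	}
 *
 *	static const struct victim_selection first_dirty_v_ops = {
 *		.get_victim = get_victim_first_dirty,
 *	};
 */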

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If they match, the node is still valid and is
 * migrated with cold status; otherwise the stale entry is ignored.
 */
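/*
 * Three passes are made over the segment's summary entries: phase 0
 * reads ahead the NAT blocks covering the nids, phase 1 reads ahead
 * the node pages themselves, and phase 2 re-checks validity and
 * migrates each node via move_node_page().
 */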
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index that the given node offset refers to.
 * Note: the caller must pass a node offset that refers to a direct node
 * block.  Passing an offset that points to any other node block type,
 * such as an indirect or double indirect node block, is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
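
/*
 * Worked example (assuming the usual NIDS_PER_BLOCK == 1018): node
 * offsets count the inode (0), the two direct nodes (1, 2), the first
 * indirect node (3), its direct children (4...), and so on.  For
 * node_ofs == 5, the second direct node under the first indirect node,
 * dec = (5 - 4) / 1019 = 0 and bidx = 5 - 2 - 0 = 3: three direct node
 * blocks precede it, so its data starts at
 * 3 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode).
 */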

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

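/*
 * Move one block of an encrypted regular file without decrypting it:
 * the ciphertext is read into a page of the meta inode's mapping keyed
 * by the block address, a replacement block is allocated in the cold
 * data log, and the ciphertext page is written out to the new address
 * before the dnode entry is updated.
 */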
static void move_encrypted_block(struct inode *inode, block_t bidx,
							unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the
	 * previous dirty data has been written back, to avoid racing
	 * between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block
 * and checks the block's validity.  If the block is valid, it is
 * copied with cold status and the parent node is updated.
 * If the parent node is invalid or the data block address recorded
 * there differs, the victim data block is ignored.
 */
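/*
 * The scan below makes five passes (phases) over the victim segment's
 * summary entries: phase 0 reads ahead the NAT blocks, phase 1 reads
 * ahead the dnode pages, phase 2 reads ahead the inode node pages,
 * phase 3 igets each inode and reads ahead its data page, and phase 4
 * finally moves the data blocks.
 */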
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* check the block is still alive and look up its dnode info */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the data move to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type, segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		/*
		 * drop the reference find_get_page() just took; the one
		 * taken by get_sum_page() above is dropped at "next:"
		 */
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */

		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}

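/*
 * Entry point for both background and foreground GC.  The caller must
 * hold sbi->gc_mutex; it is released here before returning.  With
 * @sync set, GC runs in foreground mode and the return value is
 * -EAGAIN when no section could be freed.
 */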
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below a
		 * given threshold, we can turn them into free segments by
		 * writing a checkpoint.  Then we may secure enough free
		 * segments that foreground GC is no longer needed.
		 */
		ret = write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background)
		goto stop;
	if (!__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
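
/*
 * Caller sketch (illustrative; the real logic lives in callers such as
 * f2fs_balance_fs() in segment.c and the ioctl paths):
 *
 *	if (has_not_enough_free_secs(sbi, 0, 0)) {
 *		mutex_lock(&sbi->gc_mutex);
 *		f2fs_gc(sbi, false, false);
 *	}
 */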

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count, blocks_per_sec;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) * blocks_per_sec,
					(main_count - resv_count));
}
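
/*
 * Reading the threshold above: (main_count - ovp_count) is the number
 * of blocks that can hold live data, so the quotient approximates how
 * many valid blocks a section would carry on average if the usable
 * space were completely full.  Sections with more valid blocks than
 * this are presumably rejected by no_fggc_candidate() (see segment.h),
 * since cleaning them would cost more I/O than the space they free up.
 */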