xref: /openbmc/linux/fs/f2fs/gc.c (revision 0e01d176d5788f66dc64a7e61119edb56eb08339)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * fs/f2fs/gc.c
4   *
5   * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6   *             http://www.samsung.com/
7   */
8  #include <linux/fs.h>
9  #include <linux/module.h>
10  #include <linux/init.h>
11  #include <linux/f2fs_fs.h>
12  #include <linux/kthread.h>
13  #include <linux/delay.h>
14  #include <linux/freezer.h>
15  #include <linux/sched/signal.h>
16  #include <linux/random.h>
17  #include <linux/sched/mm.h>
18  
19  #include "f2fs.h"
20  #include "node.h"
21  #include "segment.h"
22  #include "gc.h"
23  #include "iostat.h"
24  #include <trace/events/f2fs.h>
25  
26  static struct kmem_cache *victim_entry_slab;
27  
28  static unsigned int count_bits(const unsigned long *addr,
29  				unsigned int offset, unsigned int len);
30  
31  static int gc_thread_func(void *data)
32  {
33  	struct f2fs_sb_info *sbi = data;
34  	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
35  	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
36  	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
37  	unsigned int wait_ms;
38  
39  	wait_ms = gc_th->min_sleep_time;
40  
41  	set_freezable();
42  	do {
43  		bool sync_mode, foreground = false;
44  
45  		wait_event_interruptible_timeout(*wq,
46  				kthread_should_stop() || freezing(current) ||
47  				waitqueue_active(fggc_wq) ||
48  				gc_th->gc_wake,
49  				msecs_to_jiffies(wait_ms));
50  
51  		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
52  			foreground = true;
53  
54  		/* give it a try one time */
55  		if (gc_th->gc_wake)
56  			gc_th->gc_wake = 0;
57  
58  		if (try_to_freeze()) {
59  			stat_other_skip_bggc_count(sbi);
60  			continue;
61  		}
62  		if (kthread_should_stop())
63  			break;
64  
65  		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
66  			increase_sleep_time(gc_th, &wait_ms);
67  			stat_other_skip_bggc_count(sbi);
68  			continue;
69  		}
70  
71  		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
72  			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
73  			f2fs_stop_checkpoint(sbi, false);
74  		}
75  
76  		if (!sb_start_write_trylock(sbi->sb)) {
77  			stat_other_skip_bggc_count(sbi);
78  			continue;
79  		}
80  
81  		/*
82  		 * [GC triggering condition]
83  		 * 0. GC is not conducted currently.
84  		 * 1. There are enough dirty segments.
85  		 * 2. IO subsystem is idle by checking the # of writeback pages.
86  		 * 3. IO subsystem is idle by checking the # of requests in
87  		 *    bdev's request list.
88  		 *
89  		 * Note) We have to avoid triggering GCs too frequently,
90  		 * because some segments may be invalidated soon afterwards
91  		 * by user updates or deletions. So wait some time to let
92  		 * dirty segments accumulate.
93  		 */
94  		if (sbi->gc_mode == GC_URGENT_HIGH) {
95  			spin_lock(&sbi->gc_urgent_high_lock);
96  			if (sbi->gc_urgent_high_limited) {
97  				if (!sbi->gc_urgent_high_remaining) {
98  					sbi->gc_urgent_high_limited = false;
99  					spin_unlock(&sbi->gc_urgent_high_lock);
100  					sbi->gc_mode = GC_NORMAL;
101  					continue;
102  				}
103  				sbi->gc_urgent_high_remaining--;
104  			}
105  			spin_unlock(&sbi->gc_urgent_high_lock);
106  		}
107  
108  		if (sbi->gc_mode == GC_URGENT_HIGH ||
109  				sbi->gc_mode == GC_URGENT_MID) {
110  			wait_ms = gc_th->urgent_sleep_time;
111  			f2fs_down_write(&sbi->gc_lock);
112  			goto do_gc;
113  		}
114  
115  		if (foreground) {
116  			f2fs_down_write(&sbi->gc_lock);
117  			goto do_gc;
118  		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
119  			stat_other_skip_bggc_count(sbi);
120  			goto next;
121  		}
122  
123  		if (!is_idle(sbi, GC_TIME)) {
124  			increase_sleep_time(gc_th, &wait_ms);
125  			f2fs_up_write(&sbi->gc_lock);
126  			stat_io_skip_bggc_count(sbi);
127  			goto next;
128  		}
129  
130  		if (has_enough_invalid_blocks(sbi))
131  			decrease_sleep_time(gc_th, &wait_ms);
132  		else
133  			increase_sleep_time(gc_th, &wait_ms);
134  do_gc:
135  		if (!foreground)
136  			stat_inc_bggc_count(sbi->stat_info);
137  
138  		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
139  
140  		/* foreground GC was triggered via f2fs_balance_fs() */
141  		if (foreground)
142  			sync_mode = false;
143  
144  		/* if return value is not zero, no victim was selected */
145  		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
146  			wait_ms = gc_th->no_gc_sleep_time;
147  
148  		if (foreground)
149  			wake_up_all(&gc_th->fggc_wq);
150  
151  		trace_f2fs_background_gc(sbi->sb, wait_ms,
152  				prefree_segments(sbi), free_segments(sbi));
153  
154  		/* balancing f2fs's metadata periodically */
155  		f2fs_balance_fs_bg(sbi, true);
156  next:
157  		sb_end_write(sbi->sb);
158  
159  	} while (!kthread_should_stop());
160  	return 0;
161  }
162  
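/*
 * A minimal, self-contained sketch of the sleep-time adaptation that
 * gc_thread_func() drives through increase_sleep_time() and
 * decrease_sleep_time() (defined in gc.h). The bounds below mirror the
 * assumed DEF_GC_THREAD_*_SLEEP_TIME defaults; the helper names are
 * local stand-ins, not the kernel functions. Illustrative only.
 */
#include <stdio.h>

#define MIN_SLEEP_MS    30000	/* DEF_GC_THREAD_MIN_SLEEP_TIME (assumed) */
#define MAX_SLEEP_MS    60000	/* DEF_GC_THREAD_MAX_SLEEP_TIME (assumed) */
#define NOGC_SLEEP_MS  300000	/* DEF_GC_THREAD_NOGC_SLEEP_TIME (assumed) */

/* back off: the fs looks busy or I/O is not idle */
static unsigned int increase_sleep(unsigned int wait_ms)
{
	if (wait_ms == NOGC_SLEEP_MS)	/* already fully backed off */
		return wait_ms;
	wait_ms += MIN_SLEEP_MS;
	return wait_ms > MAX_SLEEP_MS ? MAX_SLEEP_MS : wait_ms;
}

/* speed up: enough invalid blocks have accumulated to be worth GC */
static unsigned int decrease_sleep(unsigned int wait_ms)
{
	if (wait_ms == NOGC_SLEEP_MS)
		wait_ms = MAX_SLEEP_MS;
	if (wait_ms <= MIN_SLEEP_MS)
		return MIN_SLEEP_MS;
	return wait_ms - MIN_SLEEP_MS;
}

int main(void)
{
	unsigned int w = MIN_SLEEP_MS;

	w = increase_sleep(w);	/* 30000 -> 60000 */
	printf("after backoff: %u ms\n", w);
	w = decrease_sleep(w);	/* 60000 -> 30000 */
	printf("after speedup: %u ms\n", w);
	return 0;
}
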
163  int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
164  {
165  	struct f2fs_gc_kthread *gc_th;
166  	dev_t dev = sbi->sb->s_bdev->bd_dev;
167  	int err = 0;
168  
169  	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
170  	if (!gc_th) {
171  		err = -ENOMEM;
172  		goto out;
173  	}
174  
175  	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
176  	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
177  	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
178  	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
179  
180  	gc_th->gc_wake = 0;
181  
182  	sbi->gc_thread = gc_th;
183  	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
184  	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
185  	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
186  			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
187  	if (IS_ERR(gc_th->f2fs_gc_task)) {
188  		err = PTR_ERR(gc_th->f2fs_gc_task);
189  		kfree(gc_th);
190  		sbi->gc_thread = NULL;
191  	}
192  out:
193  	return err;
194  }
195  
196  void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
197  {
198  	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
199  
200  	if (!gc_th)
201  		return;
202  	kthread_stop(gc_th->f2fs_gc_task);
203  	wake_up_all(&gc_th->fggc_wq);
204  	kfree(gc_th);
205  	sbi->gc_thread = NULL;
206  }
207  
208  static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
209  {
210  	int gc_mode;
211  
212  	if (gc_type == BG_GC) {
213  		if (sbi->am.atgc_enabled)
214  			gc_mode = GC_AT;
215  		else
216  			gc_mode = GC_CB;
217  	} else {
218  		gc_mode = GC_GREEDY;
219  	}
220  
221  	switch (sbi->gc_mode) {
222  	case GC_IDLE_CB:
223  		gc_mode = GC_CB;
224  		break;
225  	case GC_IDLE_GREEDY:
226  	case GC_URGENT_HIGH:
227  		gc_mode = GC_GREEDY;
228  		break;
229  	case GC_IDLE_AT:
230  		gc_mode = GC_AT;
231  		break;
232  	}
233  
234  	return gc_mode;
235  }
236  
237  static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
238  			int type, struct victim_sel_policy *p)
239  {
240  	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
241  
242  	if (p->alloc_mode == SSR) {
243  		p->gc_mode = GC_GREEDY;
244  		p->dirty_bitmap = dirty_i->dirty_segmap[type];
245  		p->max_search = dirty_i->nr_dirty[type];
246  		p->ofs_unit = 1;
247  	} else if (p->alloc_mode == AT_SSR) {
248  		p->gc_mode = GC_GREEDY;
249  		p->dirty_bitmap = dirty_i->dirty_segmap[type];
250  		p->max_search = dirty_i->nr_dirty[type];
251  		p->ofs_unit = 1;
252  	} else {
253  		p->gc_mode = select_gc_type(sbi, gc_type);
254  		p->ofs_unit = sbi->segs_per_sec;
255  		if (__is_large_section(sbi)) {
256  			p->dirty_bitmap = dirty_i->dirty_secmap;
257  			p->max_search = count_bits(p->dirty_bitmap,
258  						0, MAIN_SECS(sbi));
259  		} else {
260  			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
261  			p->max_search = dirty_i->nr_dirty[DIRTY];
262  		}
263  	}
264  
265  	/*
266  	 * adjust the candidate range; all dirty segments should be
267  	 * selectable in the foreground GC and urgent GC cases.
268  	 */
269  	if (gc_type != FG_GC &&
270  			(sbi->gc_mode != GC_URGENT_HIGH) &&
271  			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
272  			p->max_search > sbi->max_victim_search)
273  		p->max_search = sbi->max_victim_search;
274  
275  	/* let's select the beginning hot/small space first in no_heap mode */
276  	if (f2fs_need_rand_seg(sbi))
277  		p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
278  	else if (test_opt(sbi, NOHEAP) &&
279  		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
280  		p->offset = 0;
281  	else
282  		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
283  }
284  
285  static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
286  				struct victim_sel_policy *p)
287  {
288  	/* SSR allocates in a segment unit */
289  	if (p->alloc_mode == SSR)
290  		return sbi->blocks_per_seg;
291  	else if (p->alloc_mode == AT_SSR)
292  		return UINT_MAX;
293  
294  	/* LFS */
295  	if (p->gc_mode == GC_GREEDY)
296  		return 2 * sbi->blocks_per_seg * p->ofs_unit;
297  	else if (p->gc_mode == GC_CB)
298  		return UINT_MAX;
299  	else if (p->gc_mode == GC_AT)
300  		return UINT_MAX;
301  	else /* No other gc_mode */
302  		return 0;
303  }
304  
305  static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
306  {
307  	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
308  	unsigned int secno;
309  
310  	/*
311  	 * If the gc_type is FG_GC, we can reuse victim sections that were
312  	 * already selected by background GC.
313  	 * Those sections are guaranteed to have few valid blocks.
314  	 */
315  	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
316  		if (sec_usage_check(sbi, secno))
317  			continue;
318  		clear_bit(secno, dirty_i->victim_secmap);
319  		return GET_SEG_FROM_SEC(sbi, secno);
320  	}
321  	return NULL_SEGNO;
322  }
323  
324  static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
325  {
326  	struct sit_info *sit_i = SIT_I(sbi);
327  	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
328  	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
329  	unsigned long long mtime = 0;
330  	unsigned int vblocks;
331  	unsigned char age = 0;
332  	unsigned char u;
333  	unsigned int i;
334  	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
335  
336  	for (i = 0; i < usable_segs_per_sec; i++)
337  		mtime += get_seg_entry(sbi, start + i)->mtime;
338  	vblocks = get_valid_blocks(sbi, segno, true);
339  
340  	mtime = div_u64(mtime, usable_segs_per_sec);
341  	vblocks = div_u64(vblocks, usable_segs_per_sec);
342  
343  	u = (vblocks * 100) >> sbi->log_blocks_per_seg;
344  
345  	/* Handle the case where the system time was changed by the user */
346  	if (mtime < sit_i->min_mtime)
347  		sit_i->min_mtime = mtime;
348  	if (mtime > sit_i->max_mtime)
349  		sit_i->max_mtime = mtime;
350  	if (sit_i->max_mtime != sit_i->min_mtime)
351  		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
352  				sit_i->max_mtime - sit_i->min_mtime);
353  
354  	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
355  }
356  
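/*
 * Worked example of the cost-benefit metric above: get_cb_cost() returns
 * UINT_MAX - (100 * (100 - u) * age) / (100 + u), i.e. the benefit term
 * (free space to reclaim, weighted by age) divided by the migration cost,
 * inverted so that the minimum-cost search in get_victim_by_default()
 * prefers old sections with few valid blocks. A standalone sketch with
 * illustrative values only:
 */
#include <limits.h>
#include <stdio.h>

static unsigned int cb_cost(unsigned int u /* valid %, 0..100 */,
			    unsigned int age /* normalized, 0..100 */)
{
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

int main(void)
{
	/* the old, mostly-invalid section yields the smallest cost */
	printf("u=10 age=90 -> UINT_MAX - %u\n", UINT_MAX - cb_cost(10, 90)); /* 7363 */
	printf("u=50 age=90 -> UINT_MAX - %u\n", UINT_MAX - cb_cost(50, 90)); /* 3000 */
	printf("u=10 age=10 -> UINT_MAX - %u\n", UINT_MAX - cb_cost(10, 10)); /*  818 */
	return 0;
}
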
357  static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
358  			unsigned int segno, struct victim_sel_policy *p)
359  {
360  	if (p->alloc_mode == SSR)
361  		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
362  
363  	/* alloc_mode == LFS */
364  	if (p->gc_mode == GC_GREEDY)
365  		return get_valid_blocks(sbi, segno, true);
366  	else if (p->gc_mode == GC_CB)
367  		return get_cb_cost(sbi, segno);
368  
369  	f2fs_bug_on(sbi, 1);
370  	return 0;
371  }
372  
373  static unsigned int count_bits(const unsigned long *addr,
374  				unsigned int offset, unsigned int len)
375  {
376  	unsigned int end = offset + len, sum = 0;
377  
378  	while (offset < end) {
379  		if (test_bit(offset++, addr))
380  			++sum;
381  	}
382  	return sum;
383  }
384  
385  static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
386  				unsigned long long mtime, unsigned int segno,
387  				struct rb_node *parent, struct rb_node **p,
388  				bool left_most)
389  {
390  	struct atgc_management *am = &sbi->am;
391  	struct victim_entry *ve;
392  
393  	ve = f2fs_kmem_cache_alloc(victim_entry_slab,
394  				GFP_NOFS, true, NULL);
395  
396  	ve->mtime = mtime;
397  	ve->segno = segno;
398  
399  	rb_link_node(&ve->rb_node, parent, p);
400  	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);
401  
402  	list_add_tail(&ve->list, &am->victim_list);
403  
404  	am->victim_count++;
405  
406  	return ve;
407  }
408  
409  static void insert_victim_entry(struct f2fs_sb_info *sbi,
410  				unsigned long long mtime, unsigned int segno)
411  {
412  	struct atgc_management *am = &sbi->am;
413  	struct rb_node **p;
414  	struct rb_node *parent = NULL;
415  	bool left_most = true;
416  
417  	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
418  	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
419  }
420  
421  static void add_victim_entry(struct f2fs_sb_info *sbi,
422  				struct victim_sel_policy *p, unsigned int segno)
423  {
424  	struct sit_info *sit_i = SIT_I(sbi);
425  	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
426  	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
427  	unsigned long long mtime = 0;
428  	unsigned int i;
429  
430  	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
431  		if (p->gc_mode == GC_AT &&
432  			get_valid_blocks(sbi, segno, true) == 0)
433  			return;
434  	}
435  
436  	for (i = 0; i < sbi->segs_per_sec; i++)
437  		mtime += get_seg_entry(sbi, start + i)->mtime;
438  	mtime = div_u64(mtime, sbi->segs_per_sec);
439  
440  	/* Handle if the system time has changed by the user */
441  	/* Handle the case where the system time was changed by the user */
442  		sit_i->min_mtime = mtime;
443  	if (mtime > sit_i->max_mtime)
444  		sit_i->max_mtime = mtime;
445  	if (mtime < sit_i->dirty_min_mtime)
446  		sit_i->dirty_min_mtime = mtime;
447  	if (mtime > sit_i->dirty_max_mtime)
448  		sit_i->dirty_max_mtime = mtime;
449  
450  	/* don't choose a young section as a candidate */
451  	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
452  		return;
453  
454  	insert_victim_entry(sbi, mtime, segno);
455  }
456  
457  static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
458  						struct victim_sel_policy *p)
459  {
460  	struct atgc_management *am = &sbi->am;
461  	struct rb_node *parent = NULL;
462  	bool left_most;
463  
464  	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
465  
466  	return parent;
467  }
468  
469  static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
470  						struct victim_sel_policy *p)
471  {
472  	struct sit_info *sit_i = SIT_I(sbi);
473  	struct atgc_management *am = &sbi->am;
474  	struct rb_root_cached *root = &am->root;
475  	struct rb_node *node;
476  	struct rb_entry *re;
477  	struct victim_entry *ve;
478  	unsigned long long total_time;
479  	unsigned long long age, u, accu;
480  	unsigned long long max_mtime = sit_i->dirty_max_mtime;
481  	unsigned long long min_mtime = sit_i->dirty_min_mtime;
482  	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
483  	unsigned int vblocks;
484  	unsigned int dirty_threshold = max(am->max_candidate_count,
485  					am->candidate_ratio *
486  					am->victim_count / 100);
487  	unsigned int age_weight = am->age_weight;
488  	unsigned int cost;
489  	unsigned int iter = 0;
490  
491  	if (max_mtime < min_mtime)
492  		return;
493  
494  	max_mtime += 1;
495  	total_time = max_mtime - min_mtime;
496  
497  	accu = div64_u64(ULLONG_MAX, total_time);
498  	accu = min_t(unsigned long long, div_u64(accu, 100),
499  					DEFAULT_ACCURACY_CLASS);
500  
501  	node = rb_first_cached(root);
502  next:
503  	re = rb_entry_safe(node, struct rb_entry, rb_node);
504  	if (!re)
505  		return;
506  
507  	ve = (struct victim_entry *)re;
508  
509  	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
510  		goto skip;
511  
512  	/* age = 10000 * x% * 60 */
513  	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
514  								age_weight;
515  
516  	vblocks = get_valid_blocks(sbi, ve->segno, true);
517  	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
518  
519  	/* u = 10000 * x% * 40 */
520  	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
521  							(100 - age_weight);
522  
523  	f2fs_bug_on(sbi, age + u >= UINT_MAX);
524  
525  	cost = UINT_MAX - (age + u);
526  	iter++;
527  
528  	if (cost < p->min_cost ||
529  			(cost == p->min_cost && age > p->oldest_age)) {
530  		p->min_cost = cost;
531  		p->oldest_age = age;
532  		p->min_segno = ve->segno;
533  	}
534  skip:
535  	if (iter < dirty_threshold) {
536  		node = rb_next(node);
537  		goto next;
538  	}
539  }
540  
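/*
 * Worked example of the ATGC score computed above: with the assumed
 * defaults accu = 10000 (DEFAULT_ACCURACY_CLASS) and age_weight = 60,
 * cost = UINT_MAX - (age + u), where age contributes up to 60% of the
 * score and the invalid-block ratio up to 40%. Standalone sketch with
 * made-up mtimes and block counts:
 */
#include <stdio.h>

#define ACCU       10000ULL	/* DEFAULT_ACCURACY_CLASS (assumed) */
#define AGE_WEIGHT 60		/* DEF_GC_THREAD_AGE_WEIGHT (assumed) */

static unsigned long long atgc_score(unsigned long long mtime,
		unsigned long long min_mtime, unsigned long long max_mtime,
		unsigned int vblocks, unsigned int sec_blocks)
{
	unsigned long long total = max_mtime - min_mtime;
	unsigned long long age, u;

	age = ACCU * (max_mtime - mtime) / total * AGE_WEIGHT;
	u = ACCU * (sec_blocks - vblocks) / sec_blocks * (100 - AGE_WEIGHT);
	return age + u;		/* cost = UINT_MAX - (age + u) */
}

int main(void)
{
	/* equally dirty sections: the older one gets the higher score,
	 * i.e. the lower cost, and wins the victim selection */
	printf("old:   %llu\n", atgc_score(100, 100, 1100, 64, 512));  /* 950000 */
	printf("young: %llu\n", atgc_score(1000, 100, 1100, 64, 512)); /* 410000 */
	return 0;
}
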
541  /*
542   * select candidates around source section in range of
543   * [target - dirty_threshold, target + dirty_threshold]
544   */
545  static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
546  						struct victim_sel_policy *p)
547  {
548  	struct sit_info *sit_i = SIT_I(sbi);
549  	struct atgc_management *am = &sbi->am;
550  	struct rb_node *node;
551  	struct rb_entry *re;
552  	struct victim_entry *ve;
553  	unsigned long long age;
554  	unsigned long long max_mtime = sit_i->dirty_max_mtime;
555  	unsigned long long min_mtime = sit_i->dirty_min_mtime;
556  	unsigned int seg_blocks = sbi->blocks_per_seg;
557  	unsigned int vblocks;
558  	unsigned int dirty_threshold = max(am->max_candidate_count,
559  					am->candidate_ratio *
560  					am->victim_count / 100);
561  	unsigned int cost;
562  	unsigned int iter = 0;
563  	int stage = 0;
564  
565  	if (max_mtime < min_mtime)
566  		return;
567  	max_mtime += 1;
568  next_stage:
569  	node = lookup_central_victim(sbi, p);
570  next_node:
571  	re = rb_entry_safe(node, struct rb_entry, rb_node);
572  	if (!re) {
573  		if (stage == 0)
574  			goto skip_stage;
575  		return;
576  	}
577  
578  	ve = (struct victim_entry *)re;
579  
580  	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
581  		goto skip_node;
582  
583  	age = max_mtime - ve->mtime;
584  
585  	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
586  	f2fs_bug_on(sbi, !vblocks);
587  
588  	/* rare case */
589  	if (vblocks == seg_blocks)
590  		goto skip_node;
591  
592  	iter++;
593  
594  	age = max_mtime - abs(p->age - age);
595  	cost = UINT_MAX - vblocks;
596  
597  	if (cost < p->min_cost ||
598  			(cost == p->min_cost && age > p->oldest_age)) {
599  		p->min_cost = cost;
600  		p->oldest_age = age;
601  		p->min_segno = ve->segno;
602  	}
603  skip_node:
604  	if (iter < dirty_threshold) {
605  		if (stage == 0)
606  			node = rb_prev(node);
607  		else if (stage == 1)
608  			node = rb_next(node);
609  		goto next_node;
610  	}
611  skip_stage:
612  	if (stage < 1) {
613  		stage++;
614  		iter = 0;
615  		goto next_stage;
616  	}
617  }
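
/*
 * The two-stage walk above scans outward from the entry whose mtime is
 * closest to the requested age: stage 0 steps toward older entries
 * (rb_prev), stage 1 re-looks-up the center and steps toward younger
 * ones (rb_next), visiting at most dirty_threshold candidates per stage.
 * A sketch of that traversal over a sorted array (hypothetical values):
 */
#include <stdio.h>

int main(void)
{
	unsigned long long mtime[] = {10, 20, 30, 40, 50, 60};
	int n = 6, center = 3;	/* index nearest the target age */
	int threshold = 2, stage, i, iter;

	for (stage = 0; stage < 2; stage++)
		for (i = center, iter = 0; i >= 0 && i < n && iter < threshold;
		     i += (stage == 0 ? -1 : 1), iter++)
			printf("stage %d visits mtime %llu\n", stage, mtime[i]);
	return 0;
}
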
618  static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
619  						struct victim_sel_policy *p)
620  {
621  	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
622  						&sbi->am.root, true));
623  
624  	if (p->gc_mode == GC_AT)
625  		atgc_lookup_victim(sbi, p);
626  	else if (p->alloc_mode == AT_SSR)
627  		atssr_lookup_victim(sbi, p);
628  	else
629  		f2fs_bug_on(sbi, 1);
630  }
631  
632  static void release_victim_entry(struct f2fs_sb_info *sbi)
633  {
634  	struct atgc_management *am = &sbi->am;
635  	struct victim_entry *ve, *tmp;
636  
637  	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
638  		list_del(&ve->list);
639  		kmem_cache_free(victim_entry_slab, ve);
640  		am->victim_count--;
641  	}
642  
643  	am->root = RB_ROOT_CACHED;
644  
645  	f2fs_bug_on(sbi, am->victim_count);
646  	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
647  }
648  
649  /*
650   * This function is called from two paths:
651   * garbage collection and SSR segment selection.
652   * When called during GC, it just picks a victim segment
653   * without removing it from the dirty seglist.
654   * When called for SSR segment selection, it finds the segment
655   * that has the fewest valid blocks and removes it from the dirty seglist.
656   */
657  static int get_victim_by_default(struct f2fs_sb_info *sbi,
658  			unsigned int *result, int gc_type, int type,
659  			char alloc_mode, unsigned long long age)
660  {
661  	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
662  	struct sit_info *sm = SIT_I(sbi);
663  	struct victim_sel_policy p;
664  	unsigned int secno, last_victim;
665  	unsigned int last_segment;
666  	unsigned int nsearched;
667  	bool is_atgc;
668  	int ret = 0;
669  
670  	mutex_lock(&dirty_i->seglist_lock);
671  	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
672  
673  	p.alloc_mode = alloc_mode;
674  	p.age = age;
675  	p.age_threshold = sbi->am.age_threshold;
676  
677  retry:
678  	select_policy(sbi, gc_type, type, &p);
679  	p.min_segno = NULL_SEGNO;
680  	p.oldest_age = 0;
681  	p.min_cost = get_max_cost(sbi, &p);
682  
683  	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
684  	nsearched = 0;
685  
686  	if (is_atgc)
687  		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
688  
689  	if (*result != NULL_SEGNO) {
690  		if (!get_valid_blocks(sbi, *result, false)) {
691  			ret = -ENODATA;
692  			goto out;
693  		}
694  
695  		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
696  			ret = -EBUSY;
697  		else
698  			p.min_segno = *result;
699  		goto out;
700  	}
701  
702  	ret = -ENODATA;
703  	if (p.max_search == 0)
704  		goto out;
705  
706  	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
707  		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
708  			p.min_segno = sbi->next_victim_seg[BG_GC];
709  			*result = p.min_segno;
710  			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
711  			goto got_result;
712  		}
713  		if (gc_type == FG_GC &&
714  				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
715  			p.min_segno = sbi->next_victim_seg[FG_GC];
716  			*result = p.min_segno;
717  			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
718  			goto got_result;
719  		}
720  	}
721  
722  	last_victim = sm->last_victim[p.gc_mode];
723  	if (p.alloc_mode == LFS && gc_type == FG_GC) {
724  		p.min_segno = check_bg_victims(sbi);
725  		if (p.min_segno != NULL_SEGNO)
726  			goto got_it;
727  	}
728  
729  	while (1) {
730  		unsigned long cost, *dirty_bitmap;
731  		unsigned int unit_no, segno;
732  
733  		dirty_bitmap = p.dirty_bitmap;
734  		unit_no = find_next_bit(dirty_bitmap,
735  				last_segment / p.ofs_unit,
736  				p.offset / p.ofs_unit);
737  		segno = unit_no * p.ofs_unit;
738  		if (segno >= last_segment) {
739  			if (sm->last_victim[p.gc_mode]) {
740  				last_segment =
741  					sm->last_victim[p.gc_mode];
742  				sm->last_victim[p.gc_mode] = 0;
743  				p.offset = 0;
744  				continue;
745  			}
746  			break;
747  		}
748  
749  		p.offset = segno + p.ofs_unit;
750  		nsearched++;
751  
752  #ifdef CONFIG_F2FS_CHECK_FS
753  		/*
754  		 * skip selecting a segno that previously failed the block
755  		 * validity check during GC, to avoid an endless GC loop on
756  		 * such segments.
757  		 */
758  		if (test_bit(segno, sm->invalid_segmap))
759  			goto next;
760  #endif
761  
762  		secno = GET_SEC_FROM_SEG(sbi, segno);
763  
764  		if (sec_usage_check(sbi, secno))
765  			goto next;
766  
767  		/* Don't touch checkpointed data */
768  		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
769  			if (p.alloc_mode == LFS) {
770  				/*
771  				 * LFS is set to find source section during GC.
772  				 * The victim should have no checkpointed data.
773  				 */
774  				if (get_ckpt_valid_blocks(sbi, segno, true))
775  					goto next;
776  			} else {
777  				/*
778  				 * SSR | AT_SSR are set to find a target segment
779  				 * for writes, which may be full of checkpointed
780  				 * and newly written blocks.
781  				 */
782  				if (!f2fs_segment_has_free_slot(sbi, segno))
783  					goto next;
784  			}
785  		}
786  
787  		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
788  			goto next;
789  
790  		if (is_atgc) {
791  			add_victim_entry(sbi, &p, segno);
792  			goto next;
793  		}
794  
795  		cost = get_gc_cost(sbi, segno, &p);
796  
797  		if (p.min_cost > cost) {
798  			p.min_segno = segno;
799  			p.min_cost = cost;
800  		}
801  next:
802  		if (nsearched >= p.max_search) {
803  			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
804  				sm->last_victim[p.gc_mode] =
805  					last_victim + p.ofs_unit;
806  			else
807  				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
808  			sm->last_victim[p.gc_mode] %=
809  				(MAIN_SECS(sbi) * sbi->segs_per_sec);
810  			break;
811  		}
812  	}
813  
814  	/* get victim for GC_AT/AT_SSR */
815  	if (is_atgc) {
816  		lookup_victim_by_age(sbi, &p);
817  		release_victim_entry(sbi);
818  	}
819  
820  	if (is_atgc && p.min_segno == NULL_SEGNO &&
821  			sm->elapsed_time < p.age_threshold) {
822  		p.age_threshold = 0;
823  		goto retry;
824  	}
825  
826  	if (p.min_segno != NULL_SEGNO) {
827  got_it:
828  		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
829  got_result:
830  		if (p.alloc_mode == LFS) {
831  			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
832  			if (gc_type == FG_GC)
833  				sbi->cur_victim_sec = secno;
834  			else
835  				set_bit(secno, dirty_i->victim_secmap);
836  		}
837  		ret = 0;
838  
839  	}
840  out:
841  	if (p.min_segno != NULL_SEGNO)
842  		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
843  				sbi->cur_victim_sec,
844  				prefree_segments(sbi), free_segments(sbi));
845  	mutex_unlock(&dirty_i->seglist_lock);
846  
847  	return ret;
848  }
849  
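/*
 * The victim search above walks the dirty bitmap in units of ofs_unit,
 * starting from last_victim and wrapping around to the beginning once
 * the end is reached, so repeated calls cover the whole space fairly.
 * A loose sketch of that wrap-around scan (toy bitmap, hypothetical
 * values; the kernel code expresses the wrap by shrinking last_segment
 * rather than with a modulo):
 */
#include <stdio.h>

int main(void)
{
	int dirty[8] = {0, 1, 0, 0, 1, 0, 1, 0};
	int nr_units = 8, last_victim = 5, i, scanned;

	for (i = last_victim, scanned = 0; scanned < nr_units;
	     i = (i + 1) % nr_units, scanned++)
		if (dirty[i])
			printf("candidate unit %d\n", i);	/* 6, then 1, 4 */
	return 0;
}
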
850  static const struct victim_selection default_v_ops = {
851  	.get_victim = get_victim_by_default,
852  };
853  
854  static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
855  {
856  	struct inode_entry *ie;
857  
858  	ie = radix_tree_lookup(&gc_list->iroot, ino);
859  	if (ie)
860  		return ie->inode;
861  	return NULL;
862  }
863  
864  static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
865  {
866  	struct inode_entry *new_ie;
867  
868  	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
869  		iput(inode);
870  		return;
871  	}
872  	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
873  					GFP_NOFS, true, NULL);
874  	new_ie->inode = inode;
875  
876  	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
877  	list_add_tail(&new_ie->list, &gc_list->ilist);
878  }
879  
880  static void put_gc_inode(struct gc_inode_list *gc_list)
881  {
882  	struct inode_entry *ie, *next_ie;
883  
884  	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
885  		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
886  		iput(ie->inode);
887  		list_del(&ie->list);
888  		kmem_cache_free(f2fs_inode_entry_slab, ie);
889  	}
890  }
891  
892  static int check_valid_map(struct f2fs_sb_info *sbi,
893  				unsigned int segno, int offset)
894  {
895  	struct sit_info *sit_i = SIT_I(sbi);
896  	struct seg_entry *sentry;
897  	int ret;
898  
899  	down_read(&sit_i->sentry_lock);
900  	sentry = get_seg_entry(sbi, segno);
901  	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
902  	up_read(&sit_i->sentry_lock);
903  	return ret;
904  }
905  
906  /*
907   * This function compares the node address in the summary with the one in
908   * the NAT. If they match, the node is valid and is migrated with cold
909   * status; otherwise the invalid node is ignored.
910   */
911  static int gc_node_segment(struct f2fs_sb_info *sbi,
912  		struct f2fs_summary *sum, unsigned int segno, int gc_type)
913  {
914  	struct f2fs_summary *entry;
915  	block_t start_addr;
916  	int off;
917  	int phase = 0;
918  	bool fggc = (gc_type == FG_GC);
919  	int submitted = 0;
920  	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
921  
922  	start_addr = START_BLOCK(sbi, segno);
923  
924  next_step:
925  	entry = sum;
926  
927  	if (fggc && phase == 2)
928  		atomic_inc(&sbi->wb_sync_req[NODE]);
929  
930  	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
931  		nid_t nid = le32_to_cpu(entry->nid);
932  		struct page *node_page;
933  		struct node_info ni;
934  		int err;
935  
936  		/* stop BG_GC if there are not enough free sections. */
937  		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
938  			return submitted;
939  
940  		if (check_valid_map(sbi, segno, off) == 0)
941  			continue;
942  
943  		if (phase == 0) {
944  			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
945  							META_NAT, true);
946  			continue;
947  		}
948  
949  		if (phase == 1) {
950  			f2fs_ra_node_page(sbi, nid);
951  			continue;
952  		}
953  
954  		/* phase == 2 */
955  		node_page = f2fs_get_node_page(sbi, nid);
956  		if (IS_ERR(node_page))
957  			continue;
958  
959  		/* block may become invalid during f2fs_get_node_page */
960  		if (check_valid_map(sbi, segno, off) == 0) {
961  			f2fs_put_page(node_page, 1);
962  			continue;
963  		}
964  
965  		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
966  			f2fs_put_page(node_page, 1);
967  			continue;
968  		}
969  
970  		if (ni.blk_addr != start_addr + off) {
971  			f2fs_put_page(node_page, 1);
972  			continue;
973  		}
974  
975  		err = f2fs_move_node_page(node_page, gc_type);
976  		if (!err && gc_type == FG_GC)
977  			submitted++;
978  		stat_inc_node_blk_count(sbi, 1, gc_type);
979  	}
980  
981  	if (++phase < 3)
982  		goto next_step;
983  
984  	if (fggc)
985  		atomic_dec(&sbi->wb_sync_req[NODE]);
986  	return submitted;
987  }
988  
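/*
 * gc_node_segment() above makes three passes over the same summary
 * entries so that each kind of read is batched: phase 0 issues NAT
 * readahead, phase 1 issues node-page readahead, and only phase 2
 * migrates blocks, by which time the needed pages are likely cached.
 * A sketch of the phased-batching pattern (names hypothetical):
 */
#include <stdio.h>

enum { PHASE_RA_NAT, PHASE_RA_NODE, PHASE_MOVE, NR_PHASES };

int main(void)
{
	int nids[] = {7, 9, 12};
	int n = 3, phase, i;

	for (phase = 0; phase < NR_PHASES; phase++)
		for (i = 0; i < n; i++)
			switch (phase) {
			case PHASE_RA_NAT:
				printf("readahead NAT block for nid %d\n", nids[i]);
				break;
			case PHASE_RA_NODE:
				printf("readahead node page %d\n", nids[i]);
				break;
			case PHASE_MOVE:
				printf("migrate node %d\n", nids[i]);
				break;
			}
	return 0;
}
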
989  /*
990   * Calculate the start block index that the given node offset maps to.
991   * Be careful: the caller must pass node offsets that indicate direct node
992   * blocks only. Passing an offset that points to any other node block type,
993   * such as an indirect or double indirect node block, is a bug in the
994   * caller.
995   */
996  block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
997  {
998  	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
999  	unsigned int bidx;
1000  
1001  	if (node_ofs == 0)
1002  		return 0;
1003  
1004  	if (node_ofs <= 2) {
1005  		bidx = node_ofs - 1;
1006  	} else if (node_ofs <= indirect_blks) {
1007  		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
1008  
1009  		bidx = node_ofs - 2 - dec;
1010  	} else {
1011  		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
1012  
1013  		bidx = node_ofs - 5 - dec;
1014  	}
1015  	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
1016  }
1017  
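/*
 * Worked example of the mapping above, using the assumed on-disk defaults
 * ADDRS_PER_INODE = 923 and ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018 (no
 * inline xattr): the inode covers file blocks 0..922, direct node 1
 * starts at block 923, direct node 2 at 923 + 1018, and the first direct
 * node under indirect node 1 (node_ofs 4) at 923 + 2 * 1018. Standalone
 * check, mirroring the arithmetic of f2fs_start_bidx_of_node():
 */
#include <assert.h>
#include <stdio.h>

#define ADDRS_PER_INODE 923	/* DEF_ADDRS_PER_INODE (assumed) */
#define ADDRS_PER_BLOCK 1018	/* DEF_ADDRS_PER_BLOCK (assumed) */
#define NIDS_PER_BLOCK  1018

static unsigned int start_bidx(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;
	if (node_ofs <= 2)
		bidx = node_ofs - 1;
	else if (node_ofs <= indirect_blks)
		bidx = node_ofs - 2 - (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
	else
		bidx = node_ofs - 5 -
			(node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

int main(void)
{
	assert(start_bidx(1) == 923);			/* direct node 1 */
	assert(start_bidx(2) == 923 + 1018);		/* direct node 2 */
	assert(start_bidx(4) == 923 + 2 * 1018);	/* first child of indirect 1 */
	printf("ok\n");
	return 0;
}
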
1018  static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1019  		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
1020  {
1021  	struct page *node_page;
1022  	nid_t nid;
1023  	unsigned int ofs_in_node;
1024  	block_t source_blkaddr;
1025  
1026  	nid = le32_to_cpu(sum->nid);
1027  	ofs_in_node = le16_to_cpu(sum->ofs_in_node);
1028  
1029  	node_page = f2fs_get_node_page(sbi, nid);
1030  	if (IS_ERR(node_page))
1031  		return false;
1032  
1033  	if (f2fs_get_node_info(sbi, nid, dni, false)) {
1034  		f2fs_put_page(node_page, 1);
1035  		return false;
1036  	}
1037  
1038  	if (sum->version != dni->version) {
1039  		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1040  			  __func__);
1041  		set_sbi_flag(sbi, SBI_NEED_FSCK);
1042  	}
1043  
1044  	if (f2fs_check_nid_range(sbi, dni->ino)) {
1045  		f2fs_put_page(node_page, 1);
1046  		return false;
1047  	}
1048  
1049  	*nofs = ofs_of_node(node_page);
1050  	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
1051  	f2fs_put_page(node_page, 1);
1052  
1053  	if (source_blkaddr != blkaddr) {
1054  #ifdef CONFIG_F2FS_CHECK_FS
1055  		unsigned int segno = GET_SEGNO(sbi, blkaddr);
1056  		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1057  
1058  		if (unlikely(check_valid_map(sbi, segno, offset))) {
1059  			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1060  				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1061  					 blkaddr, source_blkaddr, segno);
1062  				set_sbi_flag(sbi, SBI_NEED_FSCK);
1063  			}
1064  		}
1065  #endif
1066  		return false;
1067  	}
1068  	return true;
1069  }
1070  
1071  static int ra_data_block(struct inode *inode, pgoff_t index)
1072  {
1073  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1074  	struct address_space *mapping = inode->i_mapping;
1075  	struct dnode_of_data dn;
1076  	struct page *page;
1077  	struct extent_info ei = {0, 0, 0};
1078  	struct f2fs_io_info fio = {
1079  		.sbi = sbi,
1080  		.ino = inode->i_ino,
1081  		.type = DATA,
1082  		.temp = COLD,
1083  		.op = REQ_OP_READ,
1084  		.op_flags = 0,
1085  		.encrypted_page = NULL,
1086  		.in_list = false,
1087  		.retry = false,
1088  	};
1089  	int err;
1090  
1091  	page = f2fs_grab_cache_page(mapping, index, true);
1092  	if (!page)
1093  		return -ENOMEM;
1094  
1095  	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1096  		dn.data_blkaddr = ei.blk + index - ei.fofs;
1097  		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1098  						DATA_GENERIC_ENHANCE_READ))) {
1099  			err = -EFSCORRUPTED;
1100  			goto put_page;
1101  		}
1102  		goto got_it;
1103  	}
1104  
1105  	set_new_dnode(&dn, inode, NULL, NULL, 0);
1106  	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1107  	if (err)
1108  		goto put_page;
1109  	f2fs_put_dnode(&dn);
1110  
1111  	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1112  		err = -ENOENT;
1113  		goto put_page;
1114  	}
1115  	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1116  						DATA_GENERIC_ENHANCE))) {
1117  		err = -EFSCORRUPTED;
1118  		goto put_page;
1119  	}
1120  got_it:
1121  	/* read page */
1122  	fio.page = page;
1123  	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1124  
1125  	/*
1126  	 * don't cache encrypted data in the meta inode until previous dirty
1127  	 * data has been written back, to avoid racing between GC and flush.
1128  	 */
1129  	f2fs_wait_on_page_writeback(page, DATA, true, true);
1130  
1131  	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1132  
1133  	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1134  					dn.data_blkaddr,
1135  					FGP_LOCK | FGP_CREAT, GFP_NOFS);
1136  	if (!fio.encrypted_page) {
1137  		err = -ENOMEM;
1138  		goto put_page;
1139  	}
1140  
1141  	err = f2fs_submit_page_bio(&fio);
1142  	if (err)
1143  		goto put_encrypted_page;
1144  	f2fs_put_page(fio.encrypted_page, 0);
1145  	f2fs_put_page(page, 1);
1146  
1147  	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1148  	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1149  
1150  	return 0;
1151  put_encrypted_page:
1152  	f2fs_put_page(fio.encrypted_page, 1);
1153  put_page:
1154  	f2fs_put_page(page, 1);
1155  	return err;
1156  }
1157  
1158  /*
1159   * Move a data block via META_MAPPING while keeping the data page locked.
1160   * This can be used to move blocks, aka LBAs, directly on disk.
1161   */
1162  static int move_data_block(struct inode *inode, block_t bidx,
1163  				int gc_type, unsigned int segno, int off)
1164  {
1165  	struct f2fs_io_info fio = {
1166  		.sbi = F2FS_I_SB(inode),
1167  		.ino = inode->i_ino,
1168  		.type = DATA,
1169  		.temp = COLD,
1170  		.op = REQ_OP_READ,
1171  		.op_flags = 0,
1172  		.encrypted_page = NULL,
1173  		.in_list = false,
1174  		.retry = false,
1175  	};
1176  	struct dnode_of_data dn;
1177  	struct f2fs_summary sum;
1178  	struct node_info ni;
1179  	struct page *page, *mpage;
1180  	block_t newaddr;
1181  	int err = 0;
1182  	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1183  	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1184  				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1185  				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
1186  
1187  	/* do not read out */
1188  	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
1189  	if (!page)
1190  		return -ENOMEM;
1191  
1192  	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1193  		err = -ENOENT;
1194  		goto out;
1195  	}
1196  
1197  	if (f2fs_is_atomic_file(inode)) {
1198  		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
1199  		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
1200  		err = -EAGAIN;
1201  		goto out;
1202  	}
1203  
1204  	if (f2fs_is_pinned_file(inode)) {
1205  		f2fs_pin_file_control(inode, true);
1206  		err = -EAGAIN;
1207  		goto out;
1208  	}
1209  
1210  	set_new_dnode(&dn, inode, NULL, NULL, 0);
1211  	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
1212  	if (err)
1213  		goto out;
1214  
1215  	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1216  		ClearPageUptodate(page);
1217  		err = -ENOENT;
1218  		goto put_out;
1219  	}
1220  
1221  	/*
1222  	 * don't cache encrypted data in the meta inode until previous dirty
1223  	 * data has been written back, to avoid racing between GC and flush.
1224  	 */
1225  	f2fs_wait_on_page_writeback(page, DATA, true, true);
1226  
1227  	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1228  
1229  	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1230  	if (err)
1231  		goto put_out;
1232  
1233  	/* read page */
1234  	fio.page = page;
1235  	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1236  
1237  	if (lfs_mode)
1238  		f2fs_down_write(&fio.sbi->io_order_lock);
1239  
1240  	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1241  					fio.old_blkaddr, false);
1242  	if (!mpage) {
1243  		err = -ENOMEM;
1244  		goto up_out;
1245  	}
1246  
1247  	fio.encrypted_page = mpage;
1248  
1249  	/* read source block in mpage */
1250  	if (!PageUptodate(mpage)) {
1251  		err = f2fs_submit_page_bio(&fio);
1252  		if (err) {
1253  			f2fs_put_page(mpage, 1);
1254  			goto up_out;
1255  		}
1256  
1257  		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1258  		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1259  
1260  		lock_page(mpage);
1261  		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1262  						!PageUptodate(mpage))) {
1263  			err = -EIO;
1264  			f2fs_put_page(mpage, 1);
1265  			goto up_out;
1266  		}
1267  	}
1268  
1269  	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1270  
1271  	/* allocate block address */
1272  	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1273  				&sum, type, NULL);
1274  
1275  	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1276  				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
1277  	if (!fio.encrypted_page) {
1278  		err = -ENOMEM;
1279  		f2fs_put_page(mpage, 1);
1280  		goto recover_block;
1281  	}
1282  
1283  	/* write target block */
1284  	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
1285  	memcpy(page_address(fio.encrypted_page),
1286  				page_address(mpage), PAGE_SIZE);
1287  	f2fs_put_page(mpage, 1);
1288  	invalidate_mapping_pages(META_MAPPING(fio.sbi),
1289  				fio.old_blkaddr, fio.old_blkaddr);
1290  	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
1291  
1292  	set_page_dirty(fio.encrypted_page);
1293  	if (clear_page_dirty_for_io(fio.encrypted_page))
1294  		dec_page_count(fio.sbi, F2FS_DIRTY_META);
1295  
1296  	set_page_writeback(fio.encrypted_page);
1297  	ClearPageError(page);
1298  
1299  	fio.op = REQ_OP_WRITE;
1300  	fio.op_flags = REQ_SYNC;
1301  	fio.new_blkaddr = newaddr;
1302  	f2fs_submit_page_write(&fio);
1303  	if (fio.retry) {
1304  		err = -EAGAIN;
1305  		if (PageWriteback(fio.encrypted_page))
1306  			end_page_writeback(fio.encrypted_page);
1307  		goto put_page_out;
1308  	}
1309  
1310  	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
1311  
1312  	f2fs_update_data_blkaddr(&dn, newaddr);
1313  	set_inode_flag(inode, FI_APPEND_WRITE);
1314  	if (page->index == 0)
1315  		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1316  put_page_out:
1317  	f2fs_put_page(fio.encrypted_page, 1);
1318  recover_block:
1319  	if (err)
1320  		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1321  							true, true, true);
1322  up_out:
1323  	if (lfs_mode)
1324  		f2fs_up_write(&fio.sbi->io_order_lock);
1325  put_out:
1326  	f2fs_put_dnode(&dn);
1327  out:
1328  	f2fs_put_page(page, 1);
1329  	return err;
1330  }
1331  
1332  static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
1333  							unsigned int segno, int off)
1334  {
1335  	struct page *page;
1336  	int err = 0;
1337  
1338  	page = f2fs_get_lock_data_page(inode, bidx, true);
1339  	if (IS_ERR(page))
1340  		return PTR_ERR(page);
1341  
1342  	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1343  		err = -ENOENT;
1344  		goto out;
1345  	}
1346  
1347  	if (f2fs_is_atomic_file(inode)) {
1348  		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
1349  		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
1350  		err = -EAGAIN;
1351  		goto out;
1352  	}
1353  	if (f2fs_is_pinned_file(inode)) {
1354  		if (gc_type == FG_GC)
1355  			f2fs_pin_file_control(inode, true);
1356  		err = -EAGAIN;
1357  		goto out;
1358  	}
1359  
1360  	if (gc_type == BG_GC) {
1361  		if (PageWriteback(page)) {
1362  			err = -EAGAIN;
1363  			goto out;
1364  		}
1365  		set_page_dirty(page);
1366  		set_page_private_gcing(page);
1367  	} else {
1368  		struct f2fs_io_info fio = {
1369  			.sbi = F2FS_I_SB(inode),
1370  			.ino = inode->i_ino,
1371  			.type = DATA,
1372  			.temp = COLD,
1373  			.op = REQ_OP_WRITE,
1374  			.op_flags = REQ_SYNC,
1375  			.old_blkaddr = NULL_ADDR,
1376  			.page = page,
1377  			.encrypted_page = NULL,
1378  			.need_lock = LOCK_REQ,
1379  			.io_type = FS_GC_DATA_IO,
1380  		};
1381  		bool is_dirty = PageDirty(page);
1382  
1383  retry:
1384  		f2fs_wait_on_page_writeback(page, DATA, true, true);
1385  
1386  		set_page_dirty(page);
1387  		if (clear_page_dirty_for_io(page)) {
1388  			inode_dec_dirty_pages(inode);
1389  			f2fs_remove_dirty_inode(inode);
1390  		}
1391  
1392  		set_page_private_gcing(page);
1393  
1394  		err = f2fs_do_write_data_page(&fio);
1395  		if (err) {
1396  			clear_page_private_gcing(page);
1397  			if (err == -ENOMEM) {
1398  				memalloc_retry_wait(GFP_NOFS);
1399  				goto retry;
1400  			}
1401  			if (is_dirty)
1402  				set_page_dirty(page);
1403  		}
1404  	}
1405  out:
1406  	f2fs_put_page(page, 1);
1407  	return err;
1408  }
1409  
1410  /*
1411   * This function tries to get the parent node of the victim data block and
1412   * checks the block's validity. If the block is valid, it is migrated with
1413   * cold status and the parent node is updated.
1414   * If the parent node is not valid or the data block address differs,
1415   * the victim data block is ignored.
1416   */
1417  static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1418  		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1419  		bool force_migrate)
1420  {
1421  	struct super_block *sb = sbi->sb;
1422  	struct f2fs_summary *entry;
1423  	block_t start_addr;
1424  	int off;
1425  	int phase = 0;
1426  	int submitted = 0;
1427  	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1428  
1429  	start_addr = START_BLOCK(sbi, segno);
1430  
1431  next_step:
1432  	entry = sum;
1433  
1434  	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1435  		struct page *data_page;
1436  		struct inode *inode;
1437  		struct node_info dni; /* dnode info for the data */
1438  		unsigned int ofs_in_node, nofs;
1439  		block_t start_bidx;
1440  		nid_t nid = le32_to_cpu(entry->nid);
1441  
1442  		/*
1443  		 * stop BG_GC if there are not enough free sections.
1444  		 * Also, stop GC if the segment has become fully valid due to
1445  		 * a race condition with SSR block allocation.
1446  		 */
1447  		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1448  			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
1449  							BLKS_PER_SEC(sbi)))
1450  			return submitted;
1451  
1452  		if (check_valid_map(sbi, segno, off) == 0)
1453  			continue;
1454  
1455  		if (phase == 0) {
1456  			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1457  							META_NAT, true);
1458  			continue;
1459  		}
1460  
1461  		if (phase == 1) {
1462  			f2fs_ra_node_page(sbi, nid);
1463  			continue;
1464  		}
1465  
1466  		/* Get an inode by ino, checking its validity */
1467  		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1468  			continue;
1469  
1470  		if (phase == 2) {
1471  			f2fs_ra_node_page(sbi, dni.ino);
1472  			continue;
1473  		}
1474  
1475  		ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1476  
1477  		if (phase == 3) {
1478  			inode = f2fs_iget(sb, dni.ino);
1479  			if (IS_ERR(inode) || is_bad_inode(inode) ||
1480  					special_file(inode->i_mode))
1481  				continue;
1482  
1483  			if (!f2fs_down_write_trylock(
1484  				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1485  				iput(inode);
1486  				sbi->skipped_gc_rwsem++;
1487  				continue;
1488  			}
1489  
1490  			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1491  								ofs_in_node;
1492  
1493  			if (f2fs_post_read_required(inode)) {
1494  				int err = ra_data_block(inode, start_bidx);
1495  
1496  				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1497  				if (err) {
1498  					iput(inode);
1499  					continue;
1500  				}
1501  				add_gc_inode(gc_list, inode);
1502  				continue;
1503  			}
1504  
1505  			data_page = f2fs_get_read_data_page(inode,
1506  						start_bidx, REQ_RAHEAD, true);
1507  			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1508  			if (IS_ERR(data_page)) {
1509  				iput(inode);
1510  				continue;
1511  			}
1512  
1513  			f2fs_put_page(data_page, 0);
1514  			add_gc_inode(gc_list, inode);
1515  			continue;
1516  		}
1517  
1518  		/* phase 4 */
1519  		inode = find_gc_inode(gc_list, dni.ino);
1520  		if (inode) {
1521  			struct f2fs_inode_info *fi = F2FS_I(inode);
1522  			bool locked = false;
1523  			int err;
1524  
1525  			if (S_ISREG(inode->i_mode)) {
1526  				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
1527  					sbi->skipped_gc_rwsem++;
1528  					continue;
1529  				}
1530  				if (!f2fs_down_write_trylock(
1531  						&fi->i_gc_rwsem[WRITE])) {
1532  					sbi->skipped_gc_rwsem++;
1533  					f2fs_up_write(&fi->i_gc_rwsem[READ]);
1534  					continue;
1535  				}
1536  				locked = true;
1537  
1538  				/* wait for all inflight aio data */
1539  				/* wait for all in-flight AIO data */
1540  			}
1541  
1542  			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1543  								+ ofs_in_node;
1544  			if (f2fs_post_read_required(inode))
1545  				err = move_data_block(inode, start_bidx,
1546  							gc_type, segno, off);
1547  			else
1548  				err = move_data_page(inode, start_bidx, gc_type,
1549  								segno, off);
1550  
1551  			if (!err && (gc_type == FG_GC ||
1552  					f2fs_post_read_required(inode)))
1553  				submitted++;
1554  
1555  			if (locked) {
1556  				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1557  				f2fs_up_write(&fi->i_gc_rwsem[READ]);
1558  			}
1559  
1560  			stat_inc_data_blk_count(sbi, 1, gc_type);
1561  		}
1562  	}
1563  
1564  	if (++phase < 5)
1565  		goto next_step;
1566  
1567  	return submitted;
1568  }
1569  
1570  static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1571  			int gc_type)
1572  {
1573  	struct sit_info *sit_i = SIT_I(sbi);
1574  	int ret;
1575  
1576  	down_write(&sit_i->sentry_lock);
1577  	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
1578  					      NO_CHECK_TYPE, LFS, 0);
1579  	up_write(&sit_i->sentry_lock);
1580  	return ret;
1581  }
1582  
1583  static int do_garbage_collect(struct f2fs_sb_info *sbi,
1584  				unsigned int start_segno,
1585  				struct gc_inode_list *gc_list, int gc_type,
1586  				bool force_migrate)
1587  {
1588  	struct page *sum_page;
1589  	struct f2fs_summary_block *sum;
1590  	struct blk_plug plug;
1591  	unsigned int segno = start_segno;
1592  	unsigned int end_segno = start_segno + sbi->segs_per_sec;
1593  	int seg_freed = 0, migrated = 0;
1594  	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1595  						SUM_TYPE_DATA : SUM_TYPE_NODE;
1596  	int submitted = 0;
1597  
1598  	if (__is_large_section(sbi))
1599  		end_segno = rounddown(end_segno, sbi->segs_per_sec);
1600  
1601  	/*
1602  	 * zone-capacity can be less than zone-size on zoned devices,
1603  	 * resulting in fewer usable segments in the zone than expected;
1604  	 * calculate the end segno of the zone that can be garbage collected
1605  	 */
1606  	if (f2fs_sb_has_blkzoned(sbi))
1607  		end_segno -= sbi->segs_per_sec -
1608  					f2fs_usable_segs_in_sec(sbi, segno);
1609  
1610  	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1611  
1612  	/* read ahead multiple SSA blocks that have contiguous addresses */
1613  	if (__is_large_section(sbi))
1614  		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1615  					end_segno - segno, META_SSA, true);
1616  
1617  	/* reference all summary pages */
1618  	while (segno < end_segno) {
1619  		sum_page = f2fs_get_sum_page(sbi, segno++);
1620  		if (IS_ERR(sum_page)) {
1621  			int err = PTR_ERR(sum_page);
1622  
1623  			end_segno = segno - 1;
1624  			for (segno = start_segno; segno < end_segno; segno++) {
1625  				sum_page = find_get_page(META_MAPPING(sbi),
1626  						GET_SUM_BLOCK(sbi, segno));
1627  				f2fs_put_page(sum_page, 0);
1628  				f2fs_put_page(sum_page, 0);
1629  			}
1630  			return err;
1631  		}
1632  		unlock_page(sum_page);
1633  	}
1634  
1635  	blk_start_plug(&plug);
1636  
1637  	for (segno = start_segno; segno < end_segno; segno++) {
1638  
1639  		/* find segment summary of victim */
1640  		sum_page = find_get_page(META_MAPPING(sbi),
1641  					GET_SUM_BLOCK(sbi, segno));
1642  		f2fs_put_page(sum_page, 0);
1643  
1644  		if (get_valid_blocks(sbi, segno, false) == 0)
1645  			goto freed;
1646  		if (gc_type == BG_GC && __is_large_section(sbi) &&
1647  				migrated >= sbi->migration_granularity)
1648  			goto skip;
1649  		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1650  			goto skip;
1651  
1652  		sum = page_address(sum_page);
1653  		if (type != GET_SUM_TYPE((&sum->footer))) {
1654  			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1655  				 segno, type, GET_SUM_TYPE((&sum->footer)));
1656  			set_sbi_flag(sbi, SBI_NEED_FSCK);
1657  			f2fs_stop_checkpoint(sbi, false);
1658  			goto skip;
1659  		}
1660  
1661  		/*
1662  		 * this is to avoid deadlock:
1663  		 * - lock_page(sum_page)         - f2fs_replace_block
1664  		 *  - check_valid_map()            - down_write(sentry_lock)
1665  		 *   - down_read(sentry_lock)     - change_curseg()
1666  		 *                                  - lock_page(sum_page)
1667  		 */
1668  		if (type == SUM_TYPE_NODE)
1669  			submitted += gc_node_segment(sbi, sum->entries, segno,
1670  								gc_type);
1671  		else
1672  			submitted += gc_data_segment(sbi, sum->entries, gc_list,
1673  							segno, gc_type,
1674  							force_migrate);
1675  
1676  		stat_inc_seg_count(sbi, type, gc_type);
1677  		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1678  		migrated++;
1679  
1680  freed:
1681  		if (gc_type == FG_GC &&
1682  				get_valid_blocks(sbi, segno, false) == 0)
1683  			seg_freed++;
1684  
1685  		if (__is_large_section(sbi) && segno + 1 < end_segno)
1686  			sbi->next_victim_seg[gc_type] = segno + 1;
1687  skip:
1688  		f2fs_put_page(sum_page, 0);
1689  	}
1690  
1691  	if (submitted)
1692  		f2fs_submit_merged_write(sbi,
1693  				(type == SUM_TYPE_NODE) ? NODE : DATA);
1694  
1695  	blk_finish_plug(&plug);
1696  
1697  	stat_inc_call_count(sbi->stat_info);
1698  
1699  	return seg_freed;
1700  }
1701  
1702  int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
1703  			bool background, bool force, unsigned int segno)
1704  {
1705  	int gc_type = sync ? FG_GC : BG_GC;
1706  	int sec_freed = 0, seg_freed = 0, total_freed = 0;
1707  	int ret = 0;
1708  	struct cp_control cpc;
1709  	unsigned int init_segno = segno;
1710  	struct gc_inode_list gc_list = {
1711  		.ilist = LIST_HEAD_INIT(gc_list.ilist),
1712  		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1713  	};
1714  	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
1715  	unsigned long long first_skipped;
1716  	unsigned int skipped_round = 0, round = 0;
1717  
1718  	trace_f2fs_gc_begin(sbi->sb, sync, background,
1719  				get_pages(sbi, F2FS_DIRTY_NODES),
1720  				get_pages(sbi, F2FS_DIRTY_DENTS),
1721  				get_pages(sbi, F2FS_DIRTY_IMETA),
1722  				free_sections(sbi),
1723  				free_segments(sbi),
1724  				reserved_segments(sbi),
1725  				prefree_segments(sbi));
1726  
1727  	cpc.reason = __get_cp_reason(sbi);
1728  	sbi->skipped_gc_rwsem = 0;
1729  	first_skipped = last_skipped;
1730  gc_more:
1731  	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1732  		ret = -EINVAL;
1733  		goto stop;
1734  	}
1735  	if (unlikely(f2fs_cp_error(sbi))) {
1736  		ret = -EIO;
1737  		goto stop;
1738  	}
1739  
1740  	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
1741  		/*
1742  		 * For example, if there are many prefree_segments below the given
1743  		 * threshold, we can free them by writing a checkpoint. Then we
1744  		 * secure free segments that don't need fggc any more.
1745  		 */
1746  		if (prefree_segments(sbi) &&
1747  				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1748  			ret = f2fs_write_checkpoint(sbi, &cpc);
1749  			if (ret)
1750  				goto stop;
1751  		}
1752  		if (has_not_enough_free_secs(sbi, 0, 0))
1753  			gc_type = FG_GC;
1754  	}
1755  
1756  	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1757  	if (gc_type == BG_GC && !background) {
1758  		ret = -EINVAL;
1759  		goto stop;
1760  	}
1761  	ret = __get_victim(sbi, &segno, gc_type);
1762  	if (ret)
1763  		goto stop;
1764  
1765  	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
1766  	if (gc_type == FG_GC &&
1767  		seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
1768  		sec_freed++;
1769  	total_freed += seg_freed;
1770  
1771  	if (gc_type == FG_GC) {
1772  		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
1773  						sbi->skipped_gc_rwsem)
1774  			skipped_round++;
1775  		last_skipped = sbi->skipped_atomic_files[FG_GC];
1776  		round++;
1777  	}
1778  
1779  	if (gc_type == FG_GC)
1780  		sbi->cur_victim_sec = NULL_SEGNO;
1781  
1782  	if (sync)
1783  		goto stop;
1784  
1785  	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
1786  		if (skipped_round <= MAX_SKIP_GC_COUNT ||
1787  					skipped_round * 2 < round) {
1788  			segno = NULL_SEGNO;
1789  			goto gc_more;
1790  		}
1791  
1792  		if (first_skipped < last_skipped &&
1793  				(last_skipped - first_skipped) >
1794  						sbi->skipped_gc_rwsem) {
1795  			f2fs_drop_inmem_pages_all(sbi, true);
1796  			segno = NULL_SEGNO;
1797  			goto gc_more;
1798  		}
1799  		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1800  			ret = f2fs_write_checkpoint(sbi, &cpc);
1801  	}
1802  stop:
1803  	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1804  	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
1805  
1806  	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1807  				get_pages(sbi, F2FS_DIRTY_NODES),
1808  				get_pages(sbi, F2FS_DIRTY_DENTS),
1809  				get_pages(sbi, F2FS_DIRTY_IMETA),
1810  				free_sections(sbi),
1811  				free_segments(sbi),
1812  				reserved_segments(sbi),
1813  				prefree_segments(sbi));
1814  
1815  	f2fs_up_write(&sbi->gc_lock);
1816  
1817  	put_gc_inode(&gc_list);
1818  
1819  	if (sync && !ret)
1820  		ret = sec_freed ? 0 : -EAGAIN;
1821  	return ret;
1822  }
1823  
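/*
 * Sketch of the FG_GC retry policy in f2fs_gc() above: after each round,
 * retry while free sections are still short, unless most recent rounds
 * were skipped (atomic files or lost i_gc_rwsem trylocks), in which case
 * the loop drops in-memory pages or bails out instead of spinning.
 * MAX_SKIP_GC_COUNT is assumed to be 16 as in f2fs.h; illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SKIP_GC_COUNT 16	/* assumed default from f2fs.h */

/* mirrors: skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round */
static bool should_retry(unsigned int skipped_round, unsigned int round)
{
	return skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round;
}

int main(void)
{
	printf("%d\n", should_retry(3, 10));	/* 1: few skips, keep going */
	printf("%d\n", should_retry(20, 30));	/* 0: mostly skipping, bail */
	return 0;
}
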
1824  int __init f2fs_create_garbage_collection_cache(void)
1825  {
1826  	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1827  					sizeof(struct victim_entry));
1828  	if (!victim_entry_slab)
1829  		return -ENOMEM;
1830  	return 0;
1831  }
1832  
1833  void f2fs_destroy_garbage_collection_cache(void)
1834  {
1835  	kmem_cache_destroy(victim_entry_slab);
1836  }
1837  
1838  static void init_atgc_management(struct f2fs_sb_info *sbi)
1839  {
1840  	struct atgc_management *am = &sbi->am;
1841  
1842  	if (test_opt(sbi, ATGC) &&
1843  		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1844  		am->atgc_enabled = true;
1845  
1846  	am->root = RB_ROOT_CACHED;
1847  	INIT_LIST_HEAD(&am->victim_list);
1848  	am->victim_count = 0;
1849  
1850  	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1851  	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1852  	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
1853  	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
1854  }
1855  
1856  void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1857  {
1858  	DIRTY_I(sbi)->v_ops = &default_v_ops;
1859  
1860  	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1861  
1862  	/* give warm/cold data area from slower device */
1863  	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1864  		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1865  				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1866  
1867  	init_atgc_management(sbi);
1868  }
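/*
 * Editor's note: on the multi-device hint above. GET_SEGNO(sbi,
 * FDEV(0).end_blk) + 1 is the first segment past device 0, so seeding
 * last_victim[ALLOC_NEXT] with it makes the ALLOC_NEXT victim search
 * start on the second (slower) device. Illustrative numbers, ignoring the
 * main-area offset for simplicity: if device 0 ends at block 262143 and a
 * segment holds 512 blocks, the search starts at segment
 * 262143 / 512 + 1 = 512.
 */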
1869  
1870  static int free_segment_range(struct f2fs_sb_info *sbi,
1871  				unsigned int secs, bool gc_only)
1872  {
1873  	unsigned int segno, next_inuse, start, end;
1874  	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1875  	int gc_mode, gc_type;
1876  	int err = 0;
1877  	int type;
1878  
1879  	/* Force block allocation for GC outside the range being removed */
1880  	MAIN_SECS(sbi) -= secs;
1881  	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1882  	end = MAIN_SEGS(sbi) - 1;
1883  
1884  	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1885  	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1886  		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1887  			SIT_I(sbi)->last_victim[gc_mode] = 0;
1888  
1889  	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1890  		if (sbi->next_victim_seg[gc_type] >= start)
1891  			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1892  	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
1893  
1894  	/* Move out cursegs from the target range */
1895  	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
1896  		f2fs_allocate_segment_for_resize(sbi, type, start, end);
1897  
1898  	/* do GC to move out valid blocks in the range */
1899  	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1900  		struct gc_inode_list gc_list = {
1901  			.ilist = LIST_HEAD_INIT(gc_list.ilist),
1902  			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1903  		};
1904  
1905  		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
1906  		put_gc_inode(&gc_list);
1907  
1908  		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1909  			err = -EAGAIN;
1910  			goto out;
1911  		}
1912  		if (fatal_signal_pending(current)) {
1913  			err = -ERESTARTSYS;
1914  			goto out;
1915  		}
1916  	}
1917  	if (gc_only)
1918  		goto out;
1919  
1920  	err = f2fs_write_checkpoint(sbi, &cpc);
1921  	if (err)
1922  		goto out;
1923  
1924  	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1925  	if (next_inuse <= end) {
1926  		f2fs_err(sbi, "segno %u should be free but is still in use!",
1927  			 next_inuse);
1928  		f2fs_bug_on(sbi, 1);
1929  	}
1930  out:
1931  	MAIN_SECS(sbi) += secs;
1932  	return err;
1933  }
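/*
 * Editor's note: free_segment_range() is called twice from
 * f2fs_resize_fs() below -- first with gc_only == true as a dry run that
 * only migrates valid blocks, then with gc_only == false to also write a
 * checkpoint and verify via find_next_inuse() that the shrunk range is
 * really empty. A sketch of that call sequence:
 *
 *	err = free_segment_range(sbi, secs, true);	// dry run, locks held
 *	...
 *	err = free_segment_range(sbi, secs, false);	// final pass, fs frozen
 */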
1934  
1935  static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1936  {
1937  	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
1938  	int section_count;
1939  	int segment_count;
1940  	int segment_count_main;
1941  	long long block_count;
1942  	int segs = secs * sbi->segs_per_sec;
1943  
1944  	f2fs_down_write(&sbi->sb_lock);
1945  
1946  	section_count = le32_to_cpu(raw_sb->section_count);
1947  	segment_count = le32_to_cpu(raw_sb->segment_count);
1948  	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
1949  	block_count = le64_to_cpu(raw_sb->block_count);
1950  
1951  	raw_sb->section_count = cpu_to_le32(section_count + secs);
1952  	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
1953  	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
1954  	raw_sb->block_count = cpu_to_le64(block_count +
1955  					(long long)segs * sbi->blocks_per_seg);
1956  	if (f2fs_is_multi_device(sbi)) {
1957  		int last_dev = sbi->s_ndevs - 1;
1958  		int dev_segs =
1959  			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
1960  
1961  		raw_sb->devs[last_dev].total_segments =
1962  						cpu_to_le32(dev_segs + segs);
1963  	}
1964  
1965  	f2fs_up_write(&sbi->sb_lock);
1966  }
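/*
 * Editor's note: a worked example for the arithmetic above, with
 * illustrative geometry (segs_per_sec == 2, blocks_per_seg == 512).
 * Shrinking by one section means update_sb_metadata(sbi, -1):
 *
 *	segs        = -1 * 2   = -2
 *	block delta = -2 * 512 = -1024
 *
 * so section_count drops by 1, segment_count and segment_count_main by 2,
 * and block_count by 1024, with every field converted through
 * cpu_to_le32/64 because the raw superblock is stored little-endian on
 * disk.
 */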
1967  
1968  static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
1969  {
1970  	int segs = secs * sbi->segs_per_sec;
1971  	long long blks = (long long)segs * sbi->blocks_per_seg;
1972  	long long user_block_count =
1973  				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
1974  
1975  	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
1976  	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
1977  	MAIN_SECS(sbi) += secs;
1978  	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
1979  	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
1980  	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
1981  
1982  	if (f2fs_is_multi_device(sbi)) {
1983  		int last_dev = sbi->s_ndevs - 1;
1984  
1985  		FDEV(last_dev).total_segments =
1986  				(int)FDEV(last_dev).total_segments + segs;
1987  		FDEV(last_dev).end_blk =
1988  				(long long)FDEV(last_dev).end_blk + blks;
1989  #ifdef CONFIG_BLK_DEV_ZONED
1990  		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
1991  					(int)(blks >> sbi->log_blocks_per_blkz);
1992  #endif
1993  	}
1994  }
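/*
 * Editor's note: the zoned-device adjustment above relies on the zone
 * size being a power-of-two number of blocks, so
 * blks >> log_blocks_per_blkz converts a block delta into a zone delta.
 * Illustrative numbers: with 2^16-block zones, a delta of 131072 blocks
 * is 131072 >> 16 == 2 zones. (During a shrink, secs -- and therefore
 * blks -- is negative.)
 */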
1995  
1996  int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
1997  {
1998  	__u64 old_block_count, shrunk_blocks;
1999  	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2000  	unsigned int secs;
2001  	int err = 0;
2002  	__u32 rem;
2003  
2004  	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2005  	if (block_count > old_block_count)
2006  		return -EINVAL;
2007  
2008  	if (f2fs_is_multi_device(sbi)) {
2009  		int last_dev = sbi->s_ndevs - 1;
2010  		__u64 last_segs = FDEV(last_dev).total_segments;
2011  
2012  		if (block_count + last_segs * sbi->blocks_per_seg <=
2013  								old_block_count)
2014  			return -EINVAL;
2015  	}
2016  
2017  	/* the new fs size must be aligned to the section size */
2018  	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2019  	if (rem)
2020  		return -EINVAL;
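	/*
	 * Editor's note: worked example for the alignment check above,
	 * with illustrative geometry. BLKS_PER_SEC is
	 * blocks_per_seg * segs_per_sec; with 512 and 2 that is 1024
	 * blocks per section. div_u64_rem(block_count, 1024, &rem) leaves
	 * rem == 0 only when the requested size is a whole number of
	 * sections, e.g. block_count == 10240 passes while 10241 is
	 * rejected with -EINVAL.
	 */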
2021  
2022  	if (block_count == old_block_count)
2023  		return 0;
2024  
2025  	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2026  		f2fs_err(sbi, "Should run fsck to repair first.");
2027  		return -EFSCORRUPTED;
2028  	}
2029  
2030  	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2031  		f2fs_err(sbi, "Checkpoint should be enabled.");
2032  		return -EINVAL;
2033  	}
2034  
2035  	shrunk_blocks = old_block_count - block_count;
2036  	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2037  
2038  	/* stop other GC */
2039  	if (!f2fs_down_write_trylock(&sbi->gc_lock))
2040  		return -EAGAIN;
2041  
2042  	/* stop CP to protect MAIN_SECS in free_segment_range */
2043  	f2fs_lock_op(sbi);
2044  
2045  	spin_lock(&sbi->stat_lock);
2046  	if (shrunk_blocks + valid_user_blocks(sbi) +
2047  		sbi->current_reserved_blocks + sbi->unusable_block_count +
2048  		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2049  		err = -ENOSPC;
2050  	spin_unlock(&sbi->stat_lock);
2051  
2052  	if (err)
2053  		goto out_unlock;
2054  
2055  	err = free_segment_range(sbi, secs, true);
2056  
2057  out_unlock:
2058  	f2fs_unlock_op(sbi);
2059  	f2fs_up_write(&sbi->gc_lock);
2060  	if (err)
2061  		return err;
2062  
2063  	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2064  
2065  	freeze_super(sbi->sb);
2066  	f2fs_down_write(&sbi->gc_lock);
2067  	f2fs_down_write(&sbi->cp_global_sem);
2068  
2069  	spin_lock(&sbi->stat_lock);
2070  	if (shrunk_blocks + valid_user_blocks(sbi) +
2071  		sbi->current_reserved_blocks + sbi->unusable_block_count +
2072  		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2073  		err = -ENOSPC;
2074  	else
2075  		sbi->user_block_count -= shrunk_blocks;
2076  	spin_unlock(&sbi->stat_lock);
2077  	if (err)
2078  		goto out_err;
2079  
2080  	err = free_segment_range(sbi, secs, false);
2081  	if (err)
2082  		goto recover_out;
2083  
2084  	update_sb_metadata(sbi, -secs);
2085  
2086  	err = f2fs_commit_super(sbi, false);
2087  	if (err) {
2088  		update_sb_metadata(sbi, secs);
2089  		goto recover_out;
2090  	}
2091  
2092  	update_fs_metadata(sbi, -secs);
2093  	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2094  	set_sbi_flag(sbi, SBI_IS_DIRTY);
2095  
2096  	err = f2fs_write_checkpoint(sbi, &cpc);
2097  	if (err) {
2098  		update_fs_metadata(sbi, secs);
2099  		update_sb_metadata(sbi, secs);
2100  		f2fs_commit_super(sbi, false);
2101  	}
2102  recover_out:
2103  	if (err) {
2104  		set_sbi_flag(sbi, SBI_NEED_FSCK);
2105  		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2106  
2107  		spin_lock(&sbi->stat_lock);
2108  		sbi->user_block_count += shrunk_blocks;
2109  		spin_unlock(&sbi->stat_lock);
2110  	}
2111  out_err:
2112  	f2fs_up_write(&sbi->cp_global_sem);
2113  	f2fs_up_write(&sbi->gc_lock);
2114  	thaw_super(sbi->sb);
2115  	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2116  	return err;
2117  }
2118
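/*
 * Editor's note: the resize path above, summarized as a rollback ladder
 * (each later step undoes the earlier ones on failure):
 *
 *	1. dry-run free_segment_range(.., gc_only = true) under gc_lock
 *	   with f2fs_lock_op() held
 *	2. freeze_super(); reserve the shrink by cutting user_block_count
 *	3. final free_segment_range(.., gc_only = false)
 *	4. update_sb_metadata(-secs); f2fs_commit_super()
 *	   -- on commit failure: update_sb_metadata(+secs)
 *	5. update_fs_metadata(-secs); f2fs_write_checkpoint()
 *	   -- on CP failure: undo both metadata updates, re-commit super
 *	6. any failure after step 2 restores user_block_count and sets
 *	   SBI_NEED_FSCK
 */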