// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (try_to_freeze() || f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions, so wait some time to let dirty
		 * segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

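		/*
		 * Adapt the polling interval to the workload: back off while
		 * there is little reclaimable space, and poll faster once
		 * enough invalid blocks have accumulated to make background
		 * GC worthwhile.
		 */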
		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't adjust wait_ms for foreground GC */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}
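	/* other modes (e.g. GC_NORMAL) keep the default chosen above */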

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range; all dirty segments should be
	 * selectable for foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * BLKS_PER_SEG(sbi) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can pick victim sections that were
	 * already selected by background GC, since those sections are
	 * guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

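	/*
	 * Cost-benefit policy: a smaller cost means a better victim.
	 * Illustrative numbers only: assuming 512 blocks per segment,
	 * vblocks = 128 gives u = 25, and an mtime halfway through
	 * [min_mtime, max_mtime] gives age = 50, so the cost is
	 * UINT_MAX - (100 * 75 * 50) / 125 = UINT_MAX - 3000.
	 */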
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

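/*
 * Walk the mtime-ordered rb-tree and return the last node visited, i.e.
 * an approximate neighbour of @mtime rather than an exact match; the
 * AT_SSR path uses it as the starting point of its two-direction scan.
 */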
static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up the rb tree to find the parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < SEGS_PER_SEC(sbi); i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

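/*
 * ATGC victim selection scores each candidate by a weighted sum of its
 * normalized age and its invalid-block ratio (by default roughly 60:40
 * via am->age_weight), then picks the entry with the smallest cost,
 * breaking ties in favour of the older section.
 */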
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
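	/* stage 0 scans towards older entries (rb_prev), stage 1 towards younger */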
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

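/*
 * Returns 0 when the inode is not pinned, -EBUSY so that BG_GC skips
 * pinned files, and -EAGAIN for FG_GC after trying to pin the section
 * (or, if pinning is disabled, asking f2fs_pin_file_control() to bump
 * the failure count and possibly unpin the file).
 */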
static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths: garbage collection and SSR
 * segment selection. When called during GC, it just picks a victim
 * segment and does not remove it from the dirty seglist. When called
 * for SSR segment selection, it finds the segment with the fewest
 * valid blocks and removes it from the dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
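		/*
		 * Reached the end of the bitmap: if an earlier scan stopped
		 * at last_victim, wrap around once and rescan the range
		 * [0, last_victim) before giving up.
		 */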
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting an invalid segno (one that previously failed
		 * the block validity check during GC) to avoid an endless GC
		 * loop in such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section during
				 * GC. The victim should have no checkpointed
				 * data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * that in the NAT. If the address is valid, the node is migrated with
 * cold status; otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: callers must pass only node offsets that indicate direct
 * node blocks. Passing an offset that points to any other type of node
 * block, such as an indirect or double indirect node block, is a
 * caller's bug.
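 *
 * For example (illustrative): node_ofs values 1 and 2 are the first two
 * direct node blocks, giving bidx 0 and 1; past that, each group of
 * NIDS_PER_BLOCK + 1 node offsets contains one non-data (indirect) node,
 * which the "dec" terms below subtract out before scaling by
 * ADDRS_PER_BLOCK() and adding ADDRS_PER_INODE().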
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

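/*
 * Check whether the data block at @blkaddr is still referenced by its
 * parent node: read the node page named in the summary entry and compare
 * the block address recorded there against @blkaddr.
 */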
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

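/*
 * Read one data block into the meta inode's page cache ahead of
 * migration; used for inodes that require post-read processing
 * (e.g. encryption), whose blocks must be moved without decryption.
 */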
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
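	/* cold data goes to the ATGC log only while background ATGC is active */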
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

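/*
 * Move a plaintext data block through the regular page cache: BG_GC only
 * redirties the page (and marks it as being GCed) so writeback migrates
 * it later, while FG_GC writes it out synchronously right away.
 */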
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated. If the parent node is not
 * valid or the data block address differs, the victim data block is
 * ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment becomes fully valid through a
		 * race condition with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get the inode by ino, checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in fewer usable segments in the zone than expected, so
	 * calculate the end segno of the zone that can be garbage collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
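				/* drop both the ref just taken and the one from f2fs_get_sum_page */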
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint. Then, we secure free segments which don't need
		 * FG_GC any more.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
					total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
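		/*
		 * Track rounds that made no progress because i_gc_rwsem
		 * could not be taken; once at least half of the rounds were
		 * skipped, fall back to a checkpoint instead of spinning on
		 * the same victims.
		 */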
1880 if (sbi->skipped_gc_rwsem)
1881 skipped_round++;
1882 round++;
1883 if (skipped_round > MAX_SKIP_GC_COUNT &&
1884 skipped_round * 2 >= round) {
1885 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1886 ret = f2fs_write_checkpoint(sbi, &cpc);
1887 goto stop;
1888 }
1889 } else if (has_enough_free_secs(sbi, 0, 0)) {
1890 goto stop;
1891 }
1892
1893 __get_secs_required(sbi, NULL, &upper_secs, NULL);
1894
1895 /*
1896 * Write checkpoint to reclaim prefree segments.
1897 * We need more three extra sections for writer's data/node/dentry.
1898 */
1899 if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1900 prefree_segments(sbi)) {
1901 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1902 ret = f2fs_write_checkpoint(sbi, &cpc);
1903 if (ret)
1904 goto stop;
1905 /* Reset due to checkpoint */
1906 sec_freed = 0;
1907 }
1908 go_gc_more:
1909 segno = NULL_SEGNO;
1910 goto gc_more;
1911
1912 stop:
1913 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1914 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1915
1916 if (gc_type == FG_GC)
1917 f2fs_unpin_all_sections(sbi, true);
1918
1919 trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1920 get_pages(sbi, F2FS_DIRTY_NODES),
1921 get_pages(sbi, F2FS_DIRTY_DENTS),
1922 get_pages(sbi, F2FS_DIRTY_IMETA),
1923 free_sections(sbi),
1924 free_segments(sbi),
1925 reserved_segments(sbi),
1926 prefree_segments(sbi));
1927
1928 f2fs_up_write(&sbi->gc_lock);
1929
1930 put_gc_inode(&gc_list);
1931
1932 if (gc_control->err_gc_skipped && !ret)
1933 ret = total_sec_freed ? 0 : -EAGAIN;
1934 return ret;
1935 }
1936
int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

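/*
 * Note: ATGC only arms itself when the mount option is set and the
 * filesystem has already aged past DEF_GC_THREAD_AGE_THRESHOLD,
 * presumably so that age-based victim selection is not used before
 * meaningful age information has accumulated.
 */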
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
	    SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

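/*
 * Run FG_GC over each section in [start_seg, end_seg]. When @dry_run
 * is false, every section must end up with no valid blocks, otherwise
 * -EAGAIN is returned. When @dry_run is true with a non-zero
 * @dry_run_sections, the loop stops early once that many sections have
 * been proven reclaimable, which lets callers probe a range without
 * requiring that all of it be freed.
 */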
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
						dry_run_sections == 0);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
			!get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}

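/*
 * Evacuate the last @secs sections of the main area in preparation for
 * shrinking the filesystem: temporarily lower MAIN_SECS() so new
 * allocations land below the target range, reset victim hints that
 * point into it, move the active cursegs out, then GC the range and
 * (unless @dry_run) write a checkpoint so the freed segments become
 * truly free. MAIN_SECS() is restored before returning; committing the
 * new geometry is left to the caller.
 */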
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

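/*
 * Fold a resize of @secs sections (negative when shrinking) into the
 * raw super block counters under sb_lock. Illustrative arithmetic,
 * assuming the common 512-block segment (log_blocks_per_seg == 9) and
 * one segment per section: secs == -4 drops section_count by 4,
 * segment_count by 4, and block_count by 4 << 9 == 2048.
 */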
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
			(long long)(segs << sbi->log_blocks_per_seg));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

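/*
 * Mirror the same resize into the in-memory counters and the
 * checkpoint's user_block_count; on multi-device setups the last
 * device absorbs the change, including its zone count when zoned
 * block devices are enabled.
 */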
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = (long long)segs << sbi->log_blocks_per_seg;
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}

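/*
 * Shrink the filesystem to @block_count blocks (growing is not
 * supported). The shrink happens in two phases: a dry-run pass under
 * f2fs_lock_op() first checks that the tail sections can be evacuated,
 * then, with the super block frozen, the range is freed for real and
 * the raw super block, in-memory metadata, and a checkpoint are
 * committed in order, rolling back on failure.
 */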
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + (last_segs << sbi->log_blocks_per_seg) <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	return err;
}